/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.0.8"
#define DRV_MODULE_RELDATE	"Feb 15, 2010"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-5.0.0.j9.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-5.0.0.j10.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};

static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);

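/* Note on bnx2_tx_avail() below: tx_prod and tx_cons are 16-bit
 * hardware-style indices that wrap independently, so the subtraction
 * is effectively modulo 64K.  For example, tx_prod == 2 and
 * tx_cons == 0xfffe gives a masked diff of 4 in-flight descriptors.
 */
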
static inline u32
bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}

static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

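/* The two helpers below access the driver/firmware shared memory
 * mailbox (link status, heartbeat pulse, command handshake); offsets
 * are relative to shmem_base.  Access goes through the same indirect
 * register window as above, serialized by indirect_lock.
 */
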
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

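/* Everything from here to the matching #endif is only built when the
 * CNIC offload driver (used for iSCSI offload) is configured; it
 * exposes register access and IRQ information to cnic.ko through
 * struct cnic_eth_dev.
 */
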
#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

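/* MDIO access: while the EMAC auto-polls the PHY for link state, a
 * manual clause-22 read/write would race with the poller, so both
 * helpers below pause AUTO_POLL first and restore it afterwards.
 * Each transaction is bounded to 50 polls at 10 usec before failing
 * with -EBUSY.
 */
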
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

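/* bnx2_enable_int() above acks events up to last_status_idx with the
 * mask bit still set, then repeats the write without the mask bit to
 * unmask the vector; the final COAL_NOW kick makes the host coalescing
 * block generate an interrupt promptly if work arrived while the
 * vector was masked.
 */
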
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		int i;

		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		/* prevent tx timeout */
		for (i = 0; i < bp->dev->num_tx_queues; i++) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(bp->dev, i);
			txq->trans_start = jiffies;
		}
	}
	bnx2_disable_int_sync(bp);
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			bnx2_cnic_start(bp);
		}
	}
}

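/* intr_sem makes the stop/start pair above nestable: each
 * bnx2_netif_stop() increments it (via bnx2_disable_int_sync()) and
 * only the bnx2_netif_start() call that brings it back to zero
 * re-enables the tx queues, NAPI and interrupts.
 */
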
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;
		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}

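/* bnx2_alloc_mem() below places all status blocks and the statistics
 * block in a single DMA allocation: one status block per MSI-X vector,
 * spaced BNX2_SBLK_MSIX_ALIGN_SIZE apart when the chip is MSI-X
 * capable, with the statistics block appended after them.
 * bnx2_free_mem() above releases it through the bnx2_napi[0] slot,
 * which holds the base pointer.
 */
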
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

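/* Pause resolution in bnx2_resolve_flow_ctrl() below implements
 * Table 28B-3 of IEEE 802.3.  With CAP = ADVERTISE_PAUSE_CAP and
 * ASYM = ADVERTISE_PAUSE_ASYM, representative outcomes are:
 *
 *	local		remote		resolved flow_ctrl
 *	CAP		CAP		TX | RX
 *	CAP | ASYM	CAP		TX | RX
 *	CAP | ASYM	ASYM		RX
 *	ASYM		CAP | ASYM	TX
 *
 * Any other combination leaves flow control disabled.
 */
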
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

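/* Worked example for the watermark math above, assuming a 255-entry
 * rx ring with tx pause enabled: lo_water starts at the DEFAULT mark,
 * hi_water = min(255 / 4, lo_water + 16), both are then divided by
 * their respective _SCALE constants, and hi_water is finally clamped
 * to the 4-bit field (0xf) packed into the context type word.
 */
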
static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

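/* bnx2_reset_phy() below issues a BMCR soft reset and polls the
 * self-clearing BMCR_RESET bit at 10 usec intervals, giving up with
 * -EBUSY after PHY_RESET_MAX_WAIT iterations.
 */
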
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

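/* Note the two flavors of advertisement masks above: the ETHTOOL_ALL_*
 * values are ethtool ADVERTISED_* bits (FIBRE includes 2.5G only on
 * 2.5G-capable PHYs), while the PHY_ALL_* values are MII
 * advertisement-register bits used when reading back and rewriting
 * the PHY registers.
 */
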
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}

static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

static void
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
		case BNX2_FW_EVT_CODE_LINK_EVENT:
			bnx2_remote_phy_event(bp);
			break;
		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		default:
			bnx2_send_heart_beat(bp);
			break;
	}
}

static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}

static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}

static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}

static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}

static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}

2362 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2363 __releases(&bp->phy_lock)
2364 __acquires(&bp->phy_lock)
2369 	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2370 	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2372 	bp->mii_bmcr = MII_BMCR;
2373 	bp->mii_bmsr = MII_BMSR;
2374 	bp->mii_bmsr1 = MII_BMSR;
2375 	bp->mii_adv = MII_ADVERTISE;
2376 	bp->mii_lpa = MII_LPA;
2378 	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2380 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2383 	bnx2_read_phy(bp, MII_PHYSID1, &val);
2384 	bp->phy_id = val << 16;
2385 	bnx2_read_phy(bp, MII_PHYSID2, &val);
2386 	bp->phy_id |= val & 0xffff;
2388 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2389 		if (CHIP_NUM(bp) == CHIP_NUM_5706)
2390 			rc = bnx2_init_5706s_phy(bp, reset_phy);
2391 		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2392 			rc = bnx2_init_5708s_phy(bp, reset_phy);
2393 		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2394 			rc = bnx2_init_5709s_phy(bp, reset_phy);
2397 		rc = bnx2_init_copper_phy(bp, reset_phy);
2402 	rc = bnx2_setup_phy(bp, bp->phy_port);
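/* Annotation: the two loopback helpers below are used by the ethtool
 * self-test. MAC loopback wraps frames inside the EMAC; PHY loopback
 * sets BMCR_LOOPBACK and forces GMII mode so frames are echoed at the
 * PHY boundary instead. */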
2408 bnx2_set_mac_loopback(struct bnx2 *bp)
2412 	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2413 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2414 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2415 	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2420 static int bnx2_test_link(struct bnx2 *);
2423 bnx2_set_phy_loopback(struct bnx2 *bp)
2428 	spin_lock_bh(&bp->phy_lock);
2429 	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2431 	spin_unlock_bh(&bp->phy_lock);
2435 	for (i = 0; i < 10; i++) {
2436 		if (bnx2_test_link(bp) == 0)
2441 	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2442 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2443 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2444 		      BNX2_EMAC_MODE_25G_MODE);
2446 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2447 	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
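/* Annotation: driver/bootcode handshake below tags each message with a
 * rolling sequence number in the BNX2_DRV_MB shared-memory mailbox; the
 * firmware acknowledges by echoing the sequence back in BNX2_FW_MB. */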
2453 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2459 	msg_data |= bp->fw_wr_seq;
2461 	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2466 	/* wait for an acknowledgement. */
2467 	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2470 		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2472 		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2475 	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2478 	/* If we timed out, inform the firmware that this is the case. */
2479 	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2481 		pr_err("fw sync timeout, reset code = %x\n", msg_data);
2483 		msg_data &= ~BNX2_DRV_MSG_CODE;
2484 		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2486 		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2491 	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
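/* Annotation: on the 5709 the connection context lives in host memory
 * pages; each page is zeroed and its DMA address programmed into the
 * on-chip host page table, polling WRITE_REQ for completion. */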
2498 bnx2_init_5709_context(struct bnx2 *bp)
2503 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2504 	val |= (BCM_PAGE_BITS - 8) << 16;
2505 	REG_WR(bp, BNX2_CTX_COMMAND, val);
2506 	for (i = 0; i < 10; i++) {
2507 		val = REG_RD(bp, BNX2_CTX_COMMAND);
2508 		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2512 	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2515 	for (i = 0; i < bp->ctx_pages; i++) {
2519 		memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2523 		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2524 		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
2525 		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2526 		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2527 		       (u64) bp->ctx_blk_mapping[i] >> 32);
2528 		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2529 		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2530 		for (j = 0; j < 10; j++) {
2532 			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2533 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2537 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2546 bnx2_init_context(struct bnx2 *bp)
2552 		u32 vcid_addr, pcid_addr, offset;
2557 		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2560 			vcid_addr = GET_PCID_ADDR(vcid);
2562 			new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2567 			pcid_addr = GET_PCID_ADDR(new_vcid);
2570 			vcid_addr = GET_CID_ADDR(vcid);
2571 			pcid_addr = vcid_addr;
2574 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2575 			vcid_addr += (i << PHY_CTX_SHIFT);
2576 			pcid_addr += (i << PHY_CTX_SHIFT);
2578 			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2579 			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2581 			/* Zero out the context. */
2582 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2583 				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2589 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2595 	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2596 	if (good_mbuf == NULL) {
2597 		pr_err("Failed to allocate memory in %s\n", __func__);
2601 	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2602 	       BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2606 	/* Allocate a bunch of mbufs and save the good ones in an array. */
2607 	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2608 	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2609 		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2610 				BNX2_RBUF_COMMAND_ALLOC_REQ);
2612 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2614 		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2616 		/* The addresses with Bit 9 set are bad memory blocks. */
2617 		if (!(val & (1 << 9))) {
2618 			good_mbuf[good_mbuf_cnt] = (u16) val;
2622 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2625 	/* Free the good ones back to the mbuf pool thus discarding
2626 	 * all the bad ones. */
2627 	while (good_mbuf_cnt) {
2630 		val = good_mbuf[good_mbuf_cnt];
2631 		val = (val << 9) | val | 1;
2633 		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
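/* Annotation: the helper below programs one perfect-match MAC address
 * entry; pos selects one of the EMAC_MAC_MATCH register pairs, and the
 * callers shown in this file use pos 0 for the primary address. */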
2640 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2644 	val = (mac_addr[0] << 8) | mac_addr[1];
2646 	REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2648 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2649 	      (mac_addr[4] << 8) | mac_addr[5];
2651 	REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2655 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2658 	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2659 	struct rx_bd *rxbd =
2660 		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2661 	struct page *page = alloc_page(GFP_ATOMIC);
2665 	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2666 			       PCI_DMA_FROMDEVICE);
2667 	if (pci_dma_mapping_error(bp->pdev, mapping)) {
2673 	dma_unmap_addr_set(rx_pg, mapping, mapping);
2674 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2675 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2680 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2682 	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2683 	struct page *page = rx_pg->page;
2688 	pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2689 		       PCI_DMA_FROMDEVICE);
2696 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2698 	struct sk_buff *skb;
2699 	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2701 	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2702 	unsigned long align;
2704 	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2709 	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2710 		skb_reserve(skb, BNX2_RX_ALIGN - align);
2712 	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2713 				 PCI_DMA_FROMDEVICE);
2714 	if (pci_dma_mapping_error(bp->pdev, mapping)) {
2720 	dma_unmap_addr_set(rx_buf, mapping, mapping);
2722 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2723 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2725 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
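/* Annotation: link-change handling below treats an attention event as
 * "new" when the attention bit and its ack bit disagree, then acks it
 * by setting or clearing the status bit via PCICFG commands. */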
2731 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2733 	struct status_block *sblk = bnapi->status_blk.msi;
2734 	u32 new_link_state, old_link_state;
2737 	new_link_state = sblk->status_attn_bits & event;
2738 	old_link_state = sblk->status_attn_bits_ack & event;
2739 	if (new_link_state != old_link_state) {
2741 		REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2743 		REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2751 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2753 	spin_lock(&bp->phy_lock);
2755 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2757 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2758 		bnx2_set_remote_link(bp);
2760 	spin_unlock(&bp->phy_lock);
2765 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2769 	/* Tell compiler that status block fields can change. */
2771 	cons = *bnapi->hw_tx_cons_ptr;
2773 	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
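/* Annotation: bnx2_tx_int below reclaims completed TX descriptors up to
 * the hardware consumer index, unmapping the linear head and all frags,
 * and re-wakes the queue once enough descriptors are free (guarded by a
 * barrier against bnx2_start_xmit()). */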
2779 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2781 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2782 	u16 hw_cons, sw_cons, sw_ring_cons;
2783 	int tx_pkt = 0, index;
2784 	struct netdev_queue *txq;
2786 	index = (bnapi - bp->bnx2_napi);
2787 	txq = netdev_get_tx_queue(bp->dev, index);
2789 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2790 	sw_cons = txr->tx_cons;
2792 	while (sw_cons != hw_cons) {
2793 		struct sw_tx_bd *tx_buf;
2794 		struct sk_buff *skb;
2797 		sw_ring_cons = TX_RING_IDX(sw_cons);
2799 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2802 		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2803 		prefetch(&skb->end);
2805 		/* partial BD completions possible with TSO packets */
2806 		if (tx_buf->is_gso) {
2807 			u16 last_idx, last_ring_idx;
2809 			last_idx = sw_cons + tx_buf->nr_frags + 1;
2810 			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2811 			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2814 			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2819 		pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
2820 				 skb_headlen(skb), PCI_DMA_TODEVICE);
2823 		last = tx_buf->nr_frags;
2825 		for (i = 0; i < last; i++) {
2826 			sw_cons = NEXT_TX_BD(sw_cons);
2828 			pci_unmap_page(bp->pdev,
2830 				&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2832 				skb_shinfo(skb)->frags[i].size,
2836 		sw_cons = NEXT_TX_BD(sw_cons);
2840 		if (tx_pkt == budget)
2843 		if (hw_cons == sw_cons)
2844 			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2847 	txr->hw_tx_cons = hw_cons;
2848 	txr->tx_cons = sw_cons;
2850 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2851 	 * before checking for netif_tx_queue_stopped(). Without the
2852 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2853 	 * will miss it and cause the queue to be stopped forever. */
2857 	if (unlikely(netif_tx_queue_stopped(txq)) &&
2858 	    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2859 		__netif_tx_lock(txq, smp_processor_id());
2860 		if ((netif_tx_queue_stopped(txq)) &&
2861 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2862 			netif_tx_wake_queue(txq);
2863 		__netif_tx_unlock(txq);
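/* Annotation: the RX recycle helpers below put the old buffer or page
 * back on the producer ring whenever a replacement cannot be allocated,
 * so the hardware never sees an empty buffer descriptor. */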
2870 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2871 			struct sk_buff *skb, int count)
2873 	struct sw_pg *cons_rx_pg, *prod_rx_pg;
2874 	struct rx_bd *cons_bd, *prod_bd;
2877 	u16 cons = rxr->rx_pg_cons;
2879 	cons_rx_pg = &rxr->rx_pg_ring[cons];
2881 	/* The caller was unable to allocate a new page to replace the
2882 	 * last one in the frags array, so we need to recycle that page
2883 	 * and then free the skb. */
2887 		struct skb_shared_info *shinfo;
2889 		shinfo = skb_shinfo(skb);
2891 		page = shinfo->frags[shinfo->nr_frags].page;
2892 		shinfo->frags[shinfo->nr_frags].page = NULL;
2894 		cons_rx_pg->page = page;
2898 	hw_prod = rxr->rx_pg_prod;
2900 	for (i = 0; i < count; i++) {
2901 		prod = RX_PG_RING_IDX(hw_prod);
2903 		prod_rx_pg = &rxr->rx_pg_ring[prod];
2904 		cons_rx_pg = &rxr->rx_pg_ring[cons];
2905 		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2906 		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2909 		prod_rx_pg->page = cons_rx_pg->page;
2910 		cons_rx_pg->page = NULL;
2911 		dma_unmap_addr_set(prod_rx_pg, mapping,
2912 				   dma_unmap_addr(cons_rx_pg, mapping));
2914 		prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2915 		prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2918 		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2919 		hw_prod = NEXT_RX_BD(hw_prod);
2921 	rxr->rx_pg_prod = hw_prod;
2922 	rxr->rx_pg_cons = cons;
2926 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2927 		  struct sk_buff *skb, u16 cons, u16 prod)
2929 	struct sw_bd *cons_rx_buf, *prod_rx_buf;
2930 	struct rx_bd *cons_bd, *prod_bd;
2932 	cons_rx_buf = &rxr->rx_buf_ring[cons];
2933 	prod_rx_buf = &rxr->rx_buf_ring[prod];
2935 	pci_dma_sync_single_for_device(bp->pdev,
2936 		dma_unmap_addr(cons_rx_buf, mapping),
2937 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2939 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2941 	prod_rx_buf->skb = skb;
2946 	dma_unmap_addr_set(prod_rx_buf, mapping,
2947 			   dma_unmap_addr(cons_rx_buf, mapping));
2949 	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2950 	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2951 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2952 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2956 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2957 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2961 	u16 prod = ring_idx & 0xffff;
2963 	err = bnx2_alloc_rx_skb(bp, rxr, prod);
2964 	if (unlikely(err)) {
2965 		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2967 			unsigned int raw_len = len + 4;
2968 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2970 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2975 	skb_reserve(skb, BNX2_RX_OFFSET);
2976 	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2977 			 PCI_DMA_FROMDEVICE);
2983 		unsigned int i, frag_len, frag_size, pages;
2984 		struct sw_pg *rx_pg;
2985 		u16 pg_cons = rxr->rx_pg_cons;
2986 		u16 pg_prod = rxr->rx_pg_prod;
2988 		frag_size = len + 4 - hdr_len;
2989 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2990 		skb_put(skb, hdr_len);
2992 		for (i = 0; i < pages; i++) {
2993 			dma_addr_t mapping_old;
2995 			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2996 			if (unlikely(frag_len <= 4)) {
2997 				unsigned int tail = 4 - frag_len;
2999 				rxr->rx_pg_cons = pg_cons;
3000 				rxr->rx_pg_prod = pg_prod;
3001 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3008 					&skb_shinfo(skb)->frags[i - 1];
3010 				skb->data_len -= tail;
3011 				skb->truesize -= tail;
3015 			rx_pg = &rxr->rx_pg_ring[pg_cons];
3017 			/* Don't unmap yet. If we're unable to allocate a new
3018 			 * page, we need to recycle the page and the DMA addr. */
3020 			mapping_old = dma_unmap_addr(rx_pg, mapping);
3024 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3027 			err = bnx2_alloc_rx_page(bp, rxr,
3028 						 RX_PG_RING_IDX(pg_prod));
3029 			if (unlikely(err)) {
3030 				rxr->rx_pg_cons = pg_cons;
3031 				rxr->rx_pg_prod = pg_prod;
3032 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3037 			pci_unmap_page(bp->pdev, mapping_old,
3038 				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3040 			frag_size -= frag_len;
3041 			skb->data_len += frag_len;
3042 			skb->truesize += frag_len;
3043 			skb->len += frag_len;
3045 			pg_prod = NEXT_RX_BD(pg_prod);
3046 			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3048 		rxr->rx_pg_prod = pg_prod;
3049 		rxr->rx_pg_cons = pg_cons;
3055 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3059 	/* Tell compiler that status block fields can change. */
3061 	cons = *bnapi->hw_rx_cons_ptr;
3063 	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3069 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3071 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3072 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3073 	struct l2_fhdr *rx_hdr;
3074 	int rx_pkt = 0, pg_ring_used = 0;
3076 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3077 	sw_cons = rxr->rx_cons;
3078 	sw_prod = rxr->rx_prod;
3080 	/* Memory barrier necessary as speculative reads of the rx
3081 	 * buffer can be ahead of the index in the status block */
3084 	while (sw_cons != hw_cons) {
3085 		unsigned int len, hdr_len;
3087 		struct sw_bd *rx_buf;
3088 		struct sk_buff *skb;
3089 		dma_addr_t dma_addr;
3091 		int hw_vlan __maybe_unused = 0;
3093 		sw_ring_cons = RX_RING_IDX(sw_cons);
3094 		sw_ring_prod = RX_RING_IDX(sw_prod);
3096 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3101 		dma_addr = dma_unmap_addr(rx_buf, mapping);
3103 		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
3104 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3105 			PCI_DMA_FROMDEVICE);
3107 		rx_hdr = (struct l2_fhdr *) skb->data;
3108 		len = rx_hdr->l2_fhdr_pkt_len;
3109 		status = rx_hdr->l2_fhdr_status;
3112 		if (status & L2_FHDR_STATUS_SPLIT) {
3113 			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3115 		} else if (len > bp->rx_jumbo_thresh) {
3116 			hdr_len = bp->rx_jumbo_thresh;
3120 		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3121 				       L2_FHDR_ERRORS_PHY_DECODE |
3122 				       L2_FHDR_ERRORS_ALIGNMENT |
3123 				       L2_FHDR_ERRORS_TOO_SHORT |
3124 				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3126 			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3131 				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3133 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3140 		if (len <= bp->rx_copy_thresh) {
3141 			struct sk_buff *new_skb;
3143 			new_skb = netdev_alloc_skb(bp->dev, len + 6);
3144 			if (new_skb == NULL) {
3145 				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3151 			skb_copy_from_linear_data_offset(skb,
3153 				new_skb->data, len + 6);
3154 			skb_reserve(new_skb, 6);
3155 			skb_put(new_skb, len);
3157 			bnx2_reuse_rx_skb(bp, rxr, skb,
3158 					  sw_ring_cons, sw_ring_prod);
3161 		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3162 			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3165 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3166 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3167 			vtag = rx_hdr->l2_fhdr_vlan_tag;
3174 				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3177 				memmove(ve, skb->data + 4, ETH_ALEN * 2);
3178 				ve->h_vlan_proto = htons(ETH_P_8021Q);
3179 				ve->h_vlan_TCI = htons(vtag);
3184 		skb->protocol = eth_type_trans(skb, bp->dev);
3186 		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3187 		    (ntohs(skb->protocol) != 0x8100)) {
3194 		skb->ip_summed = CHECKSUM_NONE;
3196 		    (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3197 			       L2_FHDR_STATUS_UDP_DATAGRAM))) {
3199 			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3200 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3201 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3204 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3208 			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
3211 			netif_receive_skb(skb);
3216 		sw_cons = NEXT_RX_BD(sw_cons);
3217 		sw_prod = NEXT_RX_BD(sw_prod);
3219 		if ((rx_pkt == budget))
3222 		/* Refresh hw_cons to see if there is new work */
3223 		if (sw_cons == hw_cons) {
3224 			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3228 	rxr->rx_cons = sw_cons;
3229 	rxr->rx_prod = sw_prod;
3232 		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3234 	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3236 	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3244 /* MSI ISR - The only difference between this and the INTx ISR
3245  * is that the MSI interrupt is always serviced. */
3248 bnx2_msi(int irq, void *dev_instance)
3250 	struct bnx2_napi *bnapi = dev_instance;
3251 	struct bnx2 *bp = bnapi->bp;
3253 	prefetch(bnapi->status_blk.msi);
3254 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3255 	       BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3256 	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3258 	/* Return here if interrupt is disabled. */
3259 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3262 	napi_schedule(&bnapi->napi);
3268 bnx2_msi_1shot(int irq, void *dev_instance)
3270 	struct bnx2_napi *bnapi = dev_instance;
3271 	struct bnx2 *bp = bnapi->bp;
3273 	prefetch(bnapi->status_blk.msi);
3275 	/* Return here if interrupt is disabled. */
3276 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3279 	napi_schedule(&bnapi->napi);
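/* Annotation: the INTx handler below must tolerate shared interrupts
 * and flush the status block write before trusting status_idx, which is
 * why it re-reads PCICFG registers where the MSI handlers do not. */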
3285 bnx2_interrupt(int irq, void *dev_instance)
3287 	struct bnx2_napi *bnapi = dev_instance;
3288 	struct bnx2 *bp = bnapi->bp;
3289 	struct status_block *sblk = bnapi->status_blk.msi;
3291 	/* When using INTx, it is possible for the interrupt to arrive
3292 	 * at the CPU before the status block posted prior to the
3293 	 * interrupt. Reading a register will flush the status block.
3294 	 * When using MSI, the MSI message will always complete after
3295 	 * the status block write. */
3297 	if ((sblk->status_idx == bnapi->last_status_idx) &&
3298 	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3299 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3302 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3303 	       BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3304 	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3306 	/* Read back to deassert IRQ immediately to avoid too many
3307 	 * spurious interrupts. */
3309 	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3311 	/* Return here if interrupt is shared and is disabled. */
3312 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3315 	if (napi_schedule_prep(&bnapi->napi)) {
3316 		bnapi->last_status_idx = sblk->status_idx;
3317 		__napi_schedule(&bnapi->napi);
3324 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3326 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3327 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3329 	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3330 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3335 #define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3336 				 STATUS_ATTN_BITS_TIMER_ABORT)
3339 bnx2_has_work(struct bnx2_napi *bnapi)
3341 	struct status_block *sblk = bnapi->status_blk.msi;
3343 	if (bnx2_has_fast_work(bnapi))
3347 	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3351 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3352 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3359 bnx2_chk_missed_msi(struct bnx2 *bp)
3361 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3364 	if (bnx2_has_work(bnapi)) {
3365 		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3366 		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3369 		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3370 			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3371 			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3372 			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3373 			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3377 	bp->idle_chk_status_idx = bnapi->last_status_idx;
3381 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3383 	struct cnic_ops *c_ops;
3385 	if (!bnapi->cnic_present)
3389 	c_ops = rcu_dereference(bp->cnic_ops);
3391 		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3392 						      bnapi->status_blk.msi);
3397 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3399 	struct status_block *sblk = bnapi->status_blk.msi;
3400 	u32 status_attn_bits = sblk->status_attn_bits;
3401 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3403 	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3404 	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3406 		bnx2_phy_int(bp, bnapi);
3408 		/* This is needed to take care of transient status
3409 		 * during link changes. */
3411 		REG_WR(bp, BNX2_HC_COMMAND,
3412 		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3413 		REG_RD(bp, BNX2_HC_COMMAND);
3417 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3418 			  int work_done, int budget)
3420 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3421 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3423 	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3424 		bnx2_tx_int(bp, bnapi, 0);
3426 	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3427 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
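/* Annotation: of the two NAPI poll callbacks below, the MSI-X variant
 * services only its own ring pair, while bnx2_poll additionally handles
 * link events and the optional CNIC hook before re-enabling
 * interrupts. */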
3432 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3434 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3435 	struct bnx2 *bp = bnapi->bp;
3437 	struct status_block_msix *sblk = bnapi->status_blk.msix;
3440 	work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3441 	if (unlikely(work_done >= budget))
3444 	bnapi->last_status_idx = sblk->status_idx;
3445 	/* status idx must be read before checking for more work. */
3447 	if (likely(!bnx2_has_fast_work(bnapi))) {
3449 		napi_complete(napi);
3450 		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3451 		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3452 		       bnapi->last_status_idx);
3459 static int bnx2_poll(struct napi_struct *napi, int budget)
3461 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3462 	struct bnx2 *bp = bnapi->bp;
3464 	struct status_block *sblk = bnapi->status_blk.msi;
3467 	bnx2_poll_link(bp, bnapi);
3469 	work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3472 	bnx2_poll_cnic(bp, bnapi);
3475 	/* bnapi->last_status_idx is used below to tell the hw how
3476 	 * much work has been processed, so we must read it before
3477 	 * checking for more work. */
3479 	bnapi->last_status_idx = sblk->status_idx;
3481 	if (unlikely(work_done >= budget))
3485 	if (likely(!bnx2_has_work(bnapi))) {
3486 		napi_complete(napi);
3487 		if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3488 			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3489 			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3490 			       bnapi->last_status_idx);
3493 		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3494 		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3495 		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3496 		       bnapi->last_status_idx);
3498 		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3499 		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3500 		       bnapi->last_status_idx);
3508 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3509  * from set_multicast. */
3512 bnx2_set_rx_mode(struct net_device *dev)
3514 	struct bnx2 *bp = netdev_priv(dev);
3515 	u32 rx_mode, sort_mode;
3516 	struct netdev_hw_addr *ha;
3519 	if (!netif_running(dev))
3522 	spin_lock_bh(&bp->phy_lock);
3524 	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3525 				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3526 	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3528 	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3529 		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3531 	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3532 		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3534 	if (dev->flags & IFF_PROMISC) {
3535 		/* Promiscuous mode. */
3536 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3537 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3538 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3540 	else if (dev->flags & IFF_ALLMULTI) {
3541 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3542 			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3545 		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3548 		/* Accept one or more multicast(s). */
3549 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3554 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3556 		netdev_for_each_mc_addr(ha, dev) {
3557 			crc = ether_crc_le(ETH_ALEN, ha->addr);
3559 			regidx = (bit & 0xe0) >> 5;
3561 			mc_filter[regidx] |= (1 << bit);
3564 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3565 			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3569 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3572 	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3573 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3574 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3575 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3576 	} else if (!(dev->flags & IFF_PROMISC)) {
3577 		/* Add all entries into the match filter list */
3579 		netdev_for_each_uc_addr(ha, dev) {
3580 			bnx2_set_mac_addr(bp, ha->addr,
3581 					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3583 				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3589 	if (rx_mode != bp->rx_mode) {
3590 		bp->rx_mode = rx_mode;
3591 		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3594 	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3595 	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3596 	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3598 	spin_unlock_bh(&bp->phy_lock);
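/* Annotation: firmware image validation below bounds- and
 * alignment-checks every section descriptor in the MIPS and RV2P files
 * before anything is loaded into the on-chip processors. */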
3601 static int __devinit
3602 check_fw_section(const struct firmware *fw,
3603 		 const struct bnx2_fw_file_section *section,
3604 		 u32 alignment, bool non_empty)
3606 	u32 offset = be32_to_cpu(section->offset);
3607 	u32 len = be32_to_cpu(section->len);
3609 	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3611 	if ((non_empty && len == 0) || len > fw->size - offset ||
3612 	    len & (alignment - 1))
3617 static int __devinit
3618 check_mips_fw_entry(const struct firmware *fw,
3619 		    const struct bnx2_mips_fw_file_entry *entry)
3621 	if (check_fw_section(fw, &entry->text, 4, true) ||
3622 	    check_fw_section(fw, &entry->data, 4, false) ||
3623 	    check_fw_section(fw, &entry->rodata, 4, false))
3628 static int __devinit
3629 bnx2_request_firmware(struct bnx2 *bp)
3631 	const char *mips_fw_file, *rv2p_fw_file;
3632 	const struct bnx2_mips_fw_file *mips_fw;
3633 	const struct bnx2_rv2p_fw_file *rv2p_fw;
3636 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3637 		mips_fw_file = FW_MIPS_FILE_09;
3638 		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3639 		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
3640 			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3642 			rv2p_fw_file = FW_RV2P_FILE_09;
3644 		mips_fw_file = FW_MIPS_FILE_06;
3645 		rv2p_fw_file = FW_RV2P_FILE_06;
3648 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3650 		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3654 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3656 		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3659 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3660 	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3661 	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3662 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3663 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3664 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3665 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3666 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3667 		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3670 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3671 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3672 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3673 		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
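/* Annotation: the RV2P image is loaded 64 bits at a time through the
 * INSTR_HIGH/INSTR_LOW register window; the fixup records that follow
 * the main loop patch page-size dependent instructions in place. */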
3681 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3684 	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3685 		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3686 		rv2p_code |= RV2P_BD_PAGE_SIZE;
3693 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3694 	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3696 	u32 rv2p_code_len, file_offset;
3701 	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3702 	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3704 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3706 	if (rv2p_proc == RV2P_PROC1) {
3707 		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3708 		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3710 		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3711 		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3714 	for (i = 0; i < rv2p_code_len; i += 8) {
3715 		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3717 		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3720 		val = (i / 8) | cmd;
3721 		REG_WR(bp, addr, val);
3724 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3725 	for (i = 0; i < 8; i++) {
3728 		loc = be32_to_cpu(fw_entry->fixup[i]);
3729 		if (loc && ((loc * 4) < rv2p_code_len)) {
3730 			code = be32_to_cpu(*(rv2p_code + loc - 1));
3731 			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3732 			code = be32_to_cpu(*(rv2p_code + loc));
3733 			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3734 			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3736 			val = (loc / 2) | cmd;
3737 			REG_WR(bp, addr, val);
3741 	/* Reset the processor, un-stall is done later. */
3742 	if (rv2p_proc == RV2P_PROC1) {
3743 		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3746 		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3753 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3754 	    const struct bnx2_mips_fw_file_entry *fw_entry)
3756 	u32 addr, len, file_offset;
3762 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3763 	val |= cpu_reg->mode_value_halt;
3764 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3765 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3767 	/* Load the Text area. */
3768 	addr = be32_to_cpu(fw_entry->text.addr);
3769 	len = be32_to_cpu(fw_entry->text.len);
3770 	file_offset = be32_to_cpu(fw_entry->text.offset);
3771 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3773 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3777 	for (j = 0; j < (len / 4); j++, offset += 4)
3778 		bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3781 	/* Load the Data area. */
3782 	addr = be32_to_cpu(fw_entry->data.addr);
3783 	len = be32_to_cpu(fw_entry->data.len);
3784 	file_offset = be32_to_cpu(fw_entry->data.offset);
3785 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3787 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3791 	for (j = 0; j < (len / 4); j++, offset += 4)
3792 		bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3795 	/* Load the Read-Only area. */
3796 	addr = be32_to_cpu(fw_entry->rodata.addr);
3797 	len = be32_to_cpu(fw_entry->rodata.len);
3798 	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3799 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3801 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3805 	for (j = 0; j < (len / 4); j++, offset += 4)
3806 		bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3809 	/* Clear the pre-fetch instruction. */
3810 	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3812 	val = be32_to_cpu(fw_entry->start_addr);
3813 	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3815 	/* Start the CPU. */
3816 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3817 	val &= ~cpu_reg->mode_value_halt;
3818 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3819 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3825 bnx2_init_cpus(struct bnx2 *bp)
3827 	const struct bnx2_mips_fw_file *mips_fw =
3828 		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3829 	const struct bnx2_rv2p_fw_file *rv2p_fw =
3830 		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3833 	/* Initialize the RV2P processor. */
3834 	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3835 	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3837 	/* Initialize the RX Processor. */
3838 	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3842 	/* Initialize the TX Processor. */
3843 	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3847 	/* Initialize the TX Patch-up Processor. */
3848 	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3852 	/* Initialize the Completion Processor. */
3853 	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3857 	/* Initialize the Command Processor. */
3858 	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3865 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3869 	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3875 		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3876 				      (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3877 				      PCI_PM_CTRL_PME_STATUS);
3879 		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3880 			/* delay required during transition out of D3hot */
3883 		val = REG_RD(bp, BNX2_EMAC_MODE);
3884 		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3885 		val &= ~BNX2_EMAC_MODE_MPKT;
3886 		REG_WR(bp, BNX2_EMAC_MODE, val);
3888 		val = REG_RD(bp, BNX2_RPM_CONFIG);
3889 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3890 		REG_WR(bp, BNX2_RPM_CONFIG, val);
3901 			autoneg = bp->autoneg;
3902 			advertising = bp->advertising;
3904 			if (bp->phy_port == PORT_TP) {
3905 				bp->autoneg = AUTONEG_SPEED;
3906 				bp->advertising = ADVERTISED_10baseT_Half |
3907 					ADVERTISED_10baseT_Full |
3908 					ADVERTISED_100baseT_Half |
3909 					ADVERTISED_100baseT_Full |
3913 			spin_lock_bh(&bp->phy_lock);
3914 			bnx2_setup_phy(bp, bp->phy_port);
3915 			spin_unlock_bh(&bp->phy_lock);
3917 			bp->autoneg = autoneg;
3918 			bp->advertising = advertising;
3920 			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3922 			val = REG_RD(bp, BNX2_EMAC_MODE);
3924 			/* Enable port mode. */
3925 			val &= ~BNX2_EMAC_MODE_PORT;
3926 			val |= BNX2_EMAC_MODE_MPKT_RCVD |
3927 			       BNX2_EMAC_MODE_ACPI_RCVD |
3928 			       BNX2_EMAC_MODE_MPKT;
3929 			if (bp->phy_port == PORT_TP)
3930 				val |= BNX2_EMAC_MODE_PORT_MII;
3932 				val |= BNX2_EMAC_MODE_PORT_GMII;
3933 				if (bp->line_speed == SPEED_2500)
3934 					val |= BNX2_EMAC_MODE_25G_MODE;
3937 			REG_WR(bp, BNX2_EMAC_MODE, val);
3939 			/* receive all multicast */
3940 			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3941 				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3944 			REG_WR(bp, BNX2_EMAC_RX_MODE,
3945 			       BNX2_EMAC_RX_MODE_SORT_MODE);
3947 			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3948 			      BNX2_RPM_SORT_USER0_MC_EN;
3949 			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3950 			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3951 			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3952 			       BNX2_RPM_SORT_USER0_ENA);
3954 			/* Need to enable EMAC and RPM for WOL. */
3955 			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3956 			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3957 			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3958 			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3960 			val = REG_RD(bp, BNX2_RPM_CONFIG);
3961 			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3962 			REG_WR(bp, BNX2_RPM_CONFIG, val);
3964 			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3967 			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3970 		if (!(bp->flags & BNX2_FLAG_NO_WOL))
3971 			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3974 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3975 		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3976 		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3985 			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3987 		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3990 		/* No more memory access after this point until
3991 		 * device is brought back to D0. */
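/* Annotation: NVRAM access protocol in the helpers below is: take the
 * software arbitration lock, enable the interface, perform
 * dword-granular reads/writes with FIRST/LAST command flags, then
 * disable access and release the lock. */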
4003 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4008 	/* Request access to the flash interface. */
4009 	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4010 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4011 		val = REG_RD(bp, BNX2_NVM_SW_ARB);
4012 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4018 	if (j >= NVRAM_TIMEOUT_COUNT)
4025 bnx2_release_nvram_lock(struct bnx2 *bp)
4030 	/* Relinquish nvram interface. */
4031 	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4033 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4034 		val = REG_RD(bp, BNX2_NVM_SW_ARB);
4035 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4041 	if (j >= NVRAM_TIMEOUT_COUNT)
4049 bnx2_enable_nvram_write(struct bnx2 *bp)
4053 	val = REG_RD(bp, BNX2_MISC_CFG);
4054 	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4056 	if (bp->flash_info->flags & BNX2_NV_WREN) {
4059 		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4060 		REG_WR(bp, BNX2_NVM_COMMAND,
4061 		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4063 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4066 			val = REG_RD(bp, BNX2_NVM_COMMAND);
4067 			if (val & BNX2_NVM_COMMAND_DONE)
4071 		if (j >= NVRAM_TIMEOUT_COUNT)
4078 bnx2_disable_nvram_write(struct bnx2 *bp)
4082 	val = REG_RD(bp, BNX2_MISC_CFG);
4083 	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4088 bnx2_enable_nvram_access(struct bnx2 *bp)
4092 	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4093 	/* Enable both bits, even on read. */
4094 	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4095 	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4099 bnx2_disable_nvram_access(struct bnx2 *bp)
4103 	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4104 	/* Disable both bits, even after read. */
4105 	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4106 	       val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4107 		       BNX2_NVM_ACCESS_ENABLE_WR_EN));
4111 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4116 	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4117 		/* Buffered flash, no erase needed */
4120 	/* Build an erase command */
4121 	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4122 	      BNX2_NVM_COMMAND_DOIT;
4124 	/* Need to clear DONE bit separately. */
4125 	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4127 	/* Address of the NVRAM page to erase. */
4128 	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4130 	/* Issue an erase command. */
4131 	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4133 	/* Wait for completion. */
4134 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4139 		val = REG_RD(bp, BNX2_NVM_COMMAND);
4140 		if (val & BNX2_NVM_COMMAND_DONE)
4144 	if (j >= NVRAM_TIMEOUT_COUNT)
4151 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4156 	/* Build the command word. */
4157 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4159 	/* Calculate an offset of a buffered flash, not needed for 5709. */
4160 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4161 		offset = ((offset / bp->flash_info->page_size) <<
4162 			  bp->flash_info->page_bits) +
4163 			 (offset % bp->flash_info->page_size);
4166 	/* Need to clear DONE bit separately. */
4167 	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4169 	/* Address of the NVRAM to read from. */
4170 	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4172 	/* Issue a read command. */
4173 	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4175 	/* Wait for completion. */
4176 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4181 		val = REG_RD(bp, BNX2_NVM_COMMAND);
4182 		if (val & BNX2_NVM_COMMAND_DONE) {
4183 			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4184 			memcpy(ret_val, &v, 4);
4188 	if (j >= NVRAM_TIMEOUT_COUNT)
4196 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4202 	/* Build the command word. */
4203 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4205 	/* Calculate an offset of a buffered flash, not needed for 5709. */
4206 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4207 		offset = ((offset / bp->flash_info->page_size) <<
4208 			  bp->flash_info->page_bits) +
4209 			 (offset % bp->flash_info->page_size);
4212 	/* Need to clear DONE bit separately. */
4213 	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4215 	memcpy(&val32, val, 4);
4217 	/* Write the data. */
4218 	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4220 	/* Address of the NVRAM to write to. */
4221 	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4223 	/* Issue the write command. */
4224 	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4226 	/* Wait for completion. */
4227 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4230 		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4233 	if (j >= NVRAM_TIMEOUT_COUNT)
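/* Annotation: flash discovery below uses a fixed descriptor on the
 * 5709; older chips match the strapping bits in NVM_CFG1 against
 * flash_table, reconfiguring the interface when the backup strap is in
 * effect. */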
4240 bnx2_init_nvram(struct bnx2 *bp)
4243 	int j, entry_count, rc = 0;
4244 	const struct flash_spec *flash;
4246 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4247 		bp->flash_info = &flash_5709;
4248 		goto get_flash_size;
4251 	/* Determine the selected interface. */
4252 	val = REG_RD(bp, BNX2_NVM_CFG1);
4254 	entry_count = ARRAY_SIZE(flash_table);
4256 	if (val & 0x40000000) {
4258 		/* Flash interface has been reconfigured */
4259 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4261 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4262 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4263 				bp->flash_info = flash;
4270 		/* Not yet been reconfigured */
4272 		if (val & (1 << 23))
4273 			mask = FLASH_BACKUP_STRAP_MASK;
4275 			mask = FLASH_STRAP_MASK;
4277 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4280 			if ((val & mask) == (flash->strapping & mask)) {
4281 				bp->flash_info = flash;
4283 				/* Request access to the flash interface. */
4284 				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4287 				/* Enable access to flash interface */
4288 				bnx2_enable_nvram_access(bp);
4290 				/* Reconfigure the flash interface */
4291 				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4292 				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4293 				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4294 				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4296 				/* Disable access to flash interface */
4297 				bnx2_disable_nvram_access(bp);
4298 				bnx2_release_nvram_lock(bp);
4303 	} /* if (val & 0x40000000) */
4305 	if (j == entry_count) {
4306 		bp->flash_info = NULL;
4307 		pr_alert("Unknown flash/EEPROM type\n");
4312 	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4313 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4315 		bp->flash_size = val;
4317 		bp->flash_size = bp->flash_info->total_size;
4323 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4327 	u32 cmd_flags, offset32, len32, extra;
4332 	/* Request access to the flash interface. */
4333 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4336 	/* Enable access to flash interface */
4337 	bnx2_enable_nvram_access(bp);
4350 		pre_len = 4 - (offset & 3);
4352 		if (pre_len >= len32) {
4354 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4355 				    BNX2_NVM_COMMAND_LAST;
4358 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4361 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4366 		memcpy(ret_buf, buf + (offset & 3), pre_len);
4373 		extra = 4 - (len32 & 3);
4374 		len32 = (len32 + 4) & ~3;
4381 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4383 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4384 				    BNX2_NVM_COMMAND_LAST;
4386 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4388 		memcpy(ret_buf, buf, 4 - extra);
4390 	else if (len32 > 0) {
4393 		/* Read the first word. */
4397 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4399 		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4401 		/* Advance to the next dword. */
4406 		while (len32 > 4 && rc == 0) {
4407 			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4409 			/* Advance to the next dword. */
4418 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4419 			rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4421 			memcpy(ret_buf, buf, 4 - extra);
4424 	/* Disable access to flash interface */
4425 	bnx2_disable_nvram_access(bp);
4427 	bnx2_release_nvram_lock(bp);
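/* Annotation: NVRAM writes below proceed page by page; unaligned
 * head/tail bytes are merged via a read-modify-write, and non-buffered
 * flash parts are read, erased, and rewritten a whole page at a time. */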
4433 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4436 	u32 written, offset32, len32;
4437 	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4439 	int align_start, align_end;
4444 	align_start = align_end = 0;
4446 	if ((align_start = (offset32 & 3))) {
4448 		len32 += align_start;
4451 		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4456 		align_end = 4 - (len32 & 3);
4458 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4462 	if (align_start || align_end) {
4463 		align_buf = kmalloc(len32, GFP_KERNEL);
4464 		if (align_buf == NULL)
4467 			memcpy(align_buf, start, 4);
4470 			memcpy(align_buf + len32 - 4, end, 4);
4472 		memcpy(align_buf + align_start, data_buf, buf_size);
4476 	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4477 		flash_buffer = kmalloc(264, GFP_KERNEL);
4478 		if (flash_buffer == NULL) {
4480 			goto nvram_write_end;
4485 	while ((written < len32) && (rc == 0)) {
4486 		u32 page_start, page_end, data_start, data_end;
4487 		u32 addr, cmd_flags;
4490 		/* Find the page_start addr */
4491 		page_start = offset32 + written;
4492 		page_start -= (page_start % bp->flash_info->page_size);
4493 		/* Find the page_end addr */
4494 		page_end = page_start + bp->flash_info->page_size;
4495 		/* Find the data_start addr */
4496 		data_start = (written == 0) ? offset32 : page_start;
4497 		/* Find the data_end addr */
4498 		data_end = (page_end > offset32 + len32) ?
4499 			   (offset32 + len32) : page_end;
4501 		/* Request access to the flash interface. */
4502 		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4503 			goto nvram_write_end;
4505 		/* Enable access to flash interface */
4506 		bnx2_enable_nvram_access(bp);
4508 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4509 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4512 			/* Read the whole page into the buffer
4513 			 * (non-buffer flash only) */
4514 			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4515 				if (j == (bp->flash_info->page_size - 4)) {
4516 					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4518 				rc = bnx2_nvram_read_dword(bp,
4524 					goto nvram_write_end;
4530 		/* Enable writes to flash interface (unlock write-protect) */
4531 		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4532 			goto nvram_write_end;
4534 		/* Loop to write back the buffer data from page_start to
4535 		 * data_start */
4537 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4538 			/* Erase the page */
4539 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4540 				goto nvram_write_end;
4542 			/* Re-enable the write again for the actual write */
4543 			bnx2_enable_nvram_write(bp);
4545 			for (addr = page_start; addr < data_start;
4546 			     addr += 4, i += 4) {
4548 				rc = bnx2_nvram_write_dword(bp, addr,
4549 					&flash_buffer[i], cmd_flags);
4552 					goto nvram_write_end;
4558 		/* Loop to write the new data from data_start to data_end */
4559 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4560 			if ((addr == page_end - 4) ||
4561 			    ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4562 			     (addr == data_end - 4))) {
4564 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4566 			rc = bnx2_nvram_write_dword(bp, addr, buf,
4570 				goto nvram_write_end;
4576 		/* Loop to write back the buffer data from data_end
4577 		 * to page_end */
4578 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4579 			for (addr = data_end; addr < page_end;
4580 			     addr += 4, i += 4) {
4582 				if (addr == page_end - 4) {
4583 					cmd_flags = BNX2_NVM_COMMAND_LAST;
4585 				rc = bnx2_nvram_write_dword(bp, addr,
4586 					&flash_buffer[i], cmd_flags);
4589 					goto nvram_write_end;
4595 		/* Disable writes to flash interface (lock write-protect) */
4596 		bnx2_disable_nvram_write(bp);
4598 		/* Disable access to flash interface */
4599 		bnx2_disable_nvram_access(bp);
4600 		bnx2_release_nvram_lock(bp);
4602 		/* Increment written */
4603 		written += data_end - data_start;
4607 	kfree(flash_buffer);
4613 bnx2_init_fw_cap(struct bnx2 *bp)
4617 	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4618 	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4620 	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4621 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4623 	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4624 	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4627 	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4628 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4629 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4632 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4633 	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4636 		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4638 		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4639 		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4640 			bp->phy_port = PORT_FIBRE;
4642 			bp->phy_port = PORT_TP;
4644 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4645 		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4648 	if (netif_running(bp->dev) && sig)
4649 		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
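/* Annotation: the reset path below quiesces DMA, hands the reset code
 * to the bootcode via bnx2_fw_sync(), pulses CORE_RST_REQ, then waits
 * for firmware initialization and re-reads the capability mailboxes. */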
4653 bnx2_setup_msix_tbl(struct bnx2 *bp)
4655 	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4657 	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4658 	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4662 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4668 	/* Wait for the current PCI transaction to complete before
4669 	 * issuing a reset. */
4670 	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4671 	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4672 	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4673 	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4674 	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4675 	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4678 	/* Wait for the firmware to tell us it is ok to issue a reset. */
4679 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4681 	/* Deposit a driver reset signature so the firmware knows that
4682 	 * this is a soft reset. */
4683 	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4684 		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4686 	/* Do a dummy read to force the chip to complete all current
4687 	 * transactions before we issue a reset. */
4688 	val = REG_RD(bp, BNX2_MISC_ID);
4690 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4691 		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4692 		REG_RD(bp, BNX2_MISC_COMMAND);
4695 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4696 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4698 		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4701 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4702 		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4703 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4706 		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4708 		/* Reading back any register after chip reset will hang the
4709 		 * bus on 5706 A0 and A1. The msleep below provides plenty
4710 		 * of margin for write posting. */
4712 		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4713 		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
4716 		/* Reset takes approximately 30 usec */
4717 		for (i = 0; i < 10; i++) {
4718 			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4719 			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4720 				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4725 		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4726 			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4727 			pr_err("Chip reset did not complete\n");
4732 	/* Make sure byte swapping is properly configured. */
4733 	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4734 	if (val != 0x01020304) {
4735 		pr_err("Chip not in correct endian mode\n");
4739 	/* Wait for the firmware to finish its initialization. */
4740 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4744 	spin_lock_bh(&bp->phy_lock);
4745 	old_port = bp->phy_port;
4746 	bnx2_init_fw_cap(bp);
4747 	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4748 	    old_port != bp->phy_port)
4749 		bnx2_set_default_remote_link(bp);
4750 	spin_unlock_bh(&bp->phy_lock);
4752 	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4753 		/* Adjust the voltage regulator to two steps lower. The default
4754 		 * of this register is 0x0000000e. */
4755 		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4757 		/* Remove bad rbuf memory from the free pool. */
4758 		rc = bnx2_alloc_bad_rbuf(bp);
4761 	if (bp->flags & BNX2_FLAG_USING_MSIX)
4762 		bnx2_setup_msix_tbl(bp);
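/* Annotation: post-reset initialization below configures DMA byte
 * ordering, context memory, the on-chip firmware, the MTU, the
 * status/statistics block addresses, and the host coalescing
 * parameters. */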
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
		       (bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
static void
bnx2_clear_ring_states(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct bnx2_rx_ring_info *rxr;
	int i;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bnapi = &bp->bnx2_napi[i];
		txr = &bnapi->tx_ring;
		rxr = &bnapi->rx_ring;

		txr->tx_cons = 0;
		txr->hw_tx_cons = 0;
		rxr->rx_prod_bseq = 0;
		rxr->rx_prod = 0;
		rxr->rx_cons = 0;
		rxr->rx_pg_prod = 0;
		rxr->rx_pg_cons = 0;
	}
}
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
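
/* A TX ring is published to the chip in two steps: the descriptor ring
 * address goes into the per-CID L2 context (bnx2_init_tx_context()
 * above), and the host later kicks the ring by writing the producer
 * index and byte sequence to the mailbox addresses computed below.
 */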
static void
bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
{
	struct tx_bd *txbd;
	u32 cid = TX_CID;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;

	bnapi = &bp->bnx2_napi[ring_num];
	txr = &bnapi->tx_ring;

	if (ring_num == 0)
		cid = TX_CID;
	else
		cid = TX_TSS_CID + ring_num - 1;

	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;

	txr->tx_prod = 0;
	txr->tx_prod_bseq = 0;

	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid, txr);
}
static void
bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
		     int num_rings)
{
	int i;
	struct rx_bd *rxbd;

	for (i = 0; i < num_rings; i++) {
		int j;

		rxbd = &rx_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = buf_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (num_rings - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
	}
}
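
/* The chain BD written above makes the last descriptor of every ring
 * page point at the next page (and the last page back at the first),
 * so the pages form one circular ring.  bnx2_init_rx_ring() below
 * publishes the ring through the L2 context and pre-fills it.
 */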
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
			   BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
	}
}
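
/* Example of the page-count calculation below, assuming 4K pages
 * (i.e. MAX_RX_DESC_CNT == 255): a requested ring size of 600
 * descriptors leaves num_rings == 3 after the subtraction loop, and
 * the power-of-two rounding then returns 4 ring pages.
 */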
static u32
bnx2_find_max_ring(u32 ring_size, u32 max_size)
{
	u32 max, num_rings = 1;

	while (ring_size > MAX_RX_DESC_CNT) {
		ring_size -= MAX_RX_DESC_CNT;
		num_rings++;
	}
	/* round to next power of 2 */
	max = max_size;
	while ((max & num_rings) == 0)
		max >>= 1;

	if (num_rings != max)
		max <<= 1;

	return max;
}
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			pci_unmap_single(bp->pdev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				pci_unmap_page(bp->pdev,
					dma_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;

			dev_kfree_skb(skb);
		}
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_all_rings(bp);
	return 0;
}
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
static int
bnx2_shutdown_chip(struct bnx2 *bp)
{
	u32 reset_code;

	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;

	return bnx2_reset_chip(bp, reset_code);
}
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
static int
bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
	int i;

	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
		u32 offset;

		for (offset = 0; offset < size; offset += 4) {

			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);

			if (bnx2_reg_rd_ind(bp, start + offset) !=
				test_pattern[i]) {
				return -ENODEV;
			}
		}
	}
	return 0;
}
static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
	static struct mem_entry {
		u32   offset;
		u32   len;
	} mem_tbl_5706[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },
	},
	mem_tbl_5709[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0xffffffff, 0    },
	};
	struct mem_entry *mem_tbl;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		mem_tbl = mem_tbl_5709;
	else
		mem_tbl = mem_tbl_5706;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
			mem_tbl[i].len)) != 0) {
			return ret;
		}
	}

	return ret;
}
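
/* Loopback self-test: send one self-addressed frame with the MAC or
 * PHY looped back, force an immediate coalescing pass so the status
 * block is updated, then check that exactly one frame came back with
 * the expected length and byte pattern.
 */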
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)

static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, 1);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
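
/* NVRAM self-test.  Running ether_crc_le() over a block whose trailing
 * four bytes are the little-endian CRC32 of the preceding data always
 * yields the constant residual 0xdebb20e3, so each 0x100-byte half of
 * the block read below can be verified without extracting the stored
 * CRC explicitly.
 */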
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3

static int
bnx2_test_nvram(struct bnx2 *bp)
{
	__be32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
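
/* Interrupt self-test: request a coalescing pass and poll the status
 * block index in BNX2_PCICFG_INT_ACK_CMD for up to ~100 ms.  If the
 * index never moves, the MSI/INTx path is not delivering updates.
 */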
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);

	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
/* Determining link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
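
/* 5706 SerDes link maintenance, run from the driver timer: when
 * autoneg is stuck but the parallel-detect checks above pass, the
 * link is forced to 1Gb full duplex; if the partner later appears to
 * negotiate again (the 0x20 bit read back below), autoneg is
 * re-enabled.
 */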
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			}
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
static int
bnx2_request_irq(struct bnx2 *bp)
{
	unsigned long flags;
	struct bnx2_irq *irq;
	int rc = 0, i;

	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
		flags = 0;
	else
		flags = IRQF_SHARED;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 &bp->bnx2_napi[i]);
		if (rc)
			break;
		irq->requested = 1;
	}
	return rc;
}
static void
bnx2_free_irq(struct bnx2 *bp)
{
	struct bnx2_irq *irq;
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		if (irq->requested)
			free_irq(irq->vector, &bp->bnx2_napi[i]);
		irq->requested = 0;
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		pci_disable_msi(bp->pdev);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
}
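
/* MSI-X setup: the vector table and PBA are mapped behind GRC windows
 * 2 and 3, so those windows are programmed (and flushed with a read)
 * before the vectors are requested from the PCI core.  Each vector is
 * named "<ifname>-<n>" and uses the one-shot MSI handler.
 */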
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/*  Need to flush the previous three writes to ensure MSI-X
	 *  is setup properly */
	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
		bnx2_enable_msix(bp, msix_vecs);

	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	bp->dev->real_num_tx_queues = bp->num_tx_rings;

	bp->num_rx_rings = bp->irq_nvecs;
}
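
/* Open path: choose an interrupt mode, allocate rings and IRQs, and
 * bring the NIC up.  MSI is then verified with bnx2_test_intr();
 * some systems accept MSI but never deliver it, in which case the
 * device is torn down and reopened in INTx mode.
 */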
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp);

	bnx2_init_nic(bp, 1);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	rtnl_unlock();
}
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;

	netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_state(bp);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;

	if (!netif_running(dev))
		return;

	bnx2_set_rx_mode(dev);
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp);
}
#endif
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#ifdef BCM_VLAN
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#endif
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       len, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(bp->pdev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
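
/* Fold the hardware statistics block into temp_stats_blk before a
 * chip reset wipes it.  The first 10 counters are 64-bit hi/lo word
 * pairs, so the low words are summed in 64 bits and the carry is
 * propagated into the high word by hand.
 */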
static void
bnx2_save_stats(struct bnx2 *bp)
{
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	int i;

	/* The 1st 10 counters are 64-bit counters */
	for (i = 0; i < 20; i += 2) {
		u32 hi;
		u64 lo;

		hi = temp_stats[i] + hw_stats[i];
		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
		if (lo > 0xffffffff)
			hi++;
		temp_stats[i] = hi;
		temp_stats[i + 1] = lo & 0xffffffff;
	}

	for ( ; i < sizeof(struct statistics_block) / 4; i++)
		temp_stats[i] += hw_stats[i];
}
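
/* The helpers below add the live stats block and the saved
 * temp_stats_blk together.  On 32-bit kernels an unsigned long can
 * only hold the low word of a 64-bit counter, so the _hi half is
 * dropped there.
 */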
#define GET_64BIT_NET_STATS64(ctr)				\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_64BIT_NET_STATS32(ctr)				\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_64BIT_NET_STATS(ctr)				\
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
#else
#define GET_64BIT_NET_STATS(ctr)				\
	GET_64BIT_NET_STATS32(bp->stats_blk->ctr) +		\
	GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
#endif

#define GET_32BIT_NET_STATS(ctr)				\
	(unsigned long) (bp->stats_blk->ctr +			\
			 bp->temp_stats_blk->ctr)
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct net_device_stats *net_stats = &dev->stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	strcpy(info->fw_version, bp->fw_version);
}
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}

static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & BNX2_FLAG_NO_WOL) {
		wol->supported = 0;
		wol->wolopts = 0;
	}
	else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & BNX2_FLAG_NO_WOL)
			return -EINVAL;

		bp->wol = 1;
	}
	else {
		bp->wol = 0;
	}
	return 0;
}
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}

static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}
static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}

	return 0;
}
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp);
	}
	return 0;
}
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
	return rc;
}
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	if (netif_running(dev)) {
		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
	}

	return 0;
}
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}

static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			dev->features |= NETIF_F_TSO6;
	} else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_TSO_ECN);
	return 0;
}
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
			sizeof(bnx2_stats_str_arr[0]))
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
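/* STATS_OFFSET32() converts a field's byte offset within struct
 * statistics_block into an index into the block viewed as a u32 array.
 * E.g. a field at byte offset 8 yields index 2 (offset value here is
 * illustrative; the macro simply divides offsetof() by 4).
 */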
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInFTQDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
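/* Each entry above is the width in bytes of the counter at the same
 * index in bnx2_stats_offset_arr: 8 for 64-bit hi/lo counter pairs,
 * 4 for 32-bit counters, and 0 for counters skipped on that chip per
 * the errata note.
 */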
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
static int
bnx2_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return BNX2_NUM_TESTS;
	case ETH_SS_STATS:
		return BNX2_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}
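/* The 8-byte case in bnx2_get_ethtool_stats() above merges a _hi/_lo
 * register pair into one u64, e.g. hi = 0x00000001 and lo = 0x00000002
 * combine to 0x0000000100000002 (values illustrative).  The saved
 * temp_stats block is folded in the same way so that counters survive
 * chip resets.
 */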
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	bnx2_set_power_state(bp, PCI_D0);

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);

	if (!netif_running(dev))
		bnx2_set_power_state(bp, PCI_D3hot);

	return 0;
}
static int
bnx2_set_tx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		return (ethtool_op_set_tx_ipv6_csum(dev, data));
	else
		return (ethtool_op_set_tx_csum(dev, data));
}
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	return 0;
}
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
}
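/* poll_bnx2() below backs netpoll clients such as netconsole: normal
 * interrupt delivery may be unavailable in that context, so each
 * vector's handler is invoked synchronously with its IRQ line disabled
 * to emulate the usual completion path.
 */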
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;
}
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}
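	/* NVRAM returned the VPD image with each 32-bit word byte-swapped,
	 * so the loop above reverses every 4-byte group while copying from
	 * the scratch area at data + BNX2_VPD_LEN down to data: bytes
	 * (3,2,1,0) of each source word become bytes (0,1,2,3).
	 */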
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	int rc, i, j;
	u32 reg;
	unsigned long mem_len;
	u64 dma_mask, persist_dma_mask;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	bp->temp_stats_blk =
		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);

	if (bp->temp_stats_blk == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);
	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_lock);
#endif
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * The chip's target access swapping will not swap all accesses.
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIE capability, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_PCIE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_BROKEN_STATS;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
			bp->flags |= BNX2_FLAG_MSIX_CAP;
	}

	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
			bp->flags |= BNX2_FLAG_MSI_CAP;
	}

	/* 5708 cannot support DMA addresses > 40-bit. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
	else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		goto err_out_unmap;
	}
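	/* Note: persist_dma_mask is applied to coherent allocations (the
	 * status block and rings) via pci_set_consistent_dma_mask(), while
	 * dma_mask governs streaming skb mappings; on this hardware the
	 * two limits happen to be identical.
	 */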
	if (!(bp->flags & BNX2_FLAG_PCIE))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & BNX2_FLAG_PCIX)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting\n");
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bnx2_read_vpd_fw_ver(bp);

	j = strlen(bp->fw_version);
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
	for (i = 0; i < 3 && j < 24; i++) {
		u8 num, k, skip0;

		if (i == 0) {
			bp->fw_version[j++] = 'b';
			bp->fw_version[j++] = 'c';
			bp->fw_version[j++] = ' ';
		}
		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
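	/* The loop above renders one byte of the bootcode revision per pass
	 * as decimal with leading-zero suppression, e.g. (illustrative) a
	 * BNX2_DEV_INFO_BC_REV value of 0x05000200 becomes "bc 5.0.2".
	 */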
	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
		bp->flags |= BNX2_FLAG_ASF_ENABLE;

		for (i = 0; i < 30; i++) {
			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);

		bp->fw_version[j++] = ' ';
		for (i = 0; i < 3 && j < 28; i++) {
			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
			reg = swab32(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;
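	/* The permanent MAC address is packed big-endian across two shared
	 * memory words: MAC_UPPER carries bytes 0-1 in its low 16 bits and
	 * MAC_LOWER carries bytes 2-5, so e.g. (illustrative) the address
	 * 00:10:18:aa:bb:cc is stored as 0x00000010 / 0x18aabbcc.
	 */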
	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->tx_quick_cons_trip_int = 2;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 18;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 2;
	bp->rx_quick_cons_trip = 12;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->current_interval = BNX2_TIMER_INTERVAL;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bp->phy_port = PORT_FIBRE;
		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= BNX2_FLAG_NO_WOL;
			bp->wol = 0;
		}
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Don't do parallel detect on this board because of
			 * some board problems.  The link will not go down
			 * if we do parallel detect.
			 */
			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
			    pdev->subsystem_device == 0x310c)
				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
		} else {
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
		}
	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
	else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
		 (CHIP_REV(bp) == CHIP_REV_Ax ||
		  CHIP_REV(bp) == CHIP_REV_Bx))
		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;

	bnx2_init_fw_cap(bp);

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
	    !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}
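/* bnx2_init_napi() below registers one NAPI context per interrupt
 * vector with the standard weight of 64, i.e. each poll may consume up
 * to 64 packets before yielding back to the softirq loop; vector 0
 * services the default ring and the remaining vectors use the MSI-X
 * poll routine.
 */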
static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

		if (i == 0)
			poll = bnx2_poll;
		else
			poll = bnx2_poll_msix;

		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
		bnapi->bp = bp;
	}
}
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_IPV6_CSUM;
		vlan_features_add(dev, NETIF_F_IPV6_CSUM);
	}
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_TSO6;
		vlan_features_add(dev, NETIF_F_TSO6);
	}
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str),
		    dev->base_addr,
		    bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	kfree(bp->temp_stats_blk);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
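/* Power-management note: pci_save_state() at suspend time is what
 * preserves PCI register 4 and any MSI address/data, and resume relies
 * on that saved state; if the interface was up, the NIC is fully
 * re-initialized by bnx2_init_nic() rather than incrementally restored.
 */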
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);