1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2009 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/list.h>
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
/* Driver identity, firmware image names, and timing constants. */
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"2.0.1"
#define DRV_MODULE_RELDATE	"May 6, 2009"
/* Firmware files loaded via request_firmware(); 06 = 5706/5708, 09 = 5709/5716 */
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-4.6.16.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-4.6.16.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-4.6.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-4.6.15.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
74 static char version
[] __devinitdata
=
75 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME
" v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")\n";
77 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
78 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION
);
81 MODULE_FIRMWARE(FW_MIPS_FILE_06
);
82 MODULE_FIRMWARE(FW_RV2P_FILE_06
);
83 MODULE_FIRMWARE(FW_MIPS_FILE_09
);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09
);
86 static int disable_msi
= 0;
88 module_param(disable_msi
, int, 0);
89 MODULE_PARM_DESC(disable_msi
, "Disable Message Signaled Interrupt (MSI)");
/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
122 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl
) = {
123 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5706
,
124 PCI_VENDOR_ID_HP
, 0x3101, 0, 0, NC370T
},
125 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5706
,
126 PCI_VENDOR_ID_HP
, 0x3106, 0, 0, NC370I
},
127 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5706
,
128 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5706
},
129 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5708
,
130 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5708
},
131 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5706S
,
132 PCI_VENDOR_ID_HP
, 0x3102, 0, 0, NC370F
},
133 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5706S
,
134 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5706S
},
135 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5708S
,
136 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5708S
},
137 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5709
,
138 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5709
},
139 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5709S
,
140 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5709S
},
141 { PCI_VENDOR_ID_BROADCOM
, 0x163b,
142 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5716
},
143 { PCI_VENDOR_ID_BROADCOM
, 0x163c,
144 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5716S
},
148 static struct flash_spec flash_table
[] =
150 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
151 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
153 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
154 BUFFERED_FLAGS
, SEEPROM_PAGE_BITS
, SEEPROM_PAGE_SIZE
,
155 SEEPROM_BYTE_ADDR_MASK
, SEEPROM_TOTAL_SIZE
,
157 /* Expansion entry 0001 */
158 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
159 NONBUFFERED_FLAGS
, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
160 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
162 /* Saifun SA25F010 (non-buffered flash) */
163 /* strap, cfg1, & write1 need updates */
164 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
165 NONBUFFERED_FLAGS
, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
166 SAIFUN_FLASH_BYTE_ADDR_MASK
, SAIFUN_FLASH_BASE_TOTAL_SIZE
*2,
167 "Non-buffered flash (128kB)"},
168 /* Saifun SA25F020 (non-buffered flash) */
169 /* strap, cfg1, & write1 need updates */
170 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
171 NONBUFFERED_FLAGS
, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
172 SAIFUN_FLASH_BYTE_ADDR_MASK
, SAIFUN_FLASH_BASE_TOTAL_SIZE
*4,
173 "Non-buffered flash (256kB)"},
174 /* Expansion entry 0100 */
175 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
176 NONBUFFERED_FLAGS
, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
177 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
179 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
180 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
181 NONBUFFERED_FLAGS
, ST_MICRO_FLASH_PAGE_BITS
, ST_MICRO_FLASH_PAGE_SIZE
,
182 ST_MICRO_FLASH_BYTE_ADDR_MASK
, ST_MICRO_FLASH_BASE_TOTAL_SIZE
*2,
183 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
184 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
185 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
186 NONBUFFERED_FLAGS
, ST_MICRO_FLASH_PAGE_BITS
, ST_MICRO_FLASH_PAGE_SIZE
,
187 ST_MICRO_FLASH_BYTE_ADDR_MASK
, ST_MICRO_FLASH_BASE_TOTAL_SIZE
*4,
188 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
189 /* Saifun SA25F005 (non-buffered flash) */
190 /* strap, cfg1, & write1 need updates */
191 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
192 NONBUFFERED_FLAGS
, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
193 SAIFUN_FLASH_BYTE_ADDR_MASK
, SAIFUN_FLASH_BASE_TOTAL_SIZE
,
194 "Non-buffered flash (64kB)"},
196 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
197 BUFFERED_FLAGS
, SEEPROM_PAGE_BITS
, SEEPROM_PAGE_SIZE
,
198 SEEPROM_BYTE_ADDR_MASK
, SEEPROM_TOTAL_SIZE
,
200 /* Expansion entry 1001 */
201 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
202 NONBUFFERED_FLAGS
, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
203 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
205 /* Expansion entry 1010 */
206 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
207 NONBUFFERED_FLAGS
, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
208 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
210 /* ATMEL AT45DB011B (buffered flash) */
211 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
212 BUFFERED_FLAGS
, BUFFERED_FLASH_PAGE_BITS
, BUFFERED_FLASH_PAGE_SIZE
,
213 BUFFERED_FLASH_BYTE_ADDR_MASK
, BUFFERED_FLASH_TOTAL_SIZE
,
214 "Buffered flash (128kB)"},
215 /* Expansion entry 1100 */
216 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
217 NONBUFFERED_FLAGS
, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
218 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
220 /* Expansion entry 1101 */
221 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
222 NONBUFFERED_FLAGS
, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
223 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
225 /* Ateml Expansion entry 1110 */
226 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
227 BUFFERED_FLAGS
, BUFFERED_FLASH_PAGE_BITS
, BUFFERED_FLASH_PAGE_SIZE
,
228 BUFFERED_FLASH_BYTE_ADDR_MASK
, 0,
229 "Entry 1110 (Atmel)"},
230 /* ATMEL AT45DB021B (buffered flash) */
231 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
232 BUFFERED_FLAGS
, BUFFERED_FLASH_PAGE_BITS
, BUFFERED_FLASH_PAGE_SIZE
,
233 BUFFERED_FLASH_BYTE_ADDR_MASK
, BUFFERED_FLASH_TOTAL_SIZE
*2,
234 "Buffered flash (256kB)"},
237 static struct flash_spec flash_5709
= {
238 .flags
= BNX2_NV_BUFFERED
,
239 .page_bits
= BCM5709_FLASH_PAGE_BITS
,
240 .page_size
= BCM5709_FLASH_PAGE_SIZE
,
241 .addr_mask
= BCM5709_FLASH_BYTE_ADDR_MASK
,
242 .total_size
= BUFFERED_FLASH_TOTAL_SIZE
*2,
243 .name
= "5709 Buffered flash (256kB)",
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248 static inline u32
bnx2_tx_avail(struct bnx2
*bp
, struct bnx2_tx_ring_info
*txr
)
254 /* The ring uses 256 indices for 255 entries, one of them
255 * needs to be skipped.
257 diff
= txr
->tx_prod
- txr
->tx_cons
;
258 if (unlikely(diff
>= TX_DESC_CNT
)) {
260 if (diff
== TX_DESC_CNT
)
261 diff
= MAX_TX_DESC_CNT
;
263 return (bp
->tx_ring_size
- diff
);
267 bnx2_reg_rd_ind(struct bnx2
*bp
, u32 offset
)
271 spin_lock_bh(&bp
->indirect_lock
);
272 REG_WR(bp
, BNX2_PCICFG_REG_WINDOW_ADDRESS
, offset
);
273 val
= REG_RD(bp
, BNX2_PCICFG_REG_WINDOW
);
274 spin_unlock_bh(&bp
->indirect_lock
);
279 bnx2_reg_wr_ind(struct bnx2
*bp
, u32 offset
, u32 val
)
281 spin_lock_bh(&bp
->indirect_lock
);
282 REG_WR(bp
, BNX2_PCICFG_REG_WINDOW_ADDRESS
, offset
);
283 REG_WR(bp
, BNX2_PCICFG_REG_WINDOW
, val
);
284 spin_unlock_bh(&bp
->indirect_lock
);
288 bnx2_shmem_wr(struct bnx2
*bp
, u32 offset
, u32 val
)
290 bnx2_reg_wr_ind(bp
, bp
->shmem_base
+ offset
, val
);
294 bnx2_shmem_rd(struct bnx2
*bp
, u32 offset
)
296 return (bnx2_reg_rd_ind(bp
, bp
->shmem_base
+ offset
));
300 bnx2_ctx_wr(struct bnx2
*bp
, u32 cid_addr
, u32 offset
, u32 val
)
303 spin_lock_bh(&bp
->indirect_lock
);
304 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
307 REG_WR(bp
, BNX2_CTX_CTX_DATA
, val
);
308 REG_WR(bp
, BNX2_CTX_CTX_CTRL
,
309 offset
| BNX2_CTX_CTX_CTRL_WRITE_REQ
);
310 for (i
= 0; i
< 5; i
++) {
311 val
= REG_RD(bp
, BNX2_CTX_CTX_CTRL
);
312 if ((val
& BNX2_CTX_CTX_CTRL_WRITE_REQ
) == 0)
317 REG_WR(bp
, BNX2_CTX_DATA_ADR
, offset
);
318 REG_WR(bp
, BNX2_CTX_DATA
, val
);
320 spin_unlock_bh(&bp
->indirect_lock
);
325 bnx2_drv_ctl(struct net_device
*dev
, struct drv_ctl_info
*info
)
327 struct bnx2
*bp
= netdev_priv(dev
);
328 struct drv_ctl_io
*io
= &info
->data
.io
;
331 case DRV_CTL_IO_WR_CMD
:
332 bnx2_reg_wr_ind(bp
, io
->offset
, io
->data
);
334 case DRV_CTL_IO_RD_CMD
:
335 io
->data
= bnx2_reg_rd_ind(bp
, io
->offset
);
337 case DRV_CTL_CTX_WR_CMD
:
338 bnx2_ctx_wr(bp
, io
->cid_addr
, io
->offset
, io
->data
);
346 static void bnx2_setup_cnic_irq_info(struct bnx2
*bp
)
348 struct cnic_eth_dev
*cp
= &bp
->cnic_eth_dev
;
349 struct bnx2_napi
*bnapi
= &bp
->bnx2_napi
[0];
352 if (bp
->flags
& BNX2_FLAG_USING_MSIX
) {
353 cp
->drv_state
|= CNIC_DRV_STATE_USING_MSIX
;
354 bnapi
->cnic_present
= 0;
355 sb_id
= bp
->irq_nvecs
;
356 cp
->irq_arr
[0].irq_flags
|= CNIC_IRQ_FL_MSIX
;
358 cp
->drv_state
&= ~CNIC_DRV_STATE_USING_MSIX
;
359 bnapi
->cnic_tag
= bnapi
->last_status_idx
;
360 bnapi
->cnic_present
= 1;
362 cp
->irq_arr
[0].irq_flags
&= ~CNIC_IRQ_FL_MSIX
;
365 cp
->irq_arr
[0].vector
= bp
->irq_tbl
[sb_id
].vector
;
366 cp
->irq_arr
[0].status_blk
= (void *)
367 ((unsigned long) bnapi
->status_blk
.msi
+
368 (BNX2_SBLK_MSIX_ALIGN_SIZE
* sb_id
));
369 cp
->irq_arr
[0].status_blk_num
= sb_id
;
373 static int bnx2_register_cnic(struct net_device
*dev
, struct cnic_ops
*ops
,
376 struct bnx2
*bp
= netdev_priv(dev
);
377 struct cnic_eth_dev
*cp
= &bp
->cnic_eth_dev
;
382 if (cp
->drv_state
& CNIC_DRV_STATE_REGD
)
385 bp
->cnic_data
= data
;
386 rcu_assign_pointer(bp
->cnic_ops
, ops
);
389 cp
->drv_state
= CNIC_DRV_STATE_REGD
;
391 bnx2_setup_cnic_irq_info(bp
);
396 static int bnx2_unregister_cnic(struct net_device
*dev
)
398 struct bnx2
*bp
= netdev_priv(dev
);
399 struct bnx2_napi
*bnapi
= &bp
->bnx2_napi
[0];
400 struct cnic_eth_dev
*cp
= &bp
->cnic_eth_dev
;
402 mutex_lock(&bp
->cnic_lock
);
404 bnapi
->cnic_present
= 0;
405 rcu_assign_pointer(bp
->cnic_ops
, NULL
);
406 mutex_unlock(&bp
->cnic_lock
);
411 struct cnic_eth_dev
*bnx2_cnic_probe(struct net_device
*dev
)
413 struct bnx2
*bp
= netdev_priv(dev
);
414 struct cnic_eth_dev
*cp
= &bp
->cnic_eth_dev
;
416 cp
->drv_owner
= THIS_MODULE
;
417 cp
->chip_id
= bp
->chip_id
;
419 cp
->io_base
= bp
->regview
;
420 cp
->drv_ctl
= bnx2_drv_ctl
;
421 cp
->drv_register_cnic
= bnx2_register_cnic
;
422 cp
->drv_unregister_cnic
= bnx2_unregister_cnic
;
426 EXPORT_SYMBOL(bnx2_cnic_probe
);
429 bnx2_cnic_stop(struct bnx2
*bp
)
431 struct cnic_ops
*c_ops
;
432 struct cnic_ctl_info info
;
434 mutex_lock(&bp
->cnic_lock
);
435 c_ops
= bp
->cnic_ops
;
437 info
.cmd
= CNIC_CTL_STOP_CMD
;
438 c_ops
->cnic_ctl(bp
->cnic_data
, &info
);
440 mutex_unlock(&bp
->cnic_lock
);
444 bnx2_cnic_start(struct bnx2
*bp
)
446 struct cnic_ops
*c_ops
;
447 struct cnic_ctl_info info
;
449 mutex_lock(&bp
->cnic_lock
);
450 c_ops
= bp
->cnic_ops
;
452 if (!(bp
->flags
& BNX2_FLAG_USING_MSIX
)) {
453 struct bnx2_napi
*bnapi
= &bp
->bnx2_napi
[0];
455 bnapi
->cnic_tag
= bnapi
->last_status_idx
;
457 info
.cmd
= CNIC_CTL_START_CMD
;
458 c_ops
->cnic_ctl(bp
->cnic_data
, &info
);
460 mutex_unlock(&bp
->cnic_lock
);
/* No-op stub used when CNIC support is compiled out. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
/* No-op stub used when CNIC support is compiled out. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
478 bnx2_read_phy(struct bnx2
*bp
, u32 reg
, u32
*val
)
483 if (bp
->phy_flags
& BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING
) {
484 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
485 val1
&= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL
;
487 REG_WR(bp
, BNX2_EMAC_MDIO_MODE
, val1
);
488 REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
493 val1
= (bp
->phy_addr
<< 21) | (reg
<< 16) |
494 BNX2_EMAC_MDIO_COMM_COMMAND_READ
| BNX2_EMAC_MDIO_COMM_DISEXT
|
495 BNX2_EMAC_MDIO_COMM_START_BUSY
;
496 REG_WR(bp
, BNX2_EMAC_MDIO_COMM
, val1
);
498 for (i
= 0; i
< 50; i
++) {
501 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_COMM
);
502 if (!(val1
& BNX2_EMAC_MDIO_COMM_START_BUSY
)) {
505 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_COMM
);
506 val1
&= BNX2_EMAC_MDIO_COMM_DATA
;
512 if (val1
& BNX2_EMAC_MDIO_COMM_START_BUSY
) {
521 if (bp
->phy_flags
& BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING
) {
522 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
523 val1
|= BNX2_EMAC_MDIO_MODE_AUTO_POLL
;
525 REG_WR(bp
, BNX2_EMAC_MDIO_MODE
, val1
);
526 REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
535 bnx2_write_phy(struct bnx2
*bp
, u32 reg
, u32 val
)
540 if (bp
->phy_flags
& BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING
) {
541 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
542 val1
&= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL
;
544 REG_WR(bp
, BNX2_EMAC_MDIO_MODE
, val1
);
545 REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
550 val1
= (bp
->phy_addr
<< 21) | (reg
<< 16) | val
|
551 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE
|
552 BNX2_EMAC_MDIO_COMM_START_BUSY
| BNX2_EMAC_MDIO_COMM_DISEXT
;
553 REG_WR(bp
, BNX2_EMAC_MDIO_COMM
, val1
);
555 for (i
= 0; i
< 50; i
++) {
558 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_COMM
);
559 if (!(val1
& BNX2_EMAC_MDIO_COMM_START_BUSY
)) {
565 if (val1
& BNX2_EMAC_MDIO_COMM_START_BUSY
)
570 if (bp
->phy_flags
& BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING
) {
571 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
572 val1
|= BNX2_EMAC_MDIO_MODE_AUTO_POLL
;
574 REG_WR(bp
, BNX2_EMAC_MDIO_MODE
, val1
);
575 REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
584 bnx2_disable_int(struct bnx2
*bp
)
587 struct bnx2_napi
*bnapi
;
589 for (i
= 0; i
< bp
->irq_nvecs
; i
++) {
590 bnapi
= &bp
->bnx2_napi
[i
];
591 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
, bnapi
->int_num
|
592 BNX2_PCICFG_INT_ACK_CMD_MASK_INT
);
594 REG_RD(bp
, BNX2_PCICFG_INT_ACK_CMD
);
598 bnx2_enable_int(struct bnx2
*bp
)
601 struct bnx2_napi
*bnapi
;
603 for (i
= 0; i
< bp
->irq_nvecs
; i
++) {
604 bnapi
= &bp
->bnx2_napi
[i
];
606 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
, bnapi
->int_num
|
607 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
|
608 BNX2_PCICFG_INT_ACK_CMD_MASK_INT
|
609 bnapi
->last_status_idx
);
611 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
, bnapi
->int_num
|
612 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
|
613 bnapi
->last_status_idx
);
615 REG_WR(bp
, BNX2_HC_COMMAND
, bp
->hc_cmd
| BNX2_HC_COMMAND_COAL_NOW
);
619 bnx2_disable_int_sync(struct bnx2
*bp
)
623 atomic_inc(&bp
->intr_sem
);
624 bnx2_disable_int(bp
);
625 for (i
= 0; i
< bp
->irq_nvecs
; i
++)
626 synchronize_irq(bp
->irq_tbl
[i
].vector
);
630 bnx2_napi_disable(struct bnx2
*bp
)
634 for (i
= 0; i
< bp
->irq_nvecs
; i
++)
635 napi_disable(&bp
->bnx2_napi
[i
].napi
);
639 bnx2_napi_enable(struct bnx2
*bp
)
643 for (i
= 0; i
< bp
->irq_nvecs
; i
++)
644 napi_enable(&bp
->bnx2_napi
[i
].napi
);
648 bnx2_netif_stop(struct bnx2
*bp
)
651 bnx2_disable_int_sync(bp
);
652 if (netif_running(bp
->dev
)) {
653 bnx2_napi_disable(bp
);
654 netif_tx_disable(bp
->dev
);
655 bp
->dev
->trans_start
= jiffies
; /* prevent tx timeout */
660 bnx2_netif_start(struct bnx2
*bp
)
662 if (atomic_dec_and_test(&bp
->intr_sem
)) {
663 if (netif_running(bp
->dev
)) {
664 netif_tx_wake_all_queues(bp
->dev
);
665 bnx2_napi_enable(bp
);
673 bnx2_free_tx_mem(struct bnx2
*bp
)
677 for (i
= 0; i
< bp
->num_tx_rings
; i
++) {
678 struct bnx2_napi
*bnapi
= &bp
->bnx2_napi
[i
];
679 struct bnx2_tx_ring_info
*txr
= &bnapi
->tx_ring
;
681 if (txr
->tx_desc_ring
) {
682 pci_free_consistent(bp
->pdev
, TXBD_RING_SIZE
,
684 txr
->tx_desc_mapping
);
685 txr
->tx_desc_ring
= NULL
;
687 kfree(txr
->tx_buf_ring
);
688 txr
->tx_buf_ring
= NULL
;
693 bnx2_free_rx_mem(struct bnx2
*bp
)
697 for (i
= 0; i
< bp
->num_rx_rings
; i
++) {
698 struct bnx2_napi
*bnapi
= &bp
->bnx2_napi
[i
];
699 struct bnx2_rx_ring_info
*rxr
= &bnapi
->rx_ring
;
702 for (j
= 0; j
< bp
->rx_max_ring
; j
++) {
703 if (rxr
->rx_desc_ring
[j
])
704 pci_free_consistent(bp
->pdev
, RXBD_RING_SIZE
,
705 rxr
->rx_desc_ring
[j
],
706 rxr
->rx_desc_mapping
[j
]);
707 rxr
->rx_desc_ring
[j
] = NULL
;
709 vfree(rxr
->rx_buf_ring
);
710 rxr
->rx_buf_ring
= NULL
;
712 for (j
= 0; j
< bp
->rx_max_pg_ring
; j
++) {
713 if (rxr
->rx_pg_desc_ring
[j
])
714 pci_free_consistent(bp
->pdev
, RXBD_RING_SIZE
,
715 rxr
->rx_pg_desc_ring
[j
],
716 rxr
->rx_pg_desc_mapping
[j
]);
717 rxr
->rx_pg_desc_ring
[j
] = NULL
;
719 vfree(rxr
->rx_pg_ring
);
720 rxr
->rx_pg_ring
= NULL
;
725 bnx2_alloc_tx_mem(struct bnx2
*bp
)
729 for (i
= 0; i
< bp
->num_tx_rings
; i
++) {
730 struct bnx2_napi
*bnapi
= &bp
->bnx2_napi
[i
];
731 struct bnx2_tx_ring_info
*txr
= &bnapi
->tx_ring
;
733 txr
->tx_buf_ring
= kzalloc(SW_TXBD_RING_SIZE
, GFP_KERNEL
);
734 if (txr
->tx_buf_ring
== NULL
)
738 pci_alloc_consistent(bp
->pdev
, TXBD_RING_SIZE
,
739 &txr
->tx_desc_mapping
);
740 if (txr
->tx_desc_ring
== NULL
)
747 bnx2_alloc_rx_mem(struct bnx2
*bp
)
751 for (i
= 0; i
< bp
->num_rx_rings
; i
++) {
752 struct bnx2_napi
*bnapi
= &bp
->bnx2_napi
[i
];
753 struct bnx2_rx_ring_info
*rxr
= &bnapi
->rx_ring
;
757 vmalloc(SW_RXBD_RING_SIZE
* bp
->rx_max_ring
);
758 if (rxr
->rx_buf_ring
== NULL
)
761 memset(rxr
->rx_buf_ring
, 0,
762 SW_RXBD_RING_SIZE
* bp
->rx_max_ring
);
764 for (j
= 0; j
< bp
->rx_max_ring
; j
++) {
765 rxr
->rx_desc_ring
[j
] =
766 pci_alloc_consistent(bp
->pdev
, RXBD_RING_SIZE
,
767 &rxr
->rx_desc_mapping
[j
]);
768 if (rxr
->rx_desc_ring
[j
] == NULL
)
773 if (bp
->rx_pg_ring_size
) {
774 rxr
->rx_pg_ring
= vmalloc(SW_RXPG_RING_SIZE
*
776 if (rxr
->rx_pg_ring
== NULL
)
779 memset(rxr
->rx_pg_ring
, 0, SW_RXPG_RING_SIZE
*
783 for (j
= 0; j
< bp
->rx_max_pg_ring
; j
++) {
784 rxr
->rx_pg_desc_ring
[j
] =
785 pci_alloc_consistent(bp
->pdev
, RXBD_RING_SIZE
,
786 &rxr
->rx_pg_desc_mapping
[j
]);
787 if (rxr
->rx_pg_desc_ring
[j
] == NULL
)
796 bnx2_free_mem(struct bnx2
*bp
)
799 struct bnx2_napi
*bnapi
= &bp
->bnx2_napi
[0];
801 bnx2_free_tx_mem(bp
);
802 bnx2_free_rx_mem(bp
);
804 for (i
= 0; i
< bp
->ctx_pages
; i
++) {
805 if (bp
->ctx_blk
[i
]) {
806 pci_free_consistent(bp
->pdev
, BCM_PAGE_SIZE
,
808 bp
->ctx_blk_mapping
[i
]);
809 bp
->ctx_blk
[i
] = NULL
;
812 if (bnapi
->status_blk
.msi
) {
813 pci_free_consistent(bp
->pdev
, bp
->status_stats_size
,
814 bnapi
->status_blk
.msi
,
815 bp
->status_blk_mapping
);
816 bnapi
->status_blk
.msi
= NULL
;
817 bp
->stats_blk
= NULL
;
822 bnx2_alloc_mem(struct bnx2
*bp
)
824 int i
, status_blk_size
, err
;
825 struct bnx2_napi
*bnapi
;
828 /* Combine status and statistics blocks into one allocation. */
829 status_blk_size
= L1_CACHE_ALIGN(sizeof(struct status_block
));
830 if (bp
->flags
& BNX2_FLAG_MSIX_CAP
)
831 status_blk_size
= L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC
*
832 BNX2_SBLK_MSIX_ALIGN_SIZE
);
833 bp
->status_stats_size
= status_blk_size
+
834 sizeof(struct statistics_block
);
836 status_blk
= pci_alloc_consistent(bp
->pdev
, bp
->status_stats_size
,
837 &bp
->status_blk_mapping
);
838 if (status_blk
== NULL
)
841 memset(status_blk
, 0, bp
->status_stats_size
);
843 bnapi
= &bp
->bnx2_napi
[0];
844 bnapi
->status_blk
.msi
= status_blk
;
845 bnapi
->hw_tx_cons_ptr
=
846 &bnapi
->status_blk
.msi
->status_tx_quick_consumer_index0
;
847 bnapi
->hw_rx_cons_ptr
=
848 &bnapi
->status_blk
.msi
->status_rx_quick_consumer_index0
;
849 if (bp
->flags
& BNX2_FLAG_MSIX_CAP
) {
850 for (i
= 1; i
< BNX2_MAX_MSIX_VEC
; i
++) {
851 struct status_block_msix
*sblk
;
853 bnapi
= &bp
->bnx2_napi
[i
];
855 sblk
= (void *) (status_blk
+
856 BNX2_SBLK_MSIX_ALIGN_SIZE
* i
);
857 bnapi
->status_blk
.msix
= sblk
;
858 bnapi
->hw_tx_cons_ptr
=
859 &sblk
->status_tx_quick_consumer_index
;
860 bnapi
->hw_rx_cons_ptr
=
861 &sblk
->status_rx_quick_consumer_index
;
862 bnapi
->int_num
= i
<< 24;
866 bp
->stats_blk
= status_blk
+ status_blk_size
;
868 bp
->stats_blk_mapping
= bp
->status_blk_mapping
+ status_blk_size
;
870 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
871 bp
->ctx_pages
= 0x2000 / BCM_PAGE_SIZE
;
872 if (bp
->ctx_pages
== 0)
874 for (i
= 0; i
< bp
->ctx_pages
; i
++) {
875 bp
->ctx_blk
[i
] = pci_alloc_consistent(bp
->pdev
,
877 &bp
->ctx_blk_mapping
[i
]);
878 if (bp
->ctx_blk
[i
] == NULL
)
883 err
= bnx2_alloc_rx_mem(bp
);
887 err
= bnx2_alloc_tx_mem(bp
);
899 bnx2_report_fw_link(struct bnx2
*bp
)
901 u32 fw_link_status
= 0;
903 if (bp
->phy_flags
& BNX2_PHY_FLAG_REMOTE_PHY_CAP
)
909 switch (bp
->line_speed
) {
911 if (bp
->duplex
== DUPLEX_HALF
)
912 fw_link_status
= BNX2_LINK_STATUS_10HALF
;
914 fw_link_status
= BNX2_LINK_STATUS_10FULL
;
917 if (bp
->duplex
== DUPLEX_HALF
)
918 fw_link_status
= BNX2_LINK_STATUS_100HALF
;
920 fw_link_status
= BNX2_LINK_STATUS_100FULL
;
923 if (bp
->duplex
== DUPLEX_HALF
)
924 fw_link_status
= BNX2_LINK_STATUS_1000HALF
;
926 fw_link_status
= BNX2_LINK_STATUS_1000FULL
;
929 if (bp
->duplex
== DUPLEX_HALF
)
930 fw_link_status
= BNX2_LINK_STATUS_2500HALF
;
932 fw_link_status
= BNX2_LINK_STATUS_2500FULL
;
936 fw_link_status
|= BNX2_LINK_STATUS_LINK_UP
;
939 fw_link_status
|= BNX2_LINK_STATUS_AN_ENABLED
;
941 bnx2_read_phy(bp
, bp
->mii_bmsr
, &bmsr
);
942 bnx2_read_phy(bp
, bp
->mii_bmsr
, &bmsr
);
944 if (!(bmsr
& BMSR_ANEGCOMPLETE
) ||
945 bp
->phy_flags
& BNX2_PHY_FLAG_PARALLEL_DETECT
)
946 fw_link_status
|= BNX2_LINK_STATUS_PARALLEL_DET
;
948 fw_link_status
|= BNX2_LINK_STATUS_AN_COMPLETE
;
952 fw_link_status
= BNX2_LINK_STATUS_LINK_DOWN
;
954 bnx2_shmem_wr(bp
, BNX2_LINK_STATUS
, fw_link_status
);
958 bnx2_xceiver_str(struct bnx2
*bp
)
960 return ((bp
->phy_port
== PORT_FIBRE
) ? "SerDes" :
961 ((bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) ? "Remote Copper" :
966 bnx2_report_link(struct bnx2
*bp
)
969 netif_carrier_on(bp
->dev
);
970 printk(KERN_INFO PFX
"%s NIC %s Link is Up, ", bp
->dev
->name
,
971 bnx2_xceiver_str(bp
));
973 printk("%d Mbps ", bp
->line_speed
);
975 if (bp
->duplex
== DUPLEX_FULL
)
976 printk("full duplex");
978 printk("half duplex");
981 if (bp
->flow_ctrl
& FLOW_CTRL_RX
) {
982 printk(", receive ");
983 if (bp
->flow_ctrl
& FLOW_CTRL_TX
)
984 printk("& transmit ");
987 printk(", transmit ");
989 printk("flow control ON");
994 netif_carrier_off(bp
->dev
);
995 printk(KERN_ERR PFX
"%s NIC %s Link is Down\n", bp
->dev
->name
,
996 bnx2_xceiver_str(bp
));
999 bnx2_report_fw_link(bp
);
1003 bnx2_resolve_flow_ctrl(struct bnx2
*bp
)
1005 u32 local_adv
, remote_adv
;
1008 if ((bp
->autoneg
& (AUTONEG_SPEED
| AUTONEG_FLOW_CTRL
)) !=
1009 (AUTONEG_SPEED
| AUTONEG_FLOW_CTRL
)) {
1011 if (bp
->duplex
== DUPLEX_FULL
) {
1012 bp
->flow_ctrl
= bp
->req_flow_ctrl
;
1017 if (bp
->duplex
!= DUPLEX_FULL
) {
1021 if ((bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) &&
1022 (CHIP_NUM(bp
) == CHIP_NUM_5708
)) {
1025 bnx2_read_phy(bp
, BCM5708S_1000X_STAT1
, &val
);
1026 if (val
& BCM5708S_1000X_STAT1_TX_PAUSE
)
1027 bp
->flow_ctrl
|= FLOW_CTRL_TX
;
1028 if (val
& BCM5708S_1000X_STAT1_RX_PAUSE
)
1029 bp
->flow_ctrl
|= FLOW_CTRL_RX
;
1033 bnx2_read_phy(bp
, bp
->mii_adv
, &local_adv
);
1034 bnx2_read_phy(bp
, bp
->mii_lpa
, &remote_adv
);
1036 if (bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) {
1037 u32 new_local_adv
= 0;
1038 u32 new_remote_adv
= 0;
1040 if (local_adv
& ADVERTISE_1000XPAUSE
)
1041 new_local_adv
|= ADVERTISE_PAUSE_CAP
;
1042 if (local_adv
& ADVERTISE_1000XPSE_ASYM
)
1043 new_local_adv
|= ADVERTISE_PAUSE_ASYM
;
1044 if (remote_adv
& ADVERTISE_1000XPAUSE
)
1045 new_remote_adv
|= ADVERTISE_PAUSE_CAP
;
1046 if (remote_adv
& ADVERTISE_1000XPSE_ASYM
)
1047 new_remote_adv
|= ADVERTISE_PAUSE_ASYM
;
1049 local_adv
= new_local_adv
;
1050 remote_adv
= new_remote_adv
;
1053 /* See Table 28B-3 of 802.3ab-1999 spec. */
1054 if (local_adv
& ADVERTISE_PAUSE_CAP
) {
1055 if(local_adv
& ADVERTISE_PAUSE_ASYM
) {
1056 if (remote_adv
& ADVERTISE_PAUSE_CAP
) {
1057 bp
->flow_ctrl
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
1059 else if (remote_adv
& ADVERTISE_PAUSE_ASYM
) {
1060 bp
->flow_ctrl
= FLOW_CTRL_RX
;
1064 if (remote_adv
& ADVERTISE_PAUSE_CAP
) {
1065 bp
->flow_ctrl
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
1069 else if (local_adv
& ADVERTISE_PAUSE_ASYM
) {
1070 if ((remote_adv
& ADVERTISE_PAUSE_CAP
) &&
1071 (remote_adv
& ADVERTISE_PAUSE_ASYM
)) {
1073 bp
->flow_ctrl
= FLOW_CTRL_TX
;
1079 bnx2_5709s_linkup(struct bnx2
*bp
)
1085 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_GP_STATUS
);
1086 bnx2_read_phy(bp
, MII_BNX2_GP_TOP_AN_STATUS1
, &val
);
1087 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_COMBO_IEEEB0
);
1089 if ((bp
->autoneg
& AUTONEG_SPEED
) == 0) {
1090 bp
->line_speed
= bp
->req_line_speed
;
1091 bp
->duplex
= bp
->req_duplex
;
1094 speed
= val
& MII_BNX2_GP_TOP_AN_SPEED_MSK
;
1096 case MII_BNX2_GP_TOP_AN_SPEED_10
:
1097 bp
->line_speed
= SPEED_10
;
1099 case MII_BNX2_GP_TOP_AN_SPEED_100
:
1100 bp
->line_speed
= SPEED_100
;
1102 case MII_BNX2_GP_TOP_AN_SPEED_1G
:
1103 case MII_BNX2_GP_TOP_AN_SPEED_1GKV
:
1104 bp
->line_speed
= SPEED_1000
;
1106 case MII_BNX2_GP_TOP_AN_SPEED_2_5G
:
1107 bp
->line_speed
= SPEED_2500
;
1110 if (val
& MII_BNX2_GP_TOP_AN_FD
)
1111 bp
->duplex
= DUPLEX_FULL
;
1113 bp
->duplex
= DUPLEX_HALF
;
1118 bnx2_5708s_linkup(struct bnx2
*bp
)
1123 bnx2_read_phy(bp
, BCM5708S_1000X_STAT1
, &val
);
1124 switch (val
& BCM5708S_1000X_STAT1_SPEED_MASK
) {
1125 case BCM5708S_1000X_STAT1_SPEED_10
:
1126 bp
->line_speed
= SPEED_10
;
1128 case BCM5708S_1000X_STAT1_SPEED_100
:
1129 bp
->line_speed
= SPEED_100
;
1131 case BCM5708S_1000X_STAT1_SPEED_1G
:
1132 bp
->line_speed
= SPEED_1000
;
1134 case BCM5708S_1000X_STAT1_SPEED_2G5
:
1135 bp
->line_speed
= SPEED_2500
;
1138 if (val
& BCM5708S_1000X_STAT1_FD
)
1139 bp
->duplex
= DUPLEX_FULL
;
1141 bp
->duplex
= DUPLEX_HALF
;
1147 bnx2_5706s_linkup(struct bnx2
*bp
)
1149 u32 bmcr
, local_adv
, remote_adv
, common
;
1152 bp
->line_speed
= SPEED_1000
;
1154 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
1155 if (bmcr
& BMCR_FULLDPLX
) {
1156 bp
->duplex
= DUPLEX_FULL
;
1159 bp
->duplex
= DUPLEX_HALF
;
1162 if (!(bmcr
& BMCR_ANENABLE
)) {
1166 bnx2_read_phy(bp
, bp
->mii_adv
, &local_adv
);
1167 bnx2_read_phy(bp
, bp
->mii_lpa
, &remote_adv
);
1169 common
= local_adv
& remote_adv
;
1170 if (common
& (ADVERTISE_1000XHALF
| ADVERTISE_1000XFULL
)) {
1172 if (common
& ADVERTISE_1000XFULL
) {
1173 bp
->duplex
= DUPLEX_FULL
;
1176 bp
->duplex
= DUPLEX_HALF
;
1184 bnx2_copper_linkup(struct bnx2
*bp
)
1188 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
1189 if (bmcr
& BMCR_ANENABLE
) {
1190 u32 local_adv
, remote_adv
, common
;
1192 bnx2_read_phy(bp
, MII_CTRL1000
, &local_adv
);
1193 bnx2_read_phy(bp
, MII_STAT1000
, &remote_adv
);
1195 common
= local_adv
& (remote_adv
>> 2);
1196 if (common
& ADVERTISE_1000FULL
) {
1197 bp
->line_speed
= SPEED_1000
;
1198 bp
->duplex
= DUPLEX_FULL
;
1200 else if (common
& ADVERTISE_1000HALF
) {
1201 bp
->line_speed
= SPEED_1000
;
1202 bp
->duplex
= DUPLEX_HALF
;
1205 bnx2_read_phy(bp
, bp
->mii_adv
, &local_adv
);
1206 bnx2_read_phy(bp
, bp
->mii_lpa
, &remote_adv
);
1208 common
= local_adv
& remote_adv
;
1209 if (common
& ADVERTISE_100FULL
) {
1210 bp
->line_speed
= SPEED_100
;
1211 bp
->duplex
= DUPLEX_FULL
;
1213 else if (common
& ADVERTISE_100HALF
) {
1214 bp
->line_speed
= SPEED_100
;
1215 bp
->duplex
= DUPLEX_HALF
;
1217 else if (common
& ADVERTISE_10FULL
) {
1218 bp
->line_speed
= SPEED_10
;
1219 bp
->duplex
= DUPLEX_FULL
;
1221 else if (common
& ADVERTISE_10HALF
) {
1222 bp
->line_speed
= SPEED_10
;
1223 bp
->duplex
= DUPLEX_HALF
;
1232 if (bmcr
& BMCR_SPEED100
) {
1233 bp
->line_speed
= SPEED_100
;
1236 bp
->line_speed
= SPEED_10
;
1238 if (bmcr
& BMCR_FULLDPLX
) {
1239 bp
->duplex
= DUPLEX_FULL
;
1242 bp
->duplex
= DUPLEX_HALF
;
1250 bnx2_init_rx_context(struct bnx2
*bp
, u32 cid
)
1252 u32 val
, rx_cid_addr
= GET_CID_ADDR(cid
);
1254 val
= BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE
;
1255 val
|= BNX2_L2CTX_CTX_TYPE_SIZE_L2
;
1258 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
1259 u32 lo_water
, hi_water
;
1261 if (bp
->flow_ctrl
& FLOW_CTRL_TX
)
1262 lo_water
= BNX2_L2CTX_LO_WATER_MARK_DEFAULT
;
1264 lo_water
= BNX2_L2CTX_LO_WATER_MARK_DIS
;
1265 if (lo_water
>= bp
->rx_ring_size
)
1268 hi_water
= bp
->rx_ring_size
/ 4;
1270 if (hi_water
<= lo_water
)
1273 hi_water
/= BNX2_L2CTX_HI_WATER_MARK_SCALE
;
1274 lo_water
/= BNX2_L2CTX_LO_WATER_MARK_SCALE
;
1278 else if (hi_water
== 0)
1280 val
|= lo_water
| (hi_water
<< BNX2_L2CTX_HI_WATER_MARK_SHIFT
);
1282 bnx2_ctx_wr(bp
, rx_cid_addr
, BNX2_L2CTX_CTX_TYPE
, val
);
1286 bnx2_init_all_rx_contexts(struct bnx2
*bp
)
1291 for (i
= 0, cid
= RX_CID
; i
< bp
->num_rx_rings
; i
++, cid
++) {
1294 bnx2_init_rx_context(bp
, cid
);
1299 bnx2_set_mac_link(struct bnx2
*bp
)
1303 REG_WR(bp
, BNX2_EMAC_TX_LENGTHS
, 0x2620);
1304 if (bp
->link_up
&& (bp
->line_speed
== SPEED_1000
) &&
1305 (bp
->duplex
== DUPLEX_HALF
)) {
1306 REG_WR(bp
, BNX2_EMAC_TX_LENGTHS
, 0x26ff);
1309 /* Configure the EMAC mode register. */
1310 val
= REG_RD(bp
, BNX2_EMAC_MODE
);
1312 val
&= ~(BNX2_EMAC_MODE_PORT
| BNX2_EMAC_MODE_HALF_DUPLEX
|
1313 BNX2_EMAC_MODE_MAC_LOOP
| BNX2_EMAC_MODE_FORCE_LINK
|
1314 BNX2_EMAC_MODE_25G_MODE
);
1317 switch (bp
->line_speed
) {
1319 if (CHIP_NUM(bp
) != CHIP_NUM_5706
) {
1320 val
|= BNX2_EMAC_MODE_PORT_MII_10M
;
1325 val
|= BNX2_EMAC_MODE_PORT_MII
;
1328 val
|= BNX2_EMAC_MODE_25G_MODE
;
1331 val
|= BNX2_EMAC_MODE_PORT_GMII
;
1336 val
|= BNX2_EMAC_MODE_PORT_GMII
;
1339 /* Set the MAC to operate in the appropriate duplex mode. */
1340 if (bp
->duplex
== DUPLEX_HALF
)
1341 val
|= BNX2_EMAC_MODE_HALF_DUPLEX
;
1342 REG_WR(bp
, BNX2_EMAC_MODE
, val
);
1344 /* Enable/disable rx PAUSE. */
1345 bp
->rx_mode
&= ~BNX2_EMAC_RX_MODE_FLOW_EN
;
1347 if (bp
->flow_ctrl
& FLOW_CTRL_RX
)
1348 bp
->rx_mode
|= BNX2_EMAC_RX_MODE_FLOW_EN
;
1349 REG_WR(bp
, BNX2_EMAC_RX_MODE
, bp
->rx_mode
);
1351 /* Enable/disable tx PAUSE. */
1352 val
= REG_RD(bp
, BNX2_EMAC_TX_MODE
);
1353 val
&= ~BNX2_EMAC_TX_MODE_FLOW_EN
;
1355 if (bp
->flow_ctrl
& FLOW_CTRL_TX
)
1356 val
|= BNX2_EMAC_TX_MODE_FLOW_EN
;
1357 REG_WR(bp
, BNX2_EMAC_TX_MODE
, val
);
1359 /* Acknowledge the interrupt. */
1360 REG_WR(bp
, BNX2_EMAC_STATUS
, BNX2_EMAC_STATUS_LINK_CHANGE
);
1362 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
1363 bnx2_init_all_rx_contexts(bp
);
1367 bnx2_enable_bmsr1(struct bnx2
*bp
)
1369 if ((bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) &&
1370 (CHIP_NUM(bp
) == CHIP_NUM_5709
))
1371 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
,
1372 MII_BNX2_BLK_ADDR_GP_STATUS
);
1376 bnx2_disable_bmsr1(struct bnx2
*bp
)
1378 if ((bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) &&
1379 (CHIP_NUM(bp
) == CHIP_NUM_5709
))
1380 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
,
1381 MII_BNX2_BLK_ADDR_COMBO_IEEEB0
);
1385 bnx2_test_and_enable_2g5(struct bnx2
*bp
)
1390 if (!(bp
->phy_flags
& BNX2_PHY_FLAG_2_5G_CAPABLE
))
1393 if (bp
->autoneg
& AUTONEG_SPEED
)
1394 bp
->advertising
|= ADVERTISED_2500baseX_Full
;
1396 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
1397 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_OVER1G
);
1399 bnx2_read_phy(bp
, bp
->mii_up1
, &up1
);
1400 if (!(up1
& BCM5708S_UP1_2G5
)) {
1401 up1
|= BCM5708S_UP1_2G5
;
1402 bnx2_write_phy(bp
, bp
->mii_up1
, up1
);
1406 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
1407 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
,
1408 MII_BNX2_BLK_ADDR_COMBO_IEEEB0
);
1414 bnx2_test_and_disable_2g5(struct bnx2
*bp
)
1419 if (!(bp
->phy_flags
& BNX2_PHY_FLAG_2_5G_CAPABLE
))
1422 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
1423 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_OVER1G
);
1425 bnx2_read_phy(bp
, bp
->mii_up1
, &up1
);
1426 if (up1
& BCM5708S_UP1_2G5
) {
1427 up1
&= ~BCM5708S_UP1_2G5
;
1428 bnx2_write_phy(bp
, bp
->mii_up1
, up1
);
1432 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
1433 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
,
1434 MII_BNX2_BLK_ADDR_COMBO_IEEEB0
);
1440 bnx2_enable_forced_2g5(struct bnx2
*bp
)
1444 if (!(bp
->phy_flags
& BNX2_PHY_FLAG_2_5G_CAPABLE
))
1447 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
1450 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
,
1451 MII_BNX2_BLK_ADDR_SERDES_DIG
);
1452 bnx2_read_phy(bp
, MII_BNX2_SERDES_DIG_MISC1
, &val
);
1453 val
&= ~MII_BNX2_SD_MISC1_FORCE_MSK
;
1454 val
|= MII_BNX2_SD_MISC1_FORCE
| MII_BNX2_SD_MISC1_FORCE_2_5G
;
1455 bnx2_write_phy(bp
, MII_BNX2_SERDES_DIG_MISC1
, val
);
1457 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
,
1458 MII_BNX2_BLK_ADDR_COMBO_IEEEB0
);
1459 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
1461 } else if (CHIP_NUM(bp
) == CHIP_NUM_5708
) {
1462 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
1463 bmcr
|= BCM5708S_BMCR_FORCE_2500
;
1466 if (bp
->autoneg
& AUTONEG_SPEED
) {
1467 bmcr
&= ~BMCR_ANENABLE
;
1468 if (bp
->req_duplex
== DUPLEX_FULL
)
1469 bmcr
|= BMCR_FULLDPLX
;
1471 bnx2_write_phy(bp
, bp
->mii_bmcr
, bmcr
);
1475 bnx2_disable_forced_2g5(struct bnx2
*bp
)
1479 if (!(bp
->phy_flags
& BNX2_PHY_FLAG_2_5G_CAPABLE
))
1482 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
1485 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
,
1486 MII_BNX2_BLK_ADDR_SERDES_DIG
);
1487 bnx2_read_phy(bp
, MII_BNX2_SERDES_DIG_MISC1
, &val
);
1488 val
&= ~MII_BNX2_SD_MISC1_FORCE
;
1489 bnx2_write_phy(bp
, MII_BNX2_SERDES_DIG_MISC1
, val
);
1491 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
,
1492 MII_BNX2_BLK_ADDR_COMBO_IEEEB0
);
1493 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
1495 } else if (CHIP_NUM(bp
) == CHIP_NUM_5708
) {
1496 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
1497 bmcr
&= ~BCM5708S_BMCR_FORCE_2500
;
1500 if (bp
->autoneg
& AUTONEG_SPEED
)
1501 bmcr
|= BMCR_SPEED1000
| BMCR_ANENABLE
| BMCR_ANRESTART
;
1502 bnx2_write_phy(bp
, bp
->mii_bmcr
, bmcr
);
1506 bnx2_5706s_force_link_dn(struct bnx2
*bp
, int start
)
1510 bnx2_write_phy(bp
, MII_BNX2_DSP_ADDRESS
, MII_EXPAND_SERDES_CTL
);
1511 bnx2_read_phy(bp
, MII_BNX2_DSP_RW_PORT
, &val
);
1513 bnx2_write_phy(bp
, MII_BNX2_DSP_RW_PORT
, val
& 0xff0f);
1515 bnx2_write_phy(bp
, MII_BNX2_DSP_RW_PORT
, val
| 0xc0);
1519 bnx2_set_link(struct bnx2
*bp
)
1524 if (bp
->loopback
== MAC_LOOPBACK
|| bp
->loopback
== PHY_LOOPBACK
) {
1529 if (bp
->phy_flags
& BNX2_PHY_FLAG_REMOTE_PHY_CAP
)
1532 link_up
= bp
->link_up
;
1534 bnx2_enable_bmsr1(bp
);
1535 bnx2_read_phy(bp
, bp
->mii_bmsr1
, &bmsr
);
1536 bnx2_read_phy(bp
, bp
->mii_bmsr1
, &bmsr
);
1537 bnx2_disable_bmsr1(bp
);
1539 if ((bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) &&
1540 (CHIP_NUM(bp
) == CHIP_NUM_5706
)) {
1543 if (bp
->phy_flags
& BNX2_PHY_FLAG_FORCED_DOWN
) {
1544 bnx2_5706s_force_link_dn(bp
, 0);
1545 bp
->phy_flags
&= ~BNX2_PHY_FLAG_FORCED_DOWN
;
1547 val
= REG_RD(bp
, BNX2_EMAC_STATUS
);
1549 bnx2_write_phy(bp
, MII_BNX2_MISC_SHADOW
, MISC_SHDW_AN_DBG
);
1550 bnx2_read_phy(bp
, MII_BNX2_MISC_SHADOW
, &an_dbg
);
1551 bnx2_read_phy(bp
, MII_BNX2_MISC_SHADOW
, &an_dbg
);
1553 if ((val
& BNX2_EMAC_STATUS_LINK
) &&
1554 !(an_dbg
& MISC_SHDW_AN_DBG_NOSYNC
))
1555 bmsr
|= BMSR_LSTATUS
;
1557 bmsr
&= ~BMSR_LSTATUS
;
1560 if (bmsr
& BMSR_LSTATUS
) {
1563 if (bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) {
1564 if (CHIP_NUM(bp
) == CHIP_NUM_5706
)
1565 bnx2_5706s_linkup(bp
);
1566 else if (CHIP_NUM(bp
) == CHIP_NUM_5708
)
1567 bnx2_5708s_linkup(bp
);
1568 else if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
1569 bnx2_5709s_linkup(bp
);
1572 bnx2_copper_linkup(bp
);
1574 bnx2_resolve_flow_ctrl(bp
);
1577 if ((bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) &&
1578 (bp
->autoneg
& AUTONEG_SPEED
))
1579 bnx2_disable_forced_2g5(bp
);
1581 if (bp
->phy_flags
& BNX2_PHY_FLAG_PARALLEL_DETECT
) {
1584 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
1585 bmcr
|= BMCR_ANENABLE
;
1586 bnx2_write_phy(bp
, bp
->mii_bmcr
, bmcr
);
1588 bp
->phy_flags
&= ~BNX2_PHY_FLAG_PARALLEL_DETECT
;
1593 if (bp
->link_up
!= link_up
) {
1594 bnx2_report_link(bp
);
1597 bnx2_set_mac_link(bp
);
1603 bnx2_reset_phy(struct bnx2
*bp
)
1608 bnx2_write_phy(bp
, bp
->mii_bmcr
, BMCR_RESET
);
1610 #define PHY_RESET_MAX_WAIT 100
1611 for (i
= 0; i
< PHY_RESET_MAX_WAIT
; i
++) {
1614 bnx2_read_phy(bp
, bp
->mii_bmcr
, ®
);
1615 if (!(reg
& BMCR_RESET
)) {
1620 if (i
== PHY_RESET_MAX_WAIT
) {
1627 bnx2_phy_get_pause_adv(struct bnx2
*bp
)
1631 if ((bp
->req_flow_ctrl
& (FLOW_CTRL_RX
| FLOW_CTRL_TX
)) ==
1632 (FLOW_CTRL_RX
| FLOW_CTRL_TX
)) {
1634 if (bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) {
1635 adv
= ADVERTISE_1000XPAUSE
;
1638 adv
= ADVERTISE_PAUSE_CAP
;
1641 else if (bp
->req_flow_ctrl
& FLOW_CTRL_TX
) {
1642 if (bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) {
1643 adv
= ADVERTISE_1000XPSE_ASYM
;
1646 adv
= ADVERTISE_PAUSE_ASYM
;
1649 else if (bp
->req_flow_ctrl
& FLOW_CTRL_RX
) {
1650 if (bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) {
1651 adv
= ADVERTISE_1000XPAUSE
| ADVERTISE_1000XPSE_ASYM
;
1654 adv
= ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
;
1660 static int bnx2_fw_sync(struct bnx2
*, u32
, int, int);
1663 bnx2_setup_remote_phy(struct bnx2
*bp
, u8 port
)
1664 __releases(&bp
->phy_lock
)
1665 __acquires(&bp
->phy_lock
)
1667 u32 speed_arg
= 0, pause_adv
;
1669 pause_adv
= bnx2_phy_get_pause_adv(bp
);
1671 if (bp
->autoneg
& AUTONEG_SPEED
) {
1672 speed_arg
|= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG
;
1673 if (bp
->advertising
& ADVERTISED_10baseT_Half
)
1674 speed_arg
|= BNX2_NETLINK_SET_LINK_SPEED_10HALF
;
1675 if (bp
->advertising
& ADVERTISED_10baseT_Full
)
1676 speed_arg
|= BNX2_NETLINK_SET_LINK_SPEED_10FULL
;
1677 if (bp
->advertising
& ADVERTISED_100baseT_Half
)
1678 speed_arg
|= BNX2_NETLINK_SET_LINK_SPEED_100HALF
;
1679 if (bp
->advertising
& ADVERTISED_100baseT_Full
)
1680 speed_arg
|= BNX2_NETLINK_SET_LINK_SPEED_100FULL
;
1681 if (bp
->advertising
& ADVERTISED_1000baseT_Full
)
1682 speed_arg
|= BNX2_NETLINK_SET_LINK_SPEED_1GFULL
;
1683 if (bp
->advertising
& ADVERTISED_2500baseX_Full
)
1684 speed_arg
|= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL
;
1686 if (bp
->req_line_speed
== SPEED_2500
)
1687 speed_arg
= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL
;
1688 else if (bp
->req_line_speed
== SPEED_1000
)
1689 speed_arg
= BNX2_NETLINK_SET_LINK_SPEED_1GFULL
;
1690 else if (bp
->req_line_speed
== SPEED_100
) {
1691 if (bp
->req_duplex
== DUPLEX_FULL
)
1692 speed_arg
= BNX2_NETLINK_SET_LINK_SPEED_100FULL
;
1694 speed_arg
= BNX2_NETLINK_SET_LINK_SPEED_100HALF
;
1695 } else if (bp
->req_line_speed
== SPEED_10
) {
1696 if (bp
->req_duplex
== DUPLEX_FULL
)
1697 speed_arg
= BNX2_NETLINK_SET_LINK_SPEED_10FULL
;
1699 speed_arg
= BNX2_NETLINK_SET_LINK_SPEED_10HALF
;
1703 if (pause_adv
& (ADVERTISE_1000XPAUSE
| ADVERTISE_PAUSE_CAP
))
1704 speed_arg
|= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE
;
1705 if (pause_adv
& (ADVERTISE_1000XPSE_ASYM
| ADVERTISE_PAUSE_ASYM
))
1706 speed_arg
|= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE
;
1708 if (port
== PORT_TP
)
1709 speed_arg
|= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE
|
1710 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED
;
1712 bnx2_shmem_wr(bp
, BNX2_DRV_MB_ARG0
, speed_arg
);
1714 spin_unlock_bh(&bp
->phy_lock
);
1715 bnx2_fw_sync(bp
, BNX2_DRV_MSG_CODE_CMD_SET_LINK
, 1, 0);
1716 spin_lock_bh(&bp
->phy_lock
);
1722 bnx2_setup_serdes_phy(struct bnx2
*bp
, u8 port
)
1723 __releases(&bp
->phy_lock
)
1724 __acquires(&bp
->phy_lock
)
1729 if (bp
->phy_flags
& BNX2_PHY_FLAG_REMOTE_PHY_CAP
)
1730 return (bnx2_setup_remote_phy(bp
, port
));
1732 if (!(bp
->autoneg
& AUTONEG_SPEED
)) {
1734 int force_link_down
= 0;
1736 if (bp
->req_line_speed
== SPEED_2500
) {
1737 if (!bnx2_test_and_enable_2g5(bp
))
1738 force_link_down
= 1;
1739 } else if (bp
->req_line_speed
== SPEED_1000
) {
1740 if (bnx2_test_and_disable_2g5(bp
))
1741 force_link_down
= 1;
1743 bnx2_read_phy(bp
, bp
->mii_adv
, &adv
);
1744 adv
&= ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
);
1746 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
1747 new_bmcr
= bmcr
& ~BMCR_ANENABLE
;
1748 new_bmcr
|= BMCR_SPEED1000
;
1750 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
1751 if (bp
->req_line_speed
== SPEED_2500
)
1752 bnx2_enable_forced_2g5(bp
);
1753 else if (bp
->req_line_speed
== SPEED_1000
) {
1754 bnx2_disable_forced_2g5(bp
);
1755 new_bmcr
&= ~0x2000;
1758 } else if (CHIP_NUM(bp
) == CHIP_NUM_5708
) {
1759 if (bp
->req_line_speed
== SPEED_2500
)
1760 new_bmcr
|= BCM5708S_BMCR_FORCE_2500
;
1762 new_bmcr
= bmcr
& ~BCM5708S_BMCR_FORCE_2500
;
1765 if (bp
->req_duplex
== DUPLEX_FULL
) {
1766 adv
|= ADVERTISE_1000XFULL
;
1767 new_bmcr
|= BMCR_FULLDPLX
;
1770 adv
|= ADVERTISE_1000XHALF
;
1771 new_bmcr
&= ~BMCR_FULLDPLX
;
1773 if ((new_bmcr
!= bmcr
) || (force_link_down
)) {
1774 /* Force a link down visible on the other side */
1776 bnx2_write_phy(bp
, bp
->mii_adv
, adv
&
1777 ~(ADVERTISE_1000XFULL
|
1778 ADVERTISE_1000XHALF
));
1779 bnx2_write_phy(bp
, bp
->mii_bmcr
, bmcr
|
1780 BMCR_ANRESTART
| BMCR_ANENABLE
);
1783 netif_carrier_off(bp
->dev
);
1784 bnx2_write_phy(bp
, bp
->mii_bmcr
, new_bmcr
);
1785 bnx2_report_link(bp
);
1787 bnx2_write_phy(bp
, bp
->mii_adv
, adv
);
1788 bnx2_write_phy(bp
, bp
->mii_bmcr
, new_bmcr
);
1790 bnx2_resolve_flow_ctrl(bp
);
1791 bnx2_set_mac_link(bp
);
1796 bnx2_test_and_enable_2g5(bp
);
1798 if (bp
->advertising
& ADVERTISED_1000baseT_Full
)
1799 new_adv
|= ADVERTISE_1000XFULL
;
1801 new_adv
|= bnx2_phy_get_pause_adv(bp
);
1803 bnx2_read_phy(bp
, bp
->mii_adv
, &adv
);
1804 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
1806 bp
->serdes_an_pending
= 0;
1807 if ((adv
!= new_adv
) || ((bmcr
& BMCR_ANENABLE
) == 0)) {
1808 /* Force a link down visible on the other side */
1810 bnx2_write_phy(bp
, bp
->mii_bmcr
, BMCR_LOOPBACK
);
1811 spin_unlock_bh(&bp
->phy_lock
);
1813 spin_lock_bh(&bp
->phy_lock
);
1816 bnx2_write_phy(bp
, bp
->mii_adv
, new_adv
);
1817 bnx2_write_phy(bp
, bp
->mii_bmcr
, bmcr
| BMCR_ANRESTART
|
1819 /* Speed up link-up time when the link partner
1820 * does not autonegotiate which is very common
1821 * in blade servers. Some blade servers use
1822 * IPMI for kerboard input and it's important
1823 * to minimize link disruptions. Autoneg. involves
1824 * exchanging base pages plus 3 next pages and
1825 * normally completes in about 120 msec.
1827 bp
->current_interval
= BNX2_SERDES_AN_TIMEOUT
;
1828 bp
->serdes_an_pending
= 1;
1829 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
1831 bnx2_resolve_flow_ctrl(bp
);
1832 bnx2_set_mac_link(bp
);
1838 #define ETHTOOL_ALL_FIBRE_SPEED \
1839 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1840 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1841 (ADVERTISED_1000baseT_Full)
1843 #define ETHTOOL_ALL_COPPER_SPEED \
1844 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1845 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1846 ADVERTISED_1000baseT_Full)
1848 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1849 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1851 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1854 bnx2_set_default_remote_link(struct bnx2
*bp
)
1858 if (bp
->phy_port
== PORT_TP
)
1859 link
= bnx2_shmem_rd(bp
, BNX2_RPHY_COPPER_LINK
);
1861 link
= bnx2_shmem_rd(bp
, BNX2_RPHY_SERDES_LINK
);
1863 if (link
& BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG
) {
1864 bp
->req_line_speed
= 0;
1865 bp
->autoneg
|= AUTONEG_SPEED
;
1866 bp
->advertising
= ADVERTISED_Autoneg
;
1867 if (link
& BNX2_NETLINK_SET_LINK_SPEED_10HALF
)
1868 bp
->advertising
|= ADVERTISED_10baseT_Half
;
1869 if (link
& BNX2_NETLINK_SET_LINK_SPEED_10FULL
)
1870 bp
->advertising
|= ADVERTISED_10baseT_Full
;
1871 if (link
& BNX2_NETLINK_SET_LINK_SPEED_100HALF
)
1872 bp
->advertising
|= ADVERTISED_100baseT_Half
;
1873 if (link
& BNX2_NETLINK_SET_LINK_SPEED_100FULL
)
1874 bp
->advertising
|= ADVERTISED_100baseT_Full
;
1875 if (link
& BNX2_NETLINK_SET_LINK_SPEED_1GFULL
)
1876 bp
->advertising
|= ADVERTISED_1000baseT_Full
;
1877 if (link
& BNX2_NETLINK_SET_LINK_SPEED_2G5FULL
)
1878 bp
->advertising
|= ADVERTISED_2500baseX_Full
;
1881 bp
->advertising
= 0;
1882 bp
->req_duplex
= DUPLEX_FULL
;
1883 if (link
& BNX2_NETLINK_SET_LINK_SPEED_10
) {
1884 bp
->req_line_speed
= SPEED_10
;
1885 if (link
& BNX2_NETLINK_SET_LINK_SPEED_10HALF
)
1886 bp
->req_duplex
= DUPLEX_HALF
;
1888 if (link
& BNX2_NETLINK_SET_LINK_SPEED_100
) {
1889 bp
->req_line_speed
= SPEED_100
;
1890 if (link
& BNX2_NETLINK_SET_LINK_SPEED_100HALF
)
1891 bp
->req_duplex
= DUPLEX_HALF
;
1893 if (link
& BNX2_NETLINK_SET_LINK_SPEED_1GFULL
)
1894 bp
->req_line_speed
= SPEED_1000
;
1895 if (link
& BNX2_NETLINK_SET_LINK_SPEED_2G5FULL
)
1896 bp
->req_line_speed
= SPEED_2500
;
1901 bnx2_set_default_link(struct bnx2
*bp
)
1903 if (bp
->phy_flags
& BNX2_PHY_FLAG_REMOTE_PHY_CAP
) {
1904 bnx2_set_default_remote_link(bp
);
1908 bp
->autoneg
= AUTONEG_SPEED
| AUTONEG_FLOW_CTRL
;
1909 bp
->req_line_speed
= 0;
1910 if (bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) {
1913 bp
->advertising
= ETHTOOL_ALL_FIBRE_SPEED
| ADVERTISED_Autoneg
;
1915 reg
= bnx2_shmem_rd(bp
, BNX2_PORT_HW_CFG_CONFIG
);
1916 reg
&= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK
;
1917 if (reg
== BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G
) {
1919 bp
->req_line_speed
= bp
->line_speed
= SPEED_1000
;
1920 bp
->req_duplex
= DUPLEX_FULL
;
1923 bp
->advertising
= ETHTOOL_ALL_COPPER_SPEED
| ADVERTISED_Autoneg
;
1927 bnx2_send_heart_beat(struct bnx2
*bp
)
1932 spin_lock(&bp
->indirect_lock
);
1933 msg
= (u32
) (++bp
->fw_drv_pulse_wr_seq
& BNX2_DRV_PULSE_SEQ_MASK
);
1934 addr
= bp
->shmem_base
+ BNX2_DRV_PULSE_MB
;
1935 REG_WR(bp
, BNX2_PCICFG_REG_WINDOW_ADDRESS
, addr
);
1936 REG_WR(bp
, BNX2_PCICFG_REG_WINDOW
, msg
);
1937 spin_unlock(&bp
->indirect_lock
);
1941 bnx2_remote_phy_event(struct bnx2
*bp
)
1944 u8 link_up
= bp
->link_up
;
1947 msg
= bnx2_shmem_rd(bp
, BNX2_LINK_STATUS
);
1949 if (msg
& BNX2_LINK_STATUS_HEART_BEAT_EXPIRED
)
1950 bnx2_send_heart_beat(bp
);
1952 msg
&= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED
;
1954 if ((msg
& BNX2_LINK_STATUS_LINK_UP
) == BNX2_LINK_STATUS_LINK_DOWN
)
1960 speed
= msg
& BNX2_LINK_STATUS_SPEED_MASK
;
1961 bp
->duplex
= DUPLEX_FULL
;
1963 case BNX2_LINK_STATUS_10HALF
:
1964 bp
->duplex
= DUPLEX_HALF
;
1965 case BNX2_LINK_STATUS_10FULL
:
1966 bp
->line_speed
= SPEED_10
;
1968 case BNX2_LINK_STATUS_100HALF
:
1969 bp
->duplex
= DUPLEX_HALF
;
1970 case BNX2_LINK_STATUS_100BASE_T4
:
1971 case BNX2_LINK_STATUS_100FULL
:
1972 bp
->line_speed
= SPEED_100
;
1974 case BNX2_LINK_STATUS_1000HALF
:
1975 bp
->duplex
= DUPLEX_HALF
;
1976 case BNX2_LINK_STATUS_1000FULL
:
1977 bp
->line_speed
= SPEED_1000
;
1979 case BNX2_LINK_STATUS_2500HALF
:
1980 bp
->duplex
= DUPLEX_HALF
;
1981 case BNX2_LINK_STATUS_2500FULL
:
1982 bp
->line_speed
= SPEED_2500
;
1990 if ((bp
->autoneg
& (AUTONEG_SPEED
| AUTONEG_FLOW_CTRL
)) !=
1991 (AUTONEG_SPEED
| AUTONEG_FLOW_CTRL
)) {
1992 if (bp
->duplex
== DUPLEX_FULL
)
1993 bp
->flow_ctrl
= bp
->req_flow_ctrl
;
1995 if (msg
& BNX2_LINK_STATUS_TX_FC_ENABLED
)
1996 bp
->flow_ctrl
|= FLOW_CTRL_TX
;
1997 if (msg
& BNX2_LINK_STATUS_RX_FC_ENABLED
)
1998 bp
->flow_ctrl
|= FLOW_CTRL_RX
;
2001 old_port
= bp
->phy_port
;
2002 if (msg
& BNX2_LINK_STATUS_SERDES_LINK
)
2003 bp
->phy_port
= PORT_FIBRE
;
2005 bp
->phy_port
= PORT_TP
;
2007 if (old_port
!= bp
->phy_port
)
2008 bnx2_set_default_link(bp
);
2011 if (bp
->link_up
!= link_up
)
2012 bnx2_report_link(bp
);
2014 bnx2_set_mac_link(bp
);
2018 bnx2_set_remote_link(struct bnx2
*bp
)
2022 evt_code
= bnx2_shmem_rd(bp
, BNX2_FW_EVT_CODE_MB
);
2024 case BNX2_FW_EVT_CODE_LINK_EVENT
:
2025 bnx2_remote_phy_event(bp
);
2027 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT
:
2029 bnx2_send_heart_beat(bp
);
2036 bnx2_setup_copper_phy(struct bnx2
*bp
)
2037 __releases(&bp
->phy_lock
)
2038 __acquires(&bp
->phy_lock
)
2043 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
2045 if (bp
->autoneg
& AUTONEG_SPEED
) {
2046 u32 adv_reg
, adv1000_reg
;
2047 u32 new_adv_reg
= 0;
2048 u32 new_adv1000_reg
= 0;
2050 bnx2_read_phy(bp
, bp
->mii_adv
, &adv_reg
);
2051 adv_reg
&= (PHY_ALL_10_100_SPEED
| ADVERTISE_PAUSE_CAP
|
2052 ADVERTISE_PAUSE_ASYM
);
2054 bnx2_read_phy(bp
, MII_CTRL1000
, &adv1000_reg
);
2055 adv1000_reg
&= PHY_ALL_1000_SPEED
;
2057 if (bp
->advertising
& ADVERTISED_10baseT_Half
)
2058 new_adv_reg
|= ADVERTISE_10HALF
;
2059 if (bp
->advertising
& ADVERTISED_10baseT_Full
)
2060 new_adv_reg
|= ADVERTISE_10FULL
;
2061 if (bp
->advertising
& ADVERTISED_100baseT_Half
)
2062 new_adv_reg
|= ADVERTISE_100HALF
;
2063 if (bp
->advertising
& ADVERTISED_100baseT_Full
)
2064 new_adv_reg
|= ADVERTISE_100FULL
;
2065 if (bp
->advertising
& ADVERTISED_1000baseT_Full
)
2066 new_adv1000_reg
|= ADVERTISE_1000FULL
;
2068 new_adv_reg
|= ADVERTISE_CSMA
;
2070 new_adv_reg
|= bnx2_phy_get_pause_adv(bp
);
2072 if ((adv1000_reg
!= new_adv1000_reg
) ||
2073 (adv_reg
!= new_adv_reg
) ||
2074 ((bmcr
& BMCR_ANENABLE
) == 0)) {
2076 bnx2_write_phy(bp
, bp
->mii_adv
, new_adv_reg
);
2077 bnx2_write_phy(bp
, MII_CTRL1000
, new_adv1000_reg
);
2078 bnx2_write_phy(bp
, bp
->mii_bmcr
, BMCR_ANRESTART
|
2081 else if (bp
->link_up
) {
2082 /* Flow ctrl may have changed from auto to forced */
2083 /* or vice-versa. */
2085 bnx2_resolve_flow_ctrl(bp
);
2086 bnx2_set_mac_link(bp
);
2092 if (bp
->req_line_speed
== SPEED_100
) {
2093 new_bmcr
|= BMCR_SPEED100
;
2095 if (bp
->req_duplex
== DUPLEX_FULL
) {
2096 new_bmcr
|= BMCR_FULLDPLX
;
2098 if (new_bmcr
!= bmcr
) {
2101 bnx2_read_phy(bp
, bp
->mii_bmsr
, &bmsr
);
2102 bnx2_read_phy(bp
, bp
->mii_bmsr
, &bmsr
);
2104 if (bmsr
& BMSR_LSTATUS
) {
2105 /* Force link down */
2106 bnx2_write_phy(bp
, bp
->mii_bmcr
, BMCR_LOOPBACK
);
2107 spin_unlock_bh(&bp
->phy_lock
);
2109 spin_lock_bh(&bp
->phy_lock
);
2111 bnx2_read_phy(bp
, bp
->mii_bmsr
, &bmsr
);
2112 bnx2_read_phy(bp
, bp
->mii_bmsr
, &bmsr
);
2115 bnx2_write_phy(bp
, bp
->mii_bmcr
, new_bmcr
);
2117 /* Normally, the new speed is setup after the link has
2118 * gone down and up again. In some cases, link will not go
2119 * down so we need to set up the new speed here.
2121 if (bmsr
& BMSR_LSTATUS
) {
2122 bp
->line_speed
= bp
->req_line_speed
;
2123 bp
->duplex
= bp
->req_duplex
;
2124 bnx2_resolve_flow_ctrl(bp
);
2125 bnx2_set_mac_link(bp
);
2128 bnx2_resolve_flow_ctrl(bp
);
2129 bnx2_set_mac_link(bp
);
2135 bnx2_setup_phy(struct bnx2
*bp
, u8 port
)
2136 __releases(&bp
->phy_lock
)
2137 __acquires(&bp
->phy_lock
)
2139 if (bp
->loopback
== MAC_LOOPBACK
)
2142 if (bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) {
2143 return (bnx2_setup_serdes_phy(bp
, port
));
2146 return (bnx2_setup_copper_phy(bp
));
2151 bnx2_init_5709s_phy(struct bnx2
*bp
, int reset_phy
)
2155 bp
->mii_bmcr
= MII_BMCR
+ 0x10;
2156 bp
->mii_bmsr
= MII_BMSR
+ 0x10;
2157 bp
->mii_bmsr1
= MII_BNX2_GP_TOP_AN_STATUS1
;
2158 bp
->mii_adv
= MII_ADVERTISE
+ 0x10;
2159 bp
->mii_lpa
= MII_LPA
+ 0x10;
2160 bp
->mii_up1
= MII_BNX2_OVER1G_UP1
;
2162 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_AER
);
2163 bnx2_write_phy(bp
, MII_BNX2_AER_AER
, MII_BNX2_AER_AER_AN_MMD
);
2165 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_COMBO_IEEEB0
);
2169 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_SERDES_DIG
);
2171 bnx2_read_phy(bp
, MII_BNX2_SERDES_DIG_1000XCTL1
, &val
);
2172 val
&= ~MII_BNX2_SD_1000XCTL1_AUTODET
;
2173 val
|= MII_BNX2_SD_1000XCTL1_FIBER
;
2174 bnx2_write_phy(bp
, MII_BNX2_SERDES_DIG_1000XCTL1
, val
);
2176 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_OVER1G
);
2177 bnx2_read_phy(bp
, MII_BNX2_OVER1G_UP1
, &val
);
2178 if (bp
->phy_flags
& BNX2_PHY_FLAG_2_5G_CAPABLE
)
2179 val
|= BCM5708S_UP1_2G5
;
2181 val
&= ~BCM5708S_UP1_2G5
;
2182 bnx2_write_phy(bp
, MII_BNX2_OVER1G_UP1
, val
);
2184 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_BAM_NXTPG
);
2185 bnx2_read_phy(bp
, MII_BNX2_BAM_NXTPG_CTL
, &val
);
2186 val
|= MII_BNX2_NXTPG_CTL_T2
| MII_BNX2_NXTPG_CTL_BAM
;
2187 bnx2_write_phy(bp
, MII_BNX2_BAM_NXTPG_CTL
, val
);
2189 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_CL73_USERB0
);
2191 val
= MII_BNX2_CL73_BAM_EN
| MII_BNX2_CL73_BAM_STA_MGR_EN
|
2192 MII_BNX2_CL73_BAM_NP_AFT_BP_EN
;
2193 bnx2_write_phy(bp
, MII_BNX2_CL73_BAM_CTL1
, val
);
2195 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_COMBO_IEEEB0
);
2201 bnx2_init_5708s_phy(struct bnx2
*bp
, int reset_phy
)
2208 bp
->mii_up1
= BCM5708S_UP1
;
2210 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
, BCM5708S_BLK_ADDR_DIG3
);
2211 bnx2_write_phy(bp
, BCM5708S_DIG_3_0
, BCM5708S_DIG_3_0_USE_IEEE
);
2212 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
, BCM5708S_BLK_ADDR_DIG
);
2214 bnx2_read_phy(bp
, BCM5708S_1000X_CTL1
, &val
);
2215 val
|= BCM5708S_1000X_CTL1_FIBER_MODE
| BCM5708S_1000X_CTL1_AUTODET_EN
;
2216 bnx2_write_phy(bp
, BCM5708S_1000X_CTL1
, val
);
2218 bnx2_read_phy(bp
, BCM5708S_1000X_CTL2
, &val
);
2219 val
|= BCM5708S_1000X_CTL2_PLLEL_DET_EN
;
2220 bnx2_write_phy(bp
, BCM5708S_1000X_CTL2
, val
);
2222 if (bp
->phy_flags
& BNX2_PHY_FLAG_2_5G_CAPABLE
) {
2223 bnx2_read_phy(bp
, BCM5708S_UP1
, &val
);
2224 val
|= BCM5708S_UP1_2G5
;
2225 bnx2_write_phy(bp
, BCM5708S_UP1
, val
);
2228 if ((CHIP_ID(bp
) == CHIP_ID_5708_A0
) ||
2229 (CHIP_ID(bp
) == CHIP_ID_5708_B0
) ||
2230 (CHIP_ID(bp
) == CHIP_ID_5708_B1
)) {
2231 /* increase tx signal amplitude */
2232 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
,
2233 BCM5708S_BLK_ADDR_TX_MISC
);
2234 bnx2_read_phy(bp
, BCM5708S_TX_ACTL1
, &val
);
2235 val
&= ~BCM5708S_TX_ACTL1_DRIVER_VCM
;
2236 bnx2_write_phy(bp
, BCM5708S_TX_ACTL1
, val
);
2237 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
, BCM5708S_BLK_ADDR_DIG
);
2240 val
= bnx2_shmem_rd(bp
, BNX2_PORT_HW_CFG_CONFIG
) &
2241 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK
;
2246 is_backplane
= bnx2_shmem_rd(bp
, BNX2_SHARED_HW_CFG_CONFIG
);
2247 if (is_backplane
& BNX2_SHARED_HW_CFG_PHY_BACKPLANE
) {
2248 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
,
2249 BCM5708S_BLK_ADDR_TX_MISC
);
2250 bnx2_write_phy(bp
, BCM5708S_TX_ACTL3
, val
);
2251 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
,
2252 BCM5708S_BLK_ADDR_DIG
);
2259 bnx2_init_5706s_phy(struct bnx2
*bp
, int reset_phy
)
2264 bp
->phy_flags
&= ~BNX2_PHY_FLAG_PARALLEL_DETECT
;
2266 if (CHIP_NUM(bp
) == CHIP_NUM_5706
)
2267 REG_WR(bp
, BNX2_MISC_GP_HW_CTL0
, 0x300);
2269 if (bp
->dev
->mtu
> 1500) {
2272 /* Set extended packet length bit */
2273 bnx2_write_phy(bp
, 0x18, 0x7);
2274 bnx2_read_phy(bp
, 0x18, &val
);
2275 bnx2_write_phy(bp
, 0x18, (val
& 0xfff8) | 0x4000);
2277 bnx2_write_phy(bp
, 0x1c, 0x6c00);
2278 bnx2_read_phy(bp
, 0x1c, &val
);
2279 bnx2_write_phy(bp
, 0x1c, (val
& 0x3ff) | 0xec02);
2284 bnx2_write_phy(bp
, 0x18, 0x7);
2285 bnx2_read_phy(bp
, 0x18, &val
);
2286 bnx2_write_phy(bp
, 0x18, val
& ~0x4007);
2288 bnx2_write_phy(bp
, 0x1c, 0x6c00);
2289 bnx2_read_phy(bp
, 0x1c, &val
);
2290 bnx2_write_phy(bp
, 0x1c, (val
& 0x3fd) | 0xec00);
2297 bnx2_init_copper_phy(struct bnx2
*bp
, int reset_phy
)
2304 if (bp
->phy_flags
& BNX2_PHY_FLAG_CRC_FIX
) {
2305 bnx2_write_phy(bp
, 0x18, 0x0c00);
2306 bnx2_write_phy(bp
, 0x17, 0x000a);
2307 bnx2_write_phy(bp
, 0x15, 0x310b);
2308 bnx2_write_phy(bp
, 0x17, 0x201f);
2309 bnx2_write_phy(bp
, 0x15, 0x9506);
2310 bnx2_write_phy(bp
, 0x17, 0x401f);
2311 bnx2_write_phy(bp
, 0x15, 0x14e2);
2312 bnx2_write_phy(bp
, 0x18, 0x0400);
2315 if (bp
->phy_flags
& BNX2_PHY_FLAG_DIS_EARLY_DAC
) {
2316 bnx2_write_phy(bp
, MII_BNX2_DSP_ADDRESS
,
2317 MII_BNX2_DSP_EXPAND_REG
| 0x8);
2318 bnx2_read_phy(bp
, MII_BNX2_DSP_RW_PORT
, &val
);
2320 bnx2_write_phy(bp
, MII_BNX2_DSP_RW_PORT
, val
);
2323 if (bp
->dev
->mtu
> 1500) {
2324 /* Set extended packet length bit */
2325 bnx2_write_phy(bp
, 0x18, 0x7);
2326 bnx2_read_phy(bp
, 0x18, &val
);
2327 bnx2_write_phy(bp
, 0x18, val
| 0x4000);
2329 bnx2_read_phy(bp
, 0x10, &val
);
2330 bnx2_write_phy(bp
, 0x10, val
| 0x1);
2333 bnx2_write_phy(bp
, 0x18, 0x7);
2334 bnx2_read_phy(bp
, 0x18, &val
);
2335 bnx2_write_phy(bp
, 0x18, val
& ~0x4007);
2337 bnx2_read_phy(bp
, 0x10, &val
);
2338 bnx2_write_phy(bp
, 0x10, val
& ~0x1);
2341 /* ethernet@wirespeed */
2342 bnx2_write_phy(bp
, 0x18, 0x7007);
2343 bnx2_read_phy(bp
, 0x18, &val
);
2344 bnx2_write_phy(bp
, 0x18, val
| (1 << 15) | (1 << 4));
2350 bnx2_init_phy(struct bnx2
*bp
, int reset_phy
)
2351 __releases(&bp
->phy_lock
)
2352 __acquires(&bp
->phy_lock
)
2357 bp
->phy_flags
&= ~BNX2_PHY_FLAG_INT_MODE_MASK
;
2358 bp
->phy_flags
|= BNX2_PHY_FLAG_INT_MODE_LINK_READY
;
2360 bp
->mii_bmcr
= MII_BMCR
;
2361 bp
->mii_bmsr
= MII_BMSR
;
2362 bp
->mii_bmsr1
= MII_BMSR
;
2363 bp
->mii_adv
= MII_ADVERTISE
;
2364 bp
->mii_lpa
= MII_LPA
;
2366 REG_WR(bp
, BNX2_EMAC_ATTENTION_ENA
, BNX2_EMAC_ATTENTION_ENA_LINK
);
2368 if (bp
->phy_flags
& BNX2_PHY_FLAG_REMOTE_PHY_CAP
)
2371 bnx2_read_phy(bp
, MII_PHYSID1
, &val
);
2372 bp
->phy_id
= val
<< 16;
2373 bnx2_read_phy(bp
, MII_PHYSID2
, &val
);
2374 bp
->phy_id
|= val
& 0xffff;
2376 if (bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) {
2377 if (CHIP_NUM(bp
) == CHIP_NUM_5706
)
2378 rc
= bnx2_init_5706s_phy(bp
, reset_phy
);
2379 else if (CHIP_NUM(bp
) == CHIP_NUM_5708
)
2380 rc
= bnx2_init_5708s_phy(bp
, reset_phy
);
2381 else if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
2382 rc
= bnx2_init_5709s_phy(bp
, reset_phy
);
2385 rc
= bnx2_init_copper_phy(bp
, reset_phy
);
2390 rc
= bnx2_setup_phy(bp
, bp
->phy_port
);
2396 bnx2_set_mac_loopback(struct bnx2
*bp
)
2400 mac_mode
= REG_RD(bp
, BNX2_EMAC_MODE
);
2401 mac_mode
&= ~BNX2_EMAC_MODE_PORT
;
2402 mac_mode
|= BNX2_EMAC_MODE_MAC_LOOP
| BNX2_EMAC_MODE_FORCE_LINK
;
2403 REG_WR(bp
, BNX2_EMAC_MODE
, mac_mode
);
2408 static int bnx2_test_link(struct bnx2
*);
2411 bnx2_set_phy_loopback(struct bnx2
*bp
)
2416 spin_lock_bh(&bp
->phy_lock
);
2417 rc
= bnx2_write_phy(bp
, bp
->mii_bmcr
, BMCR_LOOPBACK
| BMCR_FULLDPLX
|
2419 spin_unlock_bh(&bp
->phy_lock
);
2423 for (i
= 0; i
< 10; i
++) {
2424 if (bnx2_test_link(bp
) == 0)
2429 mac_mode
= REG_RD(bp
, BNX2_EMAC_MODE
);
2430 mac_mode
&= ~(BNX2_EMAC_MODE_PORT
| BNX2_EMAC_MODE_HALF_DUPLEX
|
2431 BNX2_EMAC_MODE_MAC_LOOP
| BNX2_EMAC_MODE_FORCE_LINK
|
2432 BNX2_EMAC_MODE_25G_MODE
);
2434 mac_mode
|= BNX2_EMAC_MODE_PORT_GMII
;
2435 REG_WR(bp
, BNX2_EMAC_MODE
, mac_mode
);
2441 bnx2_fw_sync(struct bnx2
*bp
, u32 msg_data
, int ack
, int silent
)
2447 msg_data
|= bp
->fw_wr_seq
;
2449 bnx2_shmem_wr(bp
, BNX2_DRV_MB
, msg_data
);
2454 /* wait for an acknowledgement. */
2455 for (i
= 0; i
< (BNX2_FW_ACK_TIME_OUT_MS
/ 10); i
++) {
2458 val
= bnx2_shmem_rd(bp
, BNX2_FW_MB
);
2460 if ((val
& BNX2_FW_MSG_ACK
) == (msg_data
& BNX2_DRV_MSG_SEQ
))
2463 if ((msg_data
& BNX2_DRV_MSG_DATA
) == BNX2_DRV_MSG_DATA_WAIT0
)
2466 /* If we timed out, inform the firmware that this is the case. */
2467 if ((val
& BNX2_FW_MSG_ACK
) != (msg_data
& BNX2_DRV_MSG_SEQ
)) {
2469 printk(KERN_ERR PFX
"fw sync timeout, reset code = "
2472 msg_data
&= ~BNX2_DRV_MSG_CODE
;
2473 msg_data
|= BNX2_DRV_MSG_CODE_FW_TIMEOUT
;
2475 bnx2_shmem_wr(bp
, BNX2_DRV_MB
, msg_data
);
2480 if ((val
& BNX2_FW_MSG_STATUS_MASK
) != BNX2_FW_MSG_STATUS_OK
)
2487 bnx2_init_5709_context(struct bnx2
*bp
)
2492 val
= BNX2_CTX_COMMAND_ENABLED
| BNX2_CTX_COMMAND_MEM_INIT
| (1 << 12);
2493 val
|= (BCM_PAGE_BITS
- 8) << 16;
2494 REG_WR(bp
, BNX2_CTX_COMMAND
, val
);
2495 for (i
= 0; i
< 10; i
++) {
2496 val
= REG_RD(bp
, BNX2_CTX_COMMAND
);
2497 if (!(val
& BNX2_CTX_COMMAND_MEM_INIT
))
2501 if (val
& BNX2_CTX_COMMAND_MEM_INIT
)
2504 for (i
= 0; i
< bp
->ctx_pages
; i
++) {
2508 memset(bp
->ctx_blk
[i
], 0, BCM_PAGE_SIZE
);
2512 REG_WR(bp
, BNX2_CTX_HOST_PAGE_TBL_DATA0
,
2513 (bp
->ctx_blk_mapping
[i
] & 0xffffffff) |
2514 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID
);
2515 REG_WR(bp
, BNX2_CTX_HOST_PAGE_TBL_DATA1
,
2516 (u64
) bp
->ctx_blk_mapping
[i
] >> 32);
2517 REG_WR(bp
, BNX2_CTX_HOST_PAGE_TBL_CTRL
, i
|
2518 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ
);
2519 for (j
= 0; j
< 10; j
++) {
2521 val
= REG_RD(bp
, BNX2_CTX_HOST_PAGE_TBL_CTRL
);
2522 if (!(val
& BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ
))
2526 if (val
& BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ
) {
2535 bnx2_init_context(struct bnx2
*bp
)
2541 u32 vcid_addr
, pcid_addr
, offset
;
2546 if (CHIP_ID(bp
) == CHIP_ID_5706_A0
) {
2549 vcid_addr
= GET_PCID_ADDR(vcid
);
2551 new_vcid
= 0x60 + (vcid
& 0xf0) + (vcid
& 0x7);
2556 pcid_addr
= GET_PCID_ADDR(new_vcid
);
2559 vcid_addr
= GET_CID_ADDR(vcid
);
2560 pcid_addr
= vcid_addr
;
2563 for (i
= 0; i
< (CTX_SIZE
/ PHY_CTX_SIZE
); i
++) {
2564 vcid_addr
+= (i
<< PHY_CTX_SHIFT
);
2565 pcid_addr
+= (i
<< PHY_CTX_SHIFT
);
2567 REG_WR(bp
, BNX2_CTX_VIRT_ADDR
, vcid_addr
);
2568 REG_WR(bp
, BNX2_CTX_PAGE_TBL
, pcid_addr
);
2570 /* Zero out the context. */
2571 for (offset
= 0; offset
< PHY_CTX_SIZE
; offset
+= 4)
2572 bnx2_ctx_wr(bp
, vcid_addr
, offset
, 0);
2578 bnx2_alloc_bad_rbuf(struct bnx2
*bp
)
2584 good_mbuf
= kmalloc(512 * sizeof(u16
), GFP_KERNEL
);
2585 if (good_mbuf
== NULL
) {
2586 printk(KERN_ERR PFX
"Failed to allocate memory in "
2587 "bnx2_alloc_bad_rbuf\n");
2591 REG_WR(bp
, BNX2_MISC_ENABLE_SET_BITS
,
2592 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE
);
2596 /* Allocate a bunch of mbufs and save the good ones in an array. */
2597 val
= bnx2_reg_rd_ind(bp
, BNX2_RBUF_STATUS1
);
2598 while (val
& BNX2_RBUF_STATUS1_FREE_COUNT
) {
2599 bnx2_reg_wr_ind(bp
, BNX2_RBUF_COMMAND
,
2600 BNX2_RBUF_COMMAND_ALLOC_REQ
);
2602 val
= bnx2_reg_rd_ind(bp
, BNX2_RBUF_FW_BUF_ALLOC
);
2604 val
&= BNX2_RBUF_FW_BUF_ALLOC_VALUE
;
2606 /* The addresses with Bit 9 set are bad memory blocks. */
2607 if (!(val
& (1 << 9))) {
2608 good_mbuf
[good_mbuf_cnt
] = (u16
) val
;
2612 val
= bnx2_reg_rd_ind(bp
, BNX2_RBUF_STATUS1
);
2615 /* Free the good ones back to the mbuf pool thus discarding
2616 * all the bad ones. */
2617 while (good_mbuf_cnt
) {
2620 val
= good_mbuf
[good_mbuf_cnt
];
2621 val
= (val
<< 9) | val
| 1;
2623 bnx2_reg_wr_ind(bp
, BNX2_RBUF_FW_BUF_FREE
, val
);
2630 bnx2_set_mac_addr(struct bnx2
*bp
, u8
*mac_addr
, u32 pos
)
2634 val
= (mac_addr
[0] << 8) | mac_addr
[1];
2636 REG_WR(bp
, BNX2_EMAC_MAC_MATCH0
+ (pos
* 8), val
);
2638 val
= (mac_addr
[2] << 24) | (mac_addr
[3] << 16) |
2639 (mac_addr
[4] << 8) | mac_addr
[5];
2641 REG_WR(bp
, BNX2_EMAC_MAC_MATCH1
+ (pos
* 8), val
);
2645 bnx2_alloc_rx_page(struct bnx2
*bp
, struct bnx2_rx_ring_info
*rxr
, u16 index
)
2648 struct sw_pg
*rx_pg
= &rxr
->rx_pg_ring
[index
];
2649 struct rx_bd
*rxbd
=
2650 &rxr
->rx_pg_desc_ring
[RX_RING(index
)][RX_IDX(index
)];
2651 struct page
*page
= alloc_page(GFP_ATOMIC
);
2655 mapping
= pci_map_page(bp
->pdev
, page
, 0, PAGE_SIZE
,
2656 PCI_DMA_FROMDEVICE
);
2657 if (pci_dma_mapping_error(bp
->pdev
, mapping
)) {
2663 pci_unmap_addr_set(rx_pg
, mapping
, mapping
);
2664 rxbd
->rx_bd_haddr_hi
= (u64
) mapping
>> 32;
2665 rxbd
->rx_bd_haddr_lo
= (u64
) mapping
& 0xffffffff;
2670 bnx2_free_rx_page(struct bnx2
*bp
, struct bnx2_rx_ring_info
*rxr
, u16 index
)
2672 struct sw_pg
*rx_pg
= &rxr
->rx_pg_ring
[index
];
2673 struct page
*page
= rx_pg
->page
;
2678 pci_unmap_page(bp
->pdev
, pci_unmap_addr(rx_pg
, mapping
), PAGE_SIZE
,
2679 PCI_DMA_FROMDEVICE
);
2686 bnx2_alloc_rx_skb(struct bnx2
*bp
, struct bnx2_rx_ring_info
*rxr
, u16 index
)
2688 struct sk_buff
*skb
;
2689 struct sw_bd
*rx_buf
= &rxr
->rx_buf_ring
[index
];
2691 struct rx_bd
*rxbd
= &rxr
->rx_desc_ring
[RX_RING(index
)][RX_IDX(index
)];
2692 unsigned long align
;
2694 skb
= netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
2699 if (unlikely((align
= (unsigned long) skb
->data
& (BNX2_RX_ALIGN
- 1))))
2700 skb_reserve(skb
, BNX2_RX_ALIGN
- align
);
2702 mapping
= pci_map_single(bp
->pdev
, skb
->data
, bp
->rx_buf_use_size
,
2703 PCI_DMA_FROMDEVICE
);
2704 if (pci_dma_mapping_error(bp
->pdev
, mapping
)) {
2710 pci_unmap_addr_set(rx_buf
, mapping
, mapping
);
2712 rxbd
->rx_bd_haddr_hi
= (u64
) mapping
>> 32;
2713 rxbd
->rx_bd_haddr_lo
= (u64
) mapping
& 0xffffffff;
2715 rxr
->rx_prod_bseq
+= bp
->rx_buf_use_size
;
2721 bnx2_phy_event_is_set(struct bnx2
*bp
, struct bnx2_napi
*bnapi
, u32 event
)
2723 struct status_block
*sblk
= bnapi
->status_blk
.msi
;
2724 u32 new_link_state
, old_link_state
;
2727 new_link_state
= sblk
->status_attn_bits
& event
;
2728 old_link_state
= sblk
->status_attn_bits_ack
& event
;
2729 if (new_link_state
!= old_link_state
) {
2731 REG_WR(bp
, BNX2_PCICFG_STATUS_BIT_SET_CMD
, event
);
2733 REG_WR(bp
, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD
, event
);
2741 bnx2_phy_int(struct bnx2
*bp
, struct bnx2_napi
*bnapi
)
2743 spin_lock(&bp
->phy_lock
);
2745 if (bnx2_phy_event_is_set(bp
, bnapi
, STATUS_ATTN_BITS_LINK_STATE
))
2747 if (bnx2_phy_event_is_set(bp
, bnapi
, STATUS_ATTN_BITS_TIMER_ABORT
))
2748 bnx2_set_remote_link(bp
);
2750 spin_unlock(&bp
->phy_lock
);
2755 bnx2_get_hw_tx_cons(struct bnx2_napi
*bnapi
)
2759 /* Tell compiler that status block fields can change. */
2761 cons
= *bnapi
->hw_tx_cons_ptr
;
2763 if (unlikely((cons
& MAX_TX_DESC_CNT
) == MAX_TX_DESC_CNT
))
2769 bnx2_tx_int(struct bnx2
*bp
, struct bnx2_napi
*bnapi
, int budget
)
2771 struct bnx2_tx_ring_info
*txr
= &bnapi
->tx_ring
;
2772 u16 hw_cons
, sw_cons
, sw_ring_cons
;
2773 int tx_pkt
= 0, index
;
2774 struct netdev_queue
*txq
;
2776 index
= (bnapi
- bp
->bnx2_napi
);
2777 txq
= netdev_get_tx_queue(bp
->dev
, index
);
2779 hw_cons
= bnx2_get_hw_tx_cons(bnapi
);
2780 sw_cons
= txr
->tx_cons
;
2782 while (sw_cons
!= hw_cons
) {
2783 struct sw_tx_bd
*tx_buf
;
2784 struct sk_buff
*skb
;
2787 sw_ring_cons
= TX_RING_IDX(sw_cons
);
2789 tx_buf
= &txr
->tx_buf_ring
[sw_ring_cons
];
2792 /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2793 prefetch(&skb
->end
);
2795 /* partial BD completions possible with TSO packets */
2796 if (tx_buf
->is_gso
) {
2797 u16 last_idx
, last_ring_idx
;
2799 last_idx
= sw_cons
+ tx_buf
->nr_frags
+ 1;
2800 last_ring_idx
= sw_ring_cons
+ tx_buf
->nr_frags
+ 1;
2801 if (unlikely(last_ring_idx
>= MAX_TX_DESC_CNT
)) {
2804 if (((s16
) ((s16
) last_idx
- (s16
) hw_cons
)) > 0) {
2809 skb_dma_unmap(&bp
->pdev
->dev
, skb
, DMA_TO_DEVICE
);
2812 last
= tx_buf
->nr_frags
;
2814 for (i
= 0; i
< last
; i
++) {
2815 sw_cons
= NEXT_TX_BD(sw_cons
);
2818 sw_cons
= NEXT_TX_BD(sw_cons
);
2822 if (tx_pkt
== budget
)
2825 if (hw_cons
== sw_cons
)
2826 hw_cons
= bnx2_get_hw_tx_cons(bnapi
);
2829 txr
->hw_tx_cons
= hw_cons
;
2830 txr
->tx_cons
= sw_cons
;
2832 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2833 * before checking for netif_tx_queue_stopped(). Without the
2834 * memory barrier, there is a small possibility that bnx2_start_xmit()
2835 * will miss it and cause the queue to be stopped forever.
2839 if (unlikely(netif_tx_queue_stopped(txq
)) &&
2840 (bnx2_tx_avail(bp
, txr
) > bp
->tx_wake_thresh
)) {
2841 __netif_tx_lock(txq
, smp_processor_id());
2842 if ((netif_tx_queue_stopped(txq
)) &&
2843 (bnx2_tx_avail(bp
, txr
) > bp
->tx_wake_thresh
))
2844 netif_tx_wake_queue(txq
);
2845 __netif_tx_unlock(txq
);
2852 bnx2_reuse_rx_skb_pages(struct bnx2
*bp
, struct bnx2_rx_ring_info
*rxr
,
2853 struct sk_buff
*skb
, int count
)
2855 struct sw_pg
*cons_rx_pg
, *prod_rx_pg
;
2856 struct rx_bd
*cons_bd
, *prod_bd
;
2859 u16 cons
= rxr
->rx_pg_cons
;
2861 cons_rx_pg
= &rxr
->rx_pg_ring
[cons
];
2863 /* The caller was unable to allocate a new page to replace the
2864 * last one in the frags array, so we need to recycle that page
2865 * and then free the skb.
2869 struct skb_shared_info
*shinfo
;
2871 shinfo
= skb_shinfo(skb
);
2873 page
= shinfo
->frags
[shinfo
->nr_frags
].page
;
2874 shinfo
->frags
[shinfo
->nr_frags
].page
= NULL
;
2876 cons_rx_pg
->page
= page
;
2880 hw_prod
= rxr
->rx_pg_prod
;
2882 for (i
= 0; i
< count
; i
++) {
2883 prod
= RX_PG_RING_IDX(hw_prod
);
2885 prod_rx_pg
= &rxr
->rx_pg_ring
[prod
];
2886 cons_rx_pg
= &rxr
->rx_pg_ring
[cons
];
2887 cons_bd
= &rxr
->rx_pg_desc_ring
[RX_RING(cons
)][RX_IDX(cons
)];
2888 prod_bd
= &rxr
->rx_pg_desc_ring
[RX_RING(prod
)][RX_IDX(prod
)];
2891 prod_rx_pg
->page
= cons_rx_pg
->page
;
2892 cons_rx_pg
->page
= NULL
;
2893 pci_unmap_addr_set(prod_rx_pg
, mapping
,
2894 pci_unmap_addr(cons_rx_pg
, mapping
));
2896 prod_bd
->rx_bd_haddr_hi
= cons_bd
->rx_bd_haddr_hi
;
2897 prod_bd
->rx_bd_haddr_lo
= cons_bd
->rx_bd_haddr_lo
;
2900 cons
= RX_PG_RING_IDX(NEXT_RX_BD(cons
));
2901 hw_prod
= NEXT_RX_BD(hw_prod
);
2903 rxr
->rx_pg_prod
= hw_prod
;
2904 rxr
->rx_pg_cons
= cons
;
2908 bnx2_reuse_rx_skb(struct bnx2
*bp
, struct bnx2_rx_ring_info
*rxr
,
2909 struct sk_buff
*skb
, u16 cons
, u16 prod
)
2911 struct sw_bd
*cons_rx_buf
, *prod_rx_buf
;
2912 struct rx_bd
*cons_bd
, *prod_bd
;
2914 cons_rx_buf
= &rxr
->rx_buf_ring
[cons
];
2915 prod_rx_buf
= &rxr
->rx_buf_ring
[prod
];
2917 pci_dma_sync_single_for_device(bp
->pdev
,
2918 pci_unmap_addr(cons_rx_buf
, mapping
),
2919 BNX2_RX_OFFSET
+ BNX2_RX_COPY_THRESH
, PCI_DMA_FROMDEVICE
);
2921 rxr
->rx_prod_bseq
+= bp
->rx_buf_use_size
;
2923 prod_rx_buf
->skb
= skb
;
2928 pci_unmap_addr_set(prod_rx_buf
, mapping
,
2929 pci_unmap_addr(cons_rx_buf
, mapping
));
2931 cons_bd
= &rxr
->rx_desc_ring
[RX_RING(cons
)][RX_IDX(cons
)];
2932 prod_bd
= &rxr
->rx_desc_ring
[RX_RING(prod
)][RX_IDX(prod
)];
2933 prod_bd
->rx_bd_haddr_hi
= cons_bd
->rx_bd_haddr_hi
;
2934 prod_bd
->rx_bd_haddr_lo
= cons_bd
->rx_bd_haddr_lo
;
2938 bnx2_rx_skb(struct bnx2
*bp
, struct bnx2_rx_ring_info
*rxr
, struct sk_buff
*skb
,
2939 unsigned int len
, unsigned int hdr_len
, dma_addr_t dma_addr
,
2943 u16 prod
= ring_idx
& 0xffff;
2945 err
= bnx2_alloc_rx_skb(bp
, rxr
, prod
);
2946 if (unlikely(err
)) {
2947 bnx2_reuse_rx_skb(bp
, rxr
, skb
, (u16
) (ring_idx
>> 16), prod
);
2949 unsigned int raw_len
= len
+ 4;
2950 int pages
= PAGE_ALIGN(raw_len
- hdr_len
) >> PAGE_SHIFT
;
2952 bnx2_reuse_rx_skb_pages(bp
, rxr
, NULL
, pages
);
2957 skb_reserve(skb
, BNX2_RX_OFFSET
);
2958 pci_unmap_single(bp
->pdev
, dma_addr
, bp
->rx_buf_use_size
,
2959 PCI_DMA_FROMDEVICE
);
2965 unsigned int i
, frag_len
, frag_size
, pages
;
2966 struct sw_pg
*rx_pg
;
2967 u16 pg_cons
= rxr
->rx_pg_cons
;
2968 u16 pg_prod
= rxr
->rx_pg_prod
;
2970 frag_size
= len
+ 4 - hdr_len
;
2971 pages
= PAGE_ALIGN(frag_size
) >> PAGE_SHIFT
;
2972 skb_put(skb
, hdr_len
);
2974 for (i
= 0; i
< pages
; i
++) {
2975 dma_addr_t mapping_old
;
2977 frag_len
= min(frag_size
, (unsigned int) PAGE_SIZE
);
2978 if (unlikely(frag_len
<= 4)) {
2979 unsigned int tail
= 4 - frag_len
;
2981 rxr
->rx_pg_cons
= pg_cons
;
2982 rxr
->rx_pg_prod
= pg_prod
;
2983 bnx2_reuse_rx_skb_pages(bp
, rxr
, NULL
,
2990 &skb_shinfo(skb
)->frags
[i
- 1];
2992 skb
->data_len
-= tail
;
2993 skb
->truesize
-= tail
;
2997 rx_pg
= &rxr
->rx_pg_ring
[pg_cons
];
2999 /* Don't unmap yet. If we're unable to allocate a new
3000 * page, we need to recycle the page and the DMA addr.
3002 mapping_old
= pci_unmap_addr(rx_pg
, mapping
);
3006 skb_fill_page_desc(skb
, i
, rx_pg
->page
, 0, frag_len
);
3009 err
= bnx2_alloc_rx_page(bp
, rxr
,
3010 RX_PG_RING_IDX(pg_prod
));
3011 if (unlikely(err
)) {
3012 rxr
->rx_pg_cons
= pg_cons
;
3013 rxr
->rx_pg_prod
= pg_prod
;
3014 bnx2_reuse_rx_skb_pages(bp
, rxr
, skb
,
3019 pci_unmap_page(bp
->pdev
, mapping_old
,
3020 PAGE_SIZE
, PCI_DMA_FROMDEVICE
);
3022 frag_size
-= frag_len
;
3023 skb
->data_len
+= frag_len
;
3024 skb
->truesize
+= frag_len
;
3025 skb
->len
+= frag_len
;
3027 pg_prod
= NEXT_RX_BD(pg_prod
);
3028 pg_cons
= RX_PG_RING_IDX(NEXT_RX_BD(pg_cons
));
3030 rxr
->rx_pg_prod
= pg_prod
;
3031 rxr
->rx_pg_cons
= pg_cons
;
3037 bnx2_get_hw_rx_cons(struct bnx2_napi
*bnapi
)
3041 /* Tell compiler that status block fields can change. */
3043 cons
= *bnapi
->hw_rx_cons_ptr
;
3045 if (unlikely((cons
& MAX_RX_DESC_CNT
) == MAX_RX_DESC_CNT
))
3051 bnx2_rx_int(struct bnx2
*bp
, struct bnx2_napi
*bnapi
, int budget
)
3053 struct bnx2_rx_ring_info
*rxr
= &bnapi
->rx_ring
;
3054 u16 hw_cons
, sw_cons
, sw_ring_cons
, sw_prod
, sw_ring_prod
;
3055 struct l2_fhdr
*rx_hdr
;
3056 int rx_pkt
= 0, pg_ring_used
= 0;
3058 hw_cons
= bnx2_get_hw_rx_cons(bnapi
);
3059 sw_cons
= rxr
->rx_cons
;
3060 sw_prod
= rxr
->rx_prod
;
3062 /* Memory barrier necessary as speculative reads of the rx
3063 * buffer can be ahead of the index in the status block
3066 while (sw_cons
!= hw_cons
) {
3067 unsigned int len
, hdr_len
;
3069 struct sw_bd
*rx_buf
;
3070 struct sk_buff
*skb
;
3071 dma_addr_t dma_addr
;
3073 int hw_vlan __maybe_unused
= 0;
3075 sw_ring_cons
= RX_RING_IDX(sw_cons
);
3076 sw_ring_prod
= RX_RING_IDX(sw_prod
);
3078 rx_buf
= &rxr
->rx_buf_ring
[sw_ring_cons
];
3083 dma_addr
= pci_unmap_addr(rx_buf
, mapping
);
3085 pci_dma_sync_single_for_cpu(bp
->pdev
, dma_addr
,
3086 BNX2_RX_OFFSET
+ BNX2_RX_COPY_THRESH
,
3087 PCI_DMA_FROMDEVICE
);
3089 rx_hdr
= (struct l2_fhdr
*) skb
->data
;
3090 len
= rx_hdr
->l2_fhdr_pkt_len
;
3091 status
= rx_hdr
->l2_fhdr_status
;
3094 if (status
& L2_FHDR_STATUS_SPLIT
) {
3095 hdr_len
= rx_hdr
->l2_fhdr_ip_xsum
;
3097 } else if (len
> bp
->rx_jumbo_thresh
) {
3098 hdr_len
= bp
->rx_jumbo_thresh
;
3102 if (unlikely(status
& (L2_FHDR_ERRORS_BAD_CRC
|
3103 L2_FHDR_ERRORS_PHY_DECODE
|
3104 L2_FHDR_ERRORS_ALIGNMENT
|
3105 L2_FHDR_ERRORS_TOO_SHORT
|
3106 L2_FHDR_ERRORS_GIANT_FRAME
))) {
3108 bnx2_reuse_rx_skb(bp
, rxr
, skb
, sw_ring_cons
,
3113 pages
= PAGE_ALIGN(len
- hdr_len
) >> PAGE_SHIFT
;
3115 bnx2_reuse_rx_skb_pages(bp
, rxr
, NULL
, pages
);
3122 if (len
<= bp
->rx_copy_thresh
) {
3123 struct sk_buff
*new_skb
;
3125 new_skb
= netdev_alloc_skb(bp
->dev
, len
+ 6);
3126 if (new_skb
== NULL
) {
3127 bnx2_reuse_rx_skb(bp
, rxr
, skb
, sw_ring_cons
,
3133 skb_copy_from_linear_data_offset(skb
,
3135 new_skb
->data
, len
+ 6);
3136 skb_reserve(new_skb
, 6);
3137 skb_put(new_skb
, len
);
3139 bnx2_reuse_rx_skb(bp
, rxr
, skb
,
3140 sw_ring_cons
, sw_ring_prod
);
3143 } else if (unlikely(bnx2_rx_skb(bp
, rxr
, skb
, len
, hdr_len
,
3144 dma_addr
, (sw_ring_cons
<< 16) | sw_ring_prod
)))
3147 if ((status
& L2_FHDR_STATUS_L2_VLAN_TAG
) &&
3148 !(bp
->rx_mode
& BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG
)) {
3149 vtag
= rx_hdr
->l2_fhdr_vlan_tag
;
3156 struct vlan_ethhdr
*ve
= (struct vlan_ethhdr
*)
3159 memmove(ve
, skb
->data
+ 4, ETH_ALEN
* 2);
3160 ve
->h_vlan_proto
= htons(ETH_P_8021Q
);
3161 ve
->h_vlan_TCI
= htons(vtag
);
3166 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
3168 if ((len
> (bp
->dev
->mtu
+ ETH_HLEN
)) &&
3169 (ntohs(skb
->protocol
) != 0x8100)) {
3176 skb
->ip_summed
= CHECKSUM_NONE
;
3178 (status
& (L2_FHDR_STATUS_TCP_SEGMENT
|
3179 L2_FHDR_STATUS_UDP_DATAGRAM
))) {
3181 if (likely((status
& (L2_FHDR_ERRORS_TCP_XSUM
|
3182 L2_FHDR_ERRORS_UDP_XSUM
)) == 0))
3183 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3186 skb_record_rx_queue(skb
, bnapi
- &bp
->bnx2_napi
[0]);
3190 vlan_hwaccel_receive_skb(skb
, bp
->vlgrp
, vtag
);
3193 netif_receive_skb(skb
);
3198 sw_cons
= NEXT_RX_BD(sw_cons
);
3199 sw_prod
= NEXT_RX_BD(sw_prod
);
3201 if ((rx_pkt
== budget
))
3204 /* Refresh hw_cons to see if there is new work */
3205 if (sw_cons
== hw_cons
) {
3206 hw_cons
= bnx2_get_hw_rx_cons(bnapi
);
3210 rxr
->rx_cons
= sw_cons
;
3211 rxr
->rx_prod
= sw_prod
;
3214 REG_WR16(bp
, rxr
->rx_pg_bidx_addr
, rxr
->rx_pg_prod
);
3216 REG_WR16(bp
, rxr
->rx_bidx_addr
, sw_prod
);
3218 REG_WR(bp
, rxr
->rx_bseq_addr
, rxr
->rx_prod_bseq
);
3226 /* MSI ISR - The only difference between this and the INTx ISR
3227 * is that the MSI interrupt is always serviced.
3230 bnx2_msi(int irq
, void *dev_instance
)
3232 struct bnx2_napi
*bnapi
= dev_instance
;
3233 struct bnx2
*bp
= bnapi
->bp
;
3235 prefetch(bnapi
->status_blk
.msi
);
3236 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
3237 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM
|
3238 BNX2_PCICFG_INT_ACK_CMD_MASK_INT
);
3240 /* Return here if interrupt is disabled. */
3241 if (unlikely(atomic_read(&bp
->intr_sem
) != 0))
3244 napi_schedule(&bnapi
->napi
);
3250 bnx2_msi_1shot(int irq
, void *dev_instance
)
3252 struct bnx2_napi
*bnapi
= dev_instance
;
3253 struct bnx2
*bp
= bnapi
->bp
;
3255 prefetch(bnapi
->status_blk
.msi
);
3257 /* Return here if interrupt is disabled. */
3258 if (unlikely(atomic_read(&bp
->intr_sem
) != 0))
3261 napi_schedule(&bnapi
->napi
);
3267 bnx2_interrupt(int irq
, void *dev_instance
)
3269 struct bnx2_napi
*bnapi
= dev_instance
;
3270 struct bnx2
*bp
= bnapi
->bp
;
3271 struct status_block
*sblk
= bnapi
->status_blk
.msi
;
3273 /* When using INTx, it is possible for the interrupt to arrive
3274 * at the CPU before the status block posted prior to the
3275 * interrupt. Reading a register will flush the status block.
3276 * When using MSI, the MSI message will always complete after
3277 * the status block write.
3279 if ((sblk
->status_idx
== bnapi
->last_status_idx
) &&
3280 (REG_RD(bp
, BNX2_PCICFG_MISC_STATUS
) &
3281 BNX2_PCICFG_MISC_STATUS_INTA_VALUE
))
3284 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
3285 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM
|
3286 BNX2_PCICFG_INT_ACK_CMD_MASK_INT
);
3288 /* Read back to deassert IRQ immediately to avoid too many
3289 * spurious interrupts.
3291 REG_RD(bp
, BNX2_PCICFG_INT_ACK_CMD
);
3293 /* Return here if interrupt is shared and is disabled. */
3294 if (unlikely(atomic_read(&bp
->intr_sem
) != 0))
3297 if (napi_schedule_prep(&bnapi
->napi
)) {
3298 bnapi
->last_status_idx
= sblk
->status_idx
;
3299 __napi_schedule(&bnapi
->napi
);
3306 bnx2_has_fast_work(struct bnx2_napi
*bnapi
)
3308 struct bnx2_tx_ring_info
*txr
= &bnapi
->tx_ring
;
3309 struct bnx2_rx_ring_info
*rxr
= &bnapi
->rx_ring
;
3311 if ((bnx2_get_hw_rx_cons(bnapi
) != rxr
->rx_cons
) ||
3312 (bnx2_get_hw_tx_cons(bnapi
) != txr
->hw_tx_cons
))
3317 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3318 STATUS_ATTN_BITS_TIMER_ABORT)
3321 bnx2_has_work(struct bnx2_napi
*bnapi
)
3323 struct status_block
*sblk
= bnapi
->status_blk
.msi
;
3325 if (bnx2_has_fast_work(bnapi
))
3329 if (bnapi
->cnic_present
&& (bnapi
->cnic_tag
!= sblk
->status_idx
))
3333 if ((sblk
->status_attn_bits
& STATUS_ATTN_EVENTS
) !=
3334 (sblk
->status_attn_bits_ack
& STATUS_ATTN_EVENTS
))
3341 bnx2_chk_missed_msi(struct bnx2
*bp
)
3343 struct bnx2_napi
*bnapi
= &bp
->bnx2_napi
[0];
3346 if (bnx2_has_work(bnapi
)) {
3347 msi_ctrl
= REG_RD(bp
, BNX2_PCICFG_MSI_CONTROL
);
3348 if (!(msi_ctrl
& BNX2_PCICFG_MSI_CONTROL_ENABLE
))
3351 if (bnapi
->last_status_idx
== bp
->idle_chk_status_idx
) {
3352 REG_WR(bp
, BNX2_PCICFG_MSI_CONTROL
, msi_ctrl
&
3353 ~BNX2_PCICFG_MSI_CONTROL_ENABLE
);
3354 REG_WR(bp
, BNX2_PCICFG_MSI_CONTROL
, msi_ctrl
);
3355 bnx2_msi(bp
->irq_tbl
[0].vector
, bnapi
);
3359 bp
->idle_chk_status_idx
= bnapi
->last_status_idx
;
3363 static void bnx2_poll_cnic(struct bnx2
*bp
, struct bnx2_napi
*bnapi
)
3365 struct cnic_ops
*c_ops
;
3367 if (!bnapi
->cnic_present
)
3371 c_ops
= rcu_dereference(bp
->cnic_ops
);
3373 bnapi
->cnic_tag
= c_ops
->cnic_handler(bp
->cnic_data
,
3374 bnapi
->status_blk
.msi
);
3379 static void bnx2_poll_link(struct bnx2
*bp
, struct bnx2_napi
*bnapi
)
3381 struct status_block
*sblk
= bnapi
->status_blk
.msi
;
3382 u32 status_attn_bits
= sblk
->status_attn_bits
;
3383 u32 status_attn_bits_ack
= sblk
->status_attn_bits_ack
;
3385 if ((status_attn_bits
& STATUS_ATTN_EVENTS
) !=
3386 (status_attn_bits_ack
& STATUS_ATTN_EVENTS
)) {
3388 bnx2_phy_int(bp
, bnapi
);
3390 /* This is needed to take care of transient status
3391 * during link changes.
3393 REG_WR(bp
, BNX2_HC_COMMAND
,
3394 bp
->hc_cmd
| BNX2_HC_COMMAND_COAL_NOW_WO_INT
);
3395 REG_RD(bp
, BNX2_HC_COMMAND
);
3399 static int bnx2_poll_work(struct bnx2
*bp
, struct bnx2_napi
*bnapi
,
3400 int work_done
, int budget
)
3402 struct bnx2_tx_ring_info
*txr
= &bnapi
->tx_ring
;
3403 struct bnx2_rx_ring_info
*rxr
= &bnapi
->rx_ring
;
3405 if (bnx2_get_hw_tx_cons(bnapi
) != txr
->hw_tx_cons
)
3406 bnx2_tx_int(bp
, bnapi
, 0);
3408 if (bnx2_get_hw_rx_cons(bnapi
) != rxr
->rx_cons
)
3409 work_done
+= bnx2_rx_int(bp
, bnapi
, budget
- work_done
);
3414 static int bnx2_poll_msix(struct napi_struct
*napi
, int budget
)
3416 struct bnx2_napi
*bnapi
= container_of(napi
, struct bnx2_napi
, napi
);
3417 struct bnx2
*bp
= bnapi
->bp
;
3419 struct status_block_msix
*sblk
= bnapi
->status_blk
.msix
;
3422 work_done
= bnx2_poll_work(bp
, bnapi
, work_done
, budget
);
3423 if (unlikely(work_done
>= budget
))
3426 bnapi
->last_status_idx
= sblk
->status_idx
;
3427 /* status idx must be read before checking for more work. */
3429 if (likely(!bnx2_has_fast_work(bnapi
))) {
3431 napi_complete(napi
);
3432 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
, bnapi
->int_num
|
3433 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
|
3434 bnapi
->last_status_idx
);
3441 static int bnx2_poll(struct napi_struct
*napi
, int budget
)
3443 struct bnx2_napi
*bnapi
= container_of(napi
, struct bnx2_napi
, napi
);
3444 struct bnx2
*bp
= bnapi
->bp
;
3446 struct status_block
*sblk
= bnapi
->status_blk
.msi
;
3449 bnx2_poll_link(bp
, bnapi
);
3451 work_done
= bnx2_poll_work(bp
, bnapi
, work_done
, budget
);
3454 bnx2_poll_cnic(bp
, bnapi
);
3457 /* bnapi->last_status_idx is used below to tell the hw how
3458 * much work has been processed, so we must read it before
3459 * checking for more work.
3461 bnapi
->last_status_idx
= sblk
->status_idx
;
3463 if (unlikely(work_done
>= budget
))
3467 if (likely(!bnx2_has_work(bnapi
))) {
3468 napi_complete(napi
);
3469 if (likely(bp
->flags
& BNX2_FLAG_USING_MSI_OR_MSIX
)) {
3470 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
3471 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
|
3472 bnapi
->last_status_idx
);
3475 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
3476 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
|
3477 BNX2_PCICFG_INT_ACK_CMD_MASK_INT
|
3478 bnapi
->last_status_idx
);
3480 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
3481 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
|
3482 bnapi
->last_status_idx
);
3490 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3491 * from set_multicast.
3494 bnx2_set_rx_mode(struct net_device
*dev
)
3496 struct bnx2
*bp
= netdev_priv(dev
);
3497 u32 rx_mode
, sort_mode
;
3498 struct netdev_hw_addr
*ha
;
3501 if (!netif_running(dev
))
3504 spin_lock_bh(&bp
->phy_lock
);
3506 rx_mode
= bp
->rx_mode
& ~(BNX2_EMAC_RX_MODE_PROMISCUOUS
|
3507 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG
);
3508 sort_mode
= 1 | BNX2_RPM_SORT_USER0_BC_EN
;
3510 if (!bp
->vlgrp
&& (bp
->flags
& BNX2_FLAG_CAN_KEEP_VLAN
))
3511 rx_mode
|= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG
;
3513 if (bp
->flags
& BNX2_FLAG_CAN_KEEP_VLAN
)
3514 rx_mode
|= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG
;
3516 if (dev
->flags
& IFF_PROMISC
) {
3517 /* Promiscuous mode. */
3518 rx_mode
|= BNX2_EMAC_RX_MODE_PROMISCUOUS
;
3519 sort_mode
|= BNX2_RPM_SORT_USER0_PROM_EN
|
3520 BNX2_RPM_SORT_USER0_PROM_VLAN
;
3522 else if (dev
->flags
& IFF_ALLMULTI
) {
3523 for (i
= 0; i
< NUM_MC_HASH_REGISTERS
; i
++) {
3524 REG_WR(bp
, BNX2_EMAC_MULTICAST_HASH0
+ (i
* 4),
3527 sort_mode
|= BNX2_RPM_SORT_USER0_MC_EN
;
3530 /* Accept one or more multicast(s). */
3531 struct dev_mc_list
*mclist
;
3532 u32 mc_filter
[NUM_MC_HASH_REGISTERS
];
3537 memset(mc_filter
, 0, 4 * NUM_MC_HASH_REGISTERS
);
3539 for (i
= 0, mclist
= dev
->mc_list
; mclist
&& i
< dev
->mc_count
;
3540 i
++, mclist
= mclist
->next
) {
3542 crc
= ether_crc_le(ETH_ALEN
, mclist
->dmi_addr
);
3544 regidx
= (bit
& 0xe0) >> 5;
3546 mc_filter
[regidx
] |= (1 << bit
);
3549 for (i
= 0; i
< NUM_MC_HASH_REGISTERS
; i
++) {
3550 REG_WR(bp
, BNX2_EMAC_MULTICAST_HASH0
+ (i
* 4),
3554 sort_mode
|= BNX2_RPM_SORT_USER0_MC_HSH_EN
;
3557 if (dev
->uc
.count
> BNX2_MAX_UNICAST_ADDRESSES
) {
3558 rx_mode
|= BNX2_EMAC_RX_MODE_PROMISCUOUS
;
3559 sort_mode
|= BNX2_RPM_SORT_USER0_PROM_EN
|
3560 BNX2_RPM_SORT_USER0_PROM_VLAN
;
3561 } else if (!(dev
->flags
& IFF_PROMISC
)) {
3562 /* Add all entries into to the match filter list */
3564 list_for_each_entry(ha
, &dev
->uc
.list
, list
) {
3565 bnx2_set_mac_addr(bp
, ha
->addr
,
3566 i
+ BNX2_START_UNICAST_ADDRESS_INDEX
);
3568 (i
+ BNX2_START_UNICAST_ADDRESS_INDEX
));
3574 if (rx_mode
!= bp
->rx_mode
) {
3575 bp
->rx_mode
= rx_mode
;
3576 REG_WR(bp
, BNX2_EMAC_RX_MODE
, rx_mode
);
3579 REG_WR(bp
, BNX2_RPM_SORT_USER0
, 0x0);
3580 REG_WR(bp
, BNX2_RPM_SORT_USER0
, sort_mode
);
3581 REG_WR(bp
, BNX2_RPM_SORT_USER0
, sort_mode
| BNX2_RPM_SORT_USER0_ENA
);
3583 spin_unlock_bh(&bp
->phy_lock
);
3586 static int __devinit
3587 check_fw_section(const struct firmware
*fw
,
3588 const struct bnx2_fw_file_section
*section
,
3589 u32 alignment
, bool non_empty
)
3591 u32 offset
= be32_to_cpu(section
->offset
);
3592 u32 len
= be32_to_cpu(section
->len
);
3594 if ((offset
== 0 && len
!= 0) || offset
>= fw
->size
|| offset
& 3)
3596 if ((non_empty
&& len
== 0) || len
> fw
->size
- offset
||
3597 len
& (alignment
- 1))
3602 static int __devinit
3603 check_mips_fw_entry(const struct firmware
*fw
,
3604 const struct bnx2_mips_fw_file_entry
*entry
)
3606 if (check_fw_section(fw
, &entry
->text
, 4, true) ||
3607 check_fw_section(fw
, &entry
->data
, 4, false) ||
3608 check_fw_section(fw
, &entry
->rodata
, 4, false))
3613 static int __devinit
3614 bnx2_request_firmware(struct bnx2
*bp
)
3616 const char *mips_fw_file
, *rv2p_fw_file
;
3617 const struct bnx2_mips_fw_file
*mips_fw
;
3618 const struct bnx2_rv2p_fw_file
*rv2p_fw
;
3621 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
3622 mips_fw_file
= FW_MIPS_FILE_09
;
3623 rv2p_fw_file
= FW_RV2P_FILE_09
;
3625 mips_fw_file
= FW_MIPS_FILE_06
;
3626 rv2p_fw_file
= FW_RV2P_FILE_06
;
3629 rc
= request_firmware(&bp
->mips_firmware
, mips_fw_file
, &bp
->pdev
->dev
);
3631 printk(KERN_ERR PFX
"Can't load firmware file \"%s\"\n",
3636 rc
= request_firmware(&bp
->rv2p_firmware
, rv2p_fw_file
, &bp
->pdev
->dev
);
3638 printk(KERN_ERR PFX
"Can't load firmware file \"%s\"\n",
3642 mips_fw
= (const struct bnx2_mips_fw_file
*) bp
->mips_firmware
->data
;
3643 rv2p_fw
= (const struct bnx2_rv2p_fw_file
*) bp
->rv2p_firmware
->data
;
3644 if (bp
->mips_firmware
->size
< sizeof(*mips_fw
) ||
3645 check_mips_fw_entry(bp
->mips_firmware
, &mips_fw
->com
) ||
3646 check_mips_fw_entry(bp
->mips_firmware
, &mips_fw
->cp
) ||
3647 check_mips_fw_entry(bp
->mips_firmware
, &mips_fw
->rxp
) ||
3648 check_mips_fw_entry(bp
->mips_firmware
, &mips_fw
->tpat
) ||
3649 check_mips_fw_entry(bp
->mips_firmware
, &mips_fw
->txp
)) {
3650 printk(KERN_ERR PFX
"Firmware file \"%s\" is invalid\n",
3654 if (bp
->rv2p_firmware
->size
< sizeof(*rv2p_fw
) ||
3655 check_fw_section(bp
->rv2p_firmware
, &rv2p_fw
->proc1
.rv2p
, 8, true) ||
3656 check_fw_section(bp
->rv2p_firmware
, &rv2p_fw
->proc2
.rv2p
, 8, true)) {
3657 printk(KERN_ERR PFX
"Firmware file \"%s\" is invalid\n",
3666 rv2p_fw_fixup(u32 rv2p_proc
, int idx
, u32 loc
, u32 rv2p_code
)
3669 case RV2P_P1_FIXUP_PAGE_SIZE_IDX
:
3670 rv2p_code
&= ~RV2P_BD_PAGE_SIZE_MSK
;
3671 rv2p_code
|= RV2P_BD_PAGE_SIZE
;
3678 load_rv2p_fw(struct bnx2
*bp
, u32 rv2p_proc
,
3679 const struct bnx2_rv2p_fw_file_entry
*fw_entry
)
3681 u32 rv2p_code_len
, file_offset
;
3686 rv2p_code_len
= be32_to_cpu(fw_entry
->rv2p
.len
);
3687 file_offset
= be32_to_cpu(fw_entry
->rv2p
.offset
);
3689 rv2p_code
= (__be32
*)(bp
->rv2p_firmware
->data
+ file_offset
);
3691 if (rv2p_proc
== RV2P_PROC1
) {
3692 cmd
= BNX2_RV2P_PROC1_ADDR_CMD_RDWR
;
3693 addr
= BNX2_RV2P_PROC1_ADDR_CMD
;
3695 cmd
= BNX2_RV2P_PROC2_ADDR_CMD_RDWR
;
3696 addr
= BNX2_RV2P_PROC2_ADDR_CMD
;
3699 for (i
= 0; i
< rv2p_code_len
; i
+= 8) {
3700 REG_WR(bp
, BNX2_RV2P_INSTR_HIGH
, be32_to_cpu(*rv2p_code
));
3702 REG_WR(bp
, BNX2_RV2P_INSTR_LOW
, be32_to_cpu(*rv2p_code
));
3705 val
= (i
/ 8) | cmd
;
3706 REG_WR(bp
, addr
, val
);
3709 rv2p_code
= (__be32
*)(bp
->rv2p_firmware
->data
+ file_offset
);
3710 for (i
= 0; i
< 8; i
++) {
3713 loc
= be32_to_cpu(fw_entry
->fixup
[i
]);
3714 if (loc
&& ((loc
* 4) < rv2p_code_len
)) {
3715 code
= be32_to_cpu(*(rv2p_code
+ loc
- 1));
3716 REG_WR(bp
, BNX2_RV2P_INSTR_HIGH
, code
);
3717 code
= be32_to_cpu(*(rv2p_code
+ loc
));
3718 code
= rv2p_fw_fixup(rv2p_proc
, i
, loc
, code
);
3719 REG_WR(bp
, BNX2_RV2P_INSTR_LOW
, code
);
3721 val
= (loc
/ 2) | cmd
;
3722 REG_WR(bp
, addr
, val
);
3726 /* Reset the processor, un-stall is done later. */
3727 if (rv2p_proc
== RV2P_PROC1
) {
3728 REG_WR(bp
, BNX2_RV2P_COMMAND
, BNX2_RV2P_COMMAND_PROC1_RESET
);
3731 REG_WR(bp
, BNX2_RV2P_COMMAND
, BNX2_RV2P_COMMAND_PROC2_RESET
);
3738 load_cpu_fw(struct bnx2
*bp
, const struct cpu_reg
*cpu_reg
,
3739 const struct bnx2_mips_fw_file_entry
*fw_entry
)
3741 u32 addr
, len
, file_offset
;
3747 val
= bnx2_reg_rd_ind(bp
, cpu_reg
->mode
);
3748 val
|= cpu_reg
->mode_value_halt
;
3749 bnx2_reg_wr_ind(bp
, cpu_reg
->mode
, val
);
3750 bnx2_reg_wr_ind(bp
, cpu_reg
->state
, cpu_reg
->state_value_clear
);
3752 /* Load the Text area. */
3753 addr
= be32_to_cpu(fw_entry
->text
.addr
);
3754 len
= be32_to_cpu(fw_entry
->text
.len
);
3755 file_offset
= be32_to_cpu(fw_entry
->text
.offset
);
3756 data
= (__be32
*)(bp
->mips_firmware
->data
+ file_offset
);
3758 offset
= cpu_reg
->spad_base
+ (addr
- cpu_reg
->mips_view_base
);
3762 for (j
= 0; j
< (len
/ 4); j
++, offset
+= 4)
3763 bnx2_reg_wr_ind(bp
, offset
, be32_to_cpu(data
[j
]));
3766 /* Load the Data area. */
3767 addr
= be32_to_cpu(fw_entry
->data
.addr
);
3768 len
= be32_to_cpu(fw_entry
->data
.len
);
3769 file_offset
= be32_to_cpu(fw_entry
->data
.offset
);
3770 data
= (__be32
*)(bp
->mips_firmware
->data
+ file_offset
);
3772 offset
= cpu_reg
->spad_base
+ (addr
- cpu_reg
->mips_view_base
);
3776 for (j
= 0; j
< (len
/ 4); j
++, offset
+= 4)
3777 bnx2_reg_wr_ind(bp
, offset
, be32_to_cpu(data
[j
]));
3780 /* Load the Read-Only area. */
3781 addr
= be32_to_cpu(fw_entry
->rodata
.addr
);
3782 len
= be32_to_cpu(fw_entry
->rodata
.len
);
3783 file_offset
= be32_to_cpu(fw_entry
->rodata
.offset
);
3784 data
= (__be32
*)(bp
->mips_firmware
->data
+ file_offset
);
3786 offset
= cpu_reg
->spad_base
+ (addr
- cpu_reg
->mips_view_base
);
3790 for (j
= 0; j
< (len
/ 4); j
++, offset
+= 4)
3791 bnx2_reg_wr_ind(bp
, offset
, be32_to_cpu(data
[j
]));
3794 /* Clear the pre-fetch instruction. */
3795 bnx2_reg_wr_ind(bp
, cpu_reg
->inst
, 0);
3797 val
= be32_to_cpu(fw_entry
->start_addr
);
3798 bnx2_reg_wr_ind(bp
, cpu_reg
->pc
, val
);
3800 /* Start the CPU. */
3801 val
= bnx2_reg_rd_ind(bp
, cpu_reg
->mode
);
3802 val
&= ~cpu_reg
->mode_value_halt
;
3803 bnx2_reg_wr_ind(bp
, cpu_reg
->state
, cpu_reg
->state_value_clear
);
3804 bnx2_reg_wr_ind(bp
, cpu_reg
->mode
, val
);
3810 bnx2_init_cpus(struct bnx2
*bp
)
3812 const struct bnx2_mips_fw_file
*mips_fw
=
3813 (const struct bnx2_mips_fw_file
*) bp
->mips_firmware
->data
;
3814 const struct bnx2_rv2p_fw_file
*rv2p_fw
=
3815 (const struct bnx2_rv2p_fw_file
*) bp
->rv2p_firmware
->data
;
3818 /* Initialize the RV2P processor. */
3819 load_rv2p_fw(bp
, RV2P_PROC1
, &rv2p_fw
->proc1
);
3820 load_rv2p_fw(bp
, RV2P_PROC2
, &rv2p_fw
->proc2
);
3822 /* Initialize the RX Processor. */
3823 rc
= load_cpu_fw(bp
, &cpu_reg_rxp
, &mips_fw
->rxp
);
3827 /* Initialize the TX Processor. */
3828 rc
= load_cpu_fw(bp
, &cpu_reg_txp
, &mips_fw
->txp
);
3832 /* Initialize the TX Patch-up Processor. */
3833 rc
= load_cpu_fw(bp
, &cpu_reg_tpat
, &mips_fw
->tpat
);
3837 /* Initialize the Completion Processor. */
3838 rc
= load_cpu_fw(bp
, &cpu_reg_com
, &mips_fw
->com
);
3842 /* Initialize the Command Processor. */
3843 rc
= load_cpu_fw(bp
, &cpu_reg_cp
, &mips_fw
->cp
);
3850 bnx2_set_power_state(struct bnx2
*bp
, pci_power_t state
)
3854 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
3860 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
3861 (pmcsr
& ~PCI_PM_CTRL_STATE_MASK
) |
3862 PCI_PM_CTRL_PME_STATUS
);
3864 if (pmcsr
& PCI_PM_CTRL_STATE_MASK
)
3865 /* delay required during transition out of D3hot */
3868 val
= REG_RD(bp
, BNX2_EMAC_MODE
);
3869 val
|= BNX2_EMAC_MODE_MPKT_RCVD
| BNX2_EMAC_MODE_ACPI_RCVD
;
3870 val
&= ~BNX2_EMAC_MODE_MPKT
;
3871 REG_WR(bp
, BNX2_EMAC_MODE
, val
);
3873 val
= REG_RD(bp
, BNX2_RPM_CONFIG
);
3874 val
&= ~BNX2_RPM_CONFIG_ACPI_ENA
;
3875 REG_WR(bp
, BNX2_RPM_CONFIG
, val
);
3886 autoneg
= bp
->autoneg
;
3887 advertising
= bp
->advertising
;
3889 if (bp
->phy_port
== PORT_TP
) {
3890 bp
->autoneg
= AUTONEG_SPEED
;
3891 bp
->advertising
= ADVERTISED_10baseT_Half
|
3892 ADVERTISED_10baseT_Full
|
3893 ADVERTISED_100baseT_Half
|
3894 ADVERTISED_100baseT_Full
|
3898 spin_lock_bh(&bp
->phy_lock
);
3899 bnx2_setup_phy(bp
, bp
->phy_port
);
3900 spin_unlock_bh(&bp
->phy_lock
);
3902 bp
->autoneg
= autoneg
;
3903 bp
->advertising
= advertising
;
3905 bnx2_set_mac_addr(bp
, bp
->dev
->dev_addr
, 0);
3907 val
= REG_RD(bp
, BNX2_EMAC_MODE
);
3909 /* Enable port mode. */
3910 val
&= ~BNX2_EMAC_MODE_PORT
;
3911 val
|= BNX2_EMAC_MODE_MPKT_RCVD
|
3912 BNX2_EMAC_MODE_ACPI_RCVD
|
3913 BNX2_EMAC_MODE_MPKT
;
3914 if (bp
->phy_port
== PORT_TP
)
3915 val
|= BNX2_EMAC_MODE_PORT_MII
;
3917 val
|= BNX2_EMAC_MODE_PORT_GMII
;
3918 if (bp
->line_speed
== SPEED_2500
)
3919 val
|= BNX2_EMAC_MODE_25G_MODE
;
3922 REG_WR(bp
, BNX2_EMAC_MODE
, val
);
3924 /* receive all multicast */
3925 for (i
= 0; i
< NUM_MC_HASH_REGISTERS
; i
++) {
3926 REG_WR(bp
, BNX2_EMAC_MULTICAST_HASH0
+ (i
* 4),
3929 REG_WR(bp
, BNX2_EMAC_RX_MODE
,
3930 BNX2_EMAC_RX_MODE_SORT_MODE
);
3932 val
= 1 | BNX2_RPM_SORT_USER0_BC_EN
|
3933 BNX2_RPM_SORT_USER0_MC_EN
;
3934 REG_WR(bp
, BNX2_RPM_SORT_USER0
, 0x0);
3935 REG_WR(bp
, BNX2_RPM_SORT_USER0
, val
);
3936 REG_WR(bp
, BNX2_RPM_SORT_USER0
, val
|
3937 BNX2_RPM_SORT_USER0_ENA
);
3939 /* Need to enable EMAC and RPM for WOL. */
3940 REG_WR(bp
, BNX2_MISC_ENABLE_SET_BITS
,
3941 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE
|
3942 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE
|
3943 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE
);
3945 val
= REG_RD(bp
, BNX2_RPM_CONFIG
);
3946 val
&= ~BNX2_RPM_CONFIG_ACPI_ENA
;
3947 REG_WR(bp
, BNX2_RPM_CONFIG
, val
);
3949 wol_msg
= BNX2_DRV_MSG_CODE_SUSPEND_WOL
;
3952 wol_msg
= BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL
;
3955 if (!(bp
->flags
& BNX2_FLAG_NO_WOL
))
3956 bnx2_fw_sync(bp
, BNX2_DRV_MSG_DATA_WAIT3
| wol_msg
,
3959 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
3960 if ((CHIP_ID(bp
) == CHIP_ID_5706_A0
) ||
3961 (CHIP_ID(bp
) == CHIP_ID_5706_A1
)) {
3970 pmcsr
|= PCI_PM_CTRL_PME_ENABLE
;
3972 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
3975 /* No more memory access after this point until
3976 * device is brought back to D0.
3988 bnx2_acquire_nvram_lock(struct bnx2
*bp
)
3993 /* Request access to the flash interface. */
3994 REG_WR(bp
, BNX2_NVM_SW_ARB
, BNX2_NVM_SW_ARB_ARB_REQ_SET2
);
3995 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
3996 val
= REG_RD(bp
, BNX2_NVM_SW_ARB
);
3997 if (val
& BNX2_NVM_SW_ARB_ARB_ARB2
)
4003 if (j
>= NVRAM_TIMEOUT_COUNT
)
4010 bnx2_release_nvram_lock(struct bnx2
*bp
)
4015 /* Relinquish nvram interface. */
4016 REG_WR(bp
, BNX2_NVM_SW_ARB
, BNX2_NVM_SW_ARB_ARB_REQ_CLR2
);
4018 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
4019 val
= REG_RD(bp
, BNX2_NVM_SW_ARB
);
4020 if (!(val
& BNX2_NVM_SW_ARB_ARB_ARB2
))
4026 if (j
>= NVRAM_TIMEOUT_COUNT
)
4034 bnx2_enable_nvram_write(struct bnx2
*bp
)
4038 val
= REG_RD(bp
, BNX2_MISC_CFG
);
4039 REG_WR(bp
, BNX2_MISC_CFG
, val
| BNX2_MISC_CFG_NVM_WR_EN_PCI
);
4041 if (bp
->flash_info
->flags
& BNX2_NV_WREN
) {
4044 REG_WR(bp
, BNX2_NVM_COMMAND
, BNX2_NVM_COMMAND_DONE
);
4045 REG_WR(bp
, BNX2_NVM_COMMAND
,
4046 BNX2_NVM_COMMAND_WREN
| BNX2_NVM_COMMAND_DOIT
);
4048 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
4051 val
= REG_RD(bp
, BNX2_NVM_COMMAND
);
4052 if (val
& BNX2_NVM_COMMAND_DONE
)
4056 if (j
>= NVRAM_TIMEOUT_COUNT
)
4063 bnx2_disable_nvram_write(struct bnx2
*bp
)
4067 val
= REG_RD(bp
, BNX2_MISC_CFG
);
4068 REG_WR(bp
, BNX2_MISC_CFG
, val
& ~BNX2_MISC_CFG_NVM_WR_EN
);
4073 bnx2_enable_nvram_access(struct bnx2
*bp
)
4077 val
= REG_RD(bp
, BNX2_NVM_ACCESS_ENABLE
);
4078 /* Enable both bits, even on read. */
4079 REG_WR(bp
, BNX2_NVM_ACCESS_ENABLE
,
4080 val
| BNX2_NVM_ACCESS_ENABLE_EN
| BNX2_NVM_ACCESS_ENABLE_WR_EN
);
4084 bnx2_disable_nvram_access(struct bnx2
*bp
)
4088 val
= REG_RD(bp
, BNX2_NVM_ACCESS_ENABLE
);
4089 /* Disable both bits, even after read. */
4090 REG_WR(bp
, BNX2_NVM_ACCESS_ENABLE
,
4091 val
& ~(BNX2_NVM_ACCESS_ENABLE_EN
|
4092 BNX2_NVM_ACCESS_ENABLE_WR_EN
));
4096 bnx2_nvram_erase_page(struct bnx2
*bp
, u32 offset
)
4101 if (bp
->flash_info
->flags
& BNX2_NV_BUFFERED
)
4102 /* Buffered flash, no erase needed */
4105 /* Build an erase command */
4106 cmd
= BNX2_NVM_COMMAND_ERASE
| BNX2_NVM_COMMAND_WR
|
4107 BNX2_NVM_COMMAND_DOIT
;
4109 /* Need to clear DONE bit separately. */
4110 REG_WR(bp
, BNX2_NVM_COMMAND
, BNX2_NVM_COMMAND_DONE
);
4112 /* Address of the NVRAM to read from. */
4113 REG_WR(bp
, BNX2_NVM_ADDR
, offset
& BNX2_NVM_ADDR_NVM_ADDR_VALUE
);
4115 /* Issue an erase command. */
4116 REG_WR(bp
, BNX2_NVM_COMMAND
, cmd
);
4118 /* Wait for completion. */
4119 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
4124 val
= REG_RD(bp
, BNX2_NVM_COMMAND
);
4125 if (val
& BNX2_NVM_COMMAND_DONE
)
4129 if (j
>= NVRAM_TIMEOUT_COUNT
)
4136 bnx2_nvram_read_dword(struct bnx2
*bp
, u32 offset
, u8
*ret_val
, u32 cmd_flags
)
4141 /* Build the command word. */
4142 cmd
= BNX2_NVM_COMMAND_DOIT
| cmd_flags
;
4144 /* Calculate an offset of a buffered flash, not needed for 5709. */
4145 if (bp
->flash_info
->flags
& BNX2_NV_TRANSLATE
) {
4146 offset
= ((offset
/ bp
->flash_info
->page_size
) <<
4147 bp
->flash_info
->page_bits
) +
4148 (offset
% bp
->flash_info
->page_size
);
4151 /* Need to clear DONE bit separately. */
4152 REG_WR(bp
, BNX2_NVM_COMMAND
, BNX2_NVM_COMMAND_DONE
);
4154 /* Address of the NVRAM to read from. */
4155 REG_WR(bp
, BNX2_NVM_ADDR
, offset
& BNX2_NVM_ADDR_NVM_ADDR_VALUE
);
4157 /* Issue a read command. */
4158 REG_WR(bp
, BNX2_NVM_COMMAND
, cmd
);
4160 /* Wait for completion. */
4161 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
4166 val
= REG_RD(bp
, BNX2_NVM_COMMAND
);
4167 if (val
& BNX2_NVM_COMMAND_DONE
) {
4168 __be32 v
= cpu_to_be32(REG_RD(bp
, BNX2_NVM_READ
));
4169 memcpy(ret_val
, &v
, 4);
4173 if (j
>= NVRAM_TIMEOUT_COUNT
)
4181 bnx2_nvram_write_dword(struct bnx2
*bp
, u32 offset
, u8
*val
, u32 cmd_flags
)
4187 /* Build the command word. */
4188 cmd
= BNX2_NVM_COMMAND_DOIT
| BNX2_NVM_COMMAND_WR
| cmd_flags
;
4190 /* Calculate an offset of a buffered flash, not needed for 5709. */
4191 if (bp
->flash_info
->flags
& BNX2_NV_TRANSLATE
) {
4192 offset
= ((offset
/ bp
->flash_info
->page_size
) <<
4193 bp
->flash_info
->page_bits
) +
4194 (offset
% bp
->flash_info
->page_size
);
4197 /* Need to clear DONE bit separately. */
4198 REG_WR(bp
, BNX2_NVM_COMMAND
, BNX2_NVM_COMMAND_DONE
);
4200 memcpy(&val32
, val
, 4);
4202 /* Write the data. */
4203 REG_WR(bp
, BNX2_NVM_WRITE
, be32_to_cpu(val32
));
4205 /* Address of the NVRAM to write to. */
4206 REG_WR(bp
, BNX2_NVM_ADDR
, offset
& BNX2_NVM_ADDR_NVM_ADDR_VALUE
);
4208 /* Issue the write command. */
4209 REG_WR(bp
, BNX2_NVM_COMMAND
, cmd
);
4211 /* Wait for completion. */
4212 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
4215 if (REG_RD(bp
, BNX2_NVM_COMMAND
) & BNX2_NVM_COMMAND_DONE
)
4218 if (j
>= NVRAM_TIMEOUT_COUNT
)
4225 bnx2_init_nvram(struct bnx2
*bp
)
4228 int j
, entry_count
, rc
= 0;
4229 struct flash_spec
*flash
;
4231 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
4232 bp
->flash_info
= &flash_5709
;
4233 goto get_flash_size
;
4236 /* Determine the selected interface. */
4237 val
= REG_RD(bp
, BNX2_NVM_CFG1
);
4239 entry_count
= ARRAY_SIZE(flash_table
);
4241 if (val
& 0x40000000) {
4243 /* Flash interface has been reconfigured */
4244 for (j
= 0, flash
= &flash_table
[0]; j
< entry_count
;
4246 if ((val
& FLASH_BACKUP_STRAP_MASK
) ==
4247 (flash
->config1
& FLASH_BACKUP_STRAP_MASK
)) {
4248 bp
->flash_info
= flash
;
4255 /* Not yet been reconfigured */
4257 if (val
& (1 << 23))
4258 mask
= FLASH_BACKUP_STRAP_MASK
;
4260 mask
= FLASH_STRAP_MASK
;
4262 for (j
= 0, flash
= &flash_table
[0]; j
< entry_count
;
4265 if ((val
& mask
) == (flash
->strapping
& mask
)) {
4266 bp
->flash_info
= flash
;
4268 /* Request access to the flash interface. */
4269 if ((rc
= bnx2_acquire_nvram_lock(bp
)) != 0)
4272 /* Enable access to flash interface */
4273 bnx2_enable_nvram_access(bp
);
4275 /* Reconfigure the flash interface */
4276 REG_WR(bp
, BNX2_NVM_CFG1
, flash
->config1
);
4277 REG_WR(bp
, BNX2_NVM_CFG2
, flash
->config2
);
4278 REG_WR(bp
, BNX2_NVM_CFG3
, flash
->config3
);
4279 REG_WR(bp
, BNX2_NVM_WRITE1
, flash
->write1
);
4281 /* Disable access to flash interface */
4282 bnx2_disable_nvram_access(bp
);
4283 bnx2_release_nvram_lock(bp
);
4288 } /* if (val & 0x40000000) */
4290 if (j
== entry_count
) {
4291 bp
->flash_info
= NULL
;
4292 printk(KERN_ALERT PFX
"Unknown flash/EEPROM type.\n");
4297 val
= bnx2_shmem_rd(bp
, BNX2_SHARED_HW_CFG_CONFIG2
);
4298 val
&= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK
;
4300 bp
->flash_size
= val
;
4302 bp
->flash_size
= bp
->flash_info
->total_size
;
4308 bnx2_nvram_read(struct bnx2
*bp
, u32 offset
, u8
*ret_buf
,
4312 u32 cmd_flags
, offset32
, len32
, extra
;
4317 /* Request access to the flash interface. */
4318 if ((rc
= bnx2_acquire_nvram_lock(bp
)) != 0)
4321 /* Enable access to flash interface */
4322 bnx2_enable_nvram_access(bp
);
4335 pre_len
= 4 - (offset
& 3);
4337 if (pre_len
>= len32
) {
4339 cmd_flags
= BNX2_NVM_COMMAND_FIRST
|
4340 BNX2_NVM_COMMAND_LAST
;
4343 cmd_flags
= BNX2_NVM_COMMAND_FIRST
;
4346 rc
= bnx2_nvram_read_dword(bp
, offset32
, buf
, cmd_flags
);
4351 memcpy(ret_buf
, buf
+ (offset
& 3), pre_len
);
4358 extra
= 4 - (len32
& 3);
4359 len32
= (len32
+ 4) & ~3;
4366 cmd_flags
= BNX2_NVM_COMMAND_LAST
;
4368 cmd_flags
= BNX2_NVM_COMMAND_FIRST
|
4369 BNX2_NVM_COMMAND_LAST
;
4371 rc
= bnx2_nvram_read_dword(bp
, offset32
, buf
, cmd_flags
);
4373 memcpy(ret_buf
, buf
, 4 - extra
);
4375 else if (len32
> 0) {
4378 /* Read the first word. */
4382 cmd_flags
= BNX2_NVM_COMMAND_FIRST
;
4384 rc
= bnx2_nvram_read_dword(bp
, offset32
, ret_buf
, cmd_flags
);
4386 /* Advance to the next dword. */
4391 while (len32
> 4 && rc
== 0) {
4392 rc
= bnx2_nvram_read_dword(bp
, offset32
, ret_buf
, 0);
4394 /* Advance to the next dword. */
4403 cmd_flags
= BNX2_NVM_COMMAND_LAST
;
4404 rc
= bnx2_nvram_read_dword(bp
, offset32
, buf
, cmd_flags
);
4406 memcpy(ret_buf
, buf
, 4 - extra
);
4409 /* Disable access to flash interface */
4410 bnx2_disable_nvram_access(bp
);
4412 bnx2_release_nvram_lock(bp
);
4418 bnx2_nvram_write(struct bnx2
*bp
, u32 offset
, u8
*data_buf
,
4421 u32 written
, offset32
, len32
;
4422 u8
*buf
, start
[4], end
[4], *align_buf
= NULL
, *flash_buffer
= NULL
;
4424 int align_start
, align_end
;
4429 align_start
= align_end
= 0;
4431 if ((align_start
= (offset32
& 3))) {
4433 len32
+= align_start
;
4436 if ((rc
= bnx2_nvram_read(bp
, offset32
, start
, 4)))
4441 align_end
= 4 - (len32
& 3);
4443 if ((rc
= bnx2_nvram_read(bp
, offset32
+ len32
- 4, end
, 4)))
4447 if (align_start
|| align_end
) {
4448 align_buf
= kmalloc(len32
, GFP_KERNEL
);
4449 if (align_buf
== NULL
)
4452 memcpy(align_buf
, start
, 4);
4455 memcpy(align_buf
+ len32
- 4, end
, 4);
4457 memcpy(align_buf
+ align_start
, data_buf
, buf_size
);
4461 if (!(bp
->flash_info
->flags
& BNX2_NV_BUFFERED
)) {
4462 flash_buffer
= kmalloc(264, GFP_KERNEL
);
4463 if (flash_buffer
== NULL
) {
4465 goto nvram_write_end
;
4470 while ((written
< len32
) && (rc
== 0)) {
4471 u32 page_start
, page_end
, data_start
, data_end
;
4472 u32 addr
, cmd_flags
;
4475 /* Find the page_start addr */
4476 page_start
= offset32
+ written
;
4477 page_start
-= (page_start
% bp
->flash_info
->page_size
);
4478 /* Find the page_end addr */
4479 page_end
= page_start
+ bp
->flash_info
->page_size
;
4480 /* Find the data_start addr */
4481 data_start
= (written
== 0) ? offset32
: page_start
;
4482 /* Find the data_end addr */
4483 data_end
= (page_end
> offset32
+ len32
) ?
4484 (offset32
+ len32
) : page_end
;
4486 /* Request access to the flash interface. */
4487 if ((rc
= bnx2_acquire_nvram_lock(bp
)) != 0)
4488 goto nvram_write_end
;
4490 /* Enable access to flash interface */
4491 bnx2_enable_nvram_access(bp
);
4493 cmd_flags
= BNX2_NVM_COMMAND_FIRST
;
4494 if (!(bp
->flash_info
->flags
& BNX2_NV_BUFFERED
)) {
4497 /* Read the whole page into the buffer
4498 * (non-buffer flash only) */
4499 for (j
= 0; j
< bp
->flash_info
->page_size
; j
+= 4) {
4500 if (j
== (bp
->flash_info
->page_size
- 4)) {
4501 cmd_flags
|= BNX2_NVM_COMMAND_LAST
;
4503 rc
= bnx2_nvram_read_dword(bp
,
4509 goto nvram_write_end
;
4515 /* Enable writes to flash interface (unlock write-protect) */
4516 if ((rc
= bnx2_enable_nvram_write(bp
)) != 0)
4517 goto nvram_write_end
;
4519 /* Loop to write back the buffer data from page_start to
4522 if (!(bp
->flash_info
->flags
& BNX2_NV_BUFFERED
)) {
4523 /* Erase the page */
4524 if ((rc
= bnx2_nvram_erase_page(bp
, page_start
)) != 0)
4525 goto nvram_write_end
;
4527 /* Re-enable the write again for the actual write */
4528 bnx2_enable_nvram_write(bp
);
4530 for (addr
= page_start
; addr
< data_start
;
4531 addr
+= 4, i
+= 4) {
4533 rc
= bnx2_nvram_write_dword(bp
, addr
,
4534 &flash_buffer
[i
], cmd_flags
);
4537 goto nvram_write_end
;
4543 /* Loop to write the new data from data_start to data_end */
4544 for (addr
= data_start
; addr
< data_end
; addr
+= 4, i
+= 4) {
4545 if ((addr
== page_end
- 4) ||
4546 ((bp
->flash_info
->flags
& BNX2_NV_BUFFERED
) &&
4547 (addr
== data_end
- 4))) {
4549 cmd_flags
|= BNX2_NVM_COMMAND_LAST
;
4551 rc
= bnx2_nvram_write_dword(bp
, addr
, buf
,
4555 goto nvram_write_end
;
4561 /* Loop to write back the buffer data from data_end
4563 if (!(bp
->flash_info
->flags
& BNX2_NV_BUFFERED
)) {
4564 for (addr
= data_end
; addr
< page_end
;
4565 addr
+= 4, i
+= 4) {
4567 if (addr
== page_end
-4) {
4568 cmd_flags
= BNX2_NVM_COMMAND_LAST
;
4570 rc
= bnx2_nvram_write_dword(bp
, addr
,
4571 &flash_buffer
[i
], cmd_flags
);
4574 goto nvram_write_end
;
4580 /* Disable writes to flash interface (lock write-protect) */
4581 bnx2_disable_nvram_write(bp
);
4583 /* Disable access to flash interface */
4584 bnx2_disable_nvram_access(bp
);
4585 bnx2_release_nvram_lock(bp
);
4587 /* Increment written */
4588 written
+= data_end
- data_start
;
4592 kfree(flash_buffer
);
4598 bnx2_init_fw_cap(struct bnx2
*bp
)
4602 bp
->phy_flags
&= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP
;
4603 bp
->flags
&= ~BNX2_FLAG_CAN_KEEP_VLAN
;
4605 if (!(bp
->flags
& BNX2_FLAG_ASF_ENABLE
))
4606 bp
->flags
|= BNX2_FLAG_CAN_KEEP_VLAN
;
4608 val
= bnx2_shmem_rd(bp
, BNX2_FW_CAP_MB
);
4609 if ((val
& BNX2_FW_CAP_SIGNATURE_MASK
) != BNX2_FW_CAP_SIGNATURE
)
4612 if ((val
& BNX2_FW_CAP_CAN_KEEP_VLAN
) == BNX2_FW_CAP_CAN_KEEP_VLAN
) {
4613 bp
->flags
|= BNX2_FLAG_CAN_KEEP_VLAN
;
4614 sig
|= BNX2_DRV_ACK_CAP_SIGNATURE
| BNX2_FW_CAP_CAN_KEEP_VLAN
;
4617 if ((bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) &&
4618 (val
& BNX2_FW_CAP_REMOTE_PHY_CAPABLE
)) {
4621 bp
->phy_flags
|= BNX2_PHY_FLAG_REMOTE_PHY_CAP
;
4623 link
= bnx2_shmem_rd(bp
, BNX2_LINK_STATUS
);
4624 if (link
& BNX2_LINK_STATUS_SERDES_LINK
)
4625 bp
->phy_port
= PORT_FIBRE
;
4627 bp
->phy_port
= PORT_TP
;
4629 sig
|= BNX2_DRV_ACK_CAP_SIGNATURE
|
4630 BNX2_FW_CAP_REMOTE_PHY_CAPABLE
;
4633 if (netif_running(bp
->dev
) && sig
)
4634 bnx2_shmem_wr(bp
, BNX2_DRV_ACK_CAP_MB
, sig
);
4638 bnx2_setup_msix_tbl(struct bnx2
*bp
)
4640 REG_WR(bp
, BNX2_PCI_GRC_WINDOW_ADDR
, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN
);
4642 REG_WR(bp
, BNX2_PCI_GRC_WINDOW2_ADDR
, BNX2_MSIX_TABLE_ADDR
);
4643 REG_WR(bp
, BNX2_PCI_GRC_WINDOW3_ADDR
, BNX2_MSIX_PBA_ADDR
);
4647 bnx2_reset_chip(struct bnx2
*bp
, u32 reset_code
)
4653 /* Wait for the current PCI transaction to complete before
4654 * issuing a reset. */
4655 REG_WR(bp
, BNX2_MISC_ENABLE_CLR_BITS
,
4656 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE
|
4657 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE
|
4658 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE
|
4659 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE
);
4660 val
= REG_RD(bp
, BNX2_MISC_ENABLE_CLR_BITS
);
4663 /* Wait for the firmware to tell us it is ok to issue a reset. */
4664 bnx2_fw_sync(bp
, BNX2_DRV_MSG_DATA_WAIT0
| reset_code
, 1, 1);
4666 /* Deposit a driver reset signature so the firmware knows that
4667 * this is a soft reset. */
4668 bnx2_shmem_wr(bp
, BNX2_DRV_RESET_SIGNATURE
,
4669 BNX2_DRV_RESET_SIGNATURE_MAGIC
);
4671 /* Do a dummy read to force the chip to complete all current transaction
4672 * before we issue a reset. */
4673 val
= REG_RD(bp
, BNX2_MISC_ID
);
4675 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
4676 REG_WR(bp
, BNX2_MISC_COMMAND
, BNX2_MISC_COMMAND_SW_RESET
);
4677 REG_RD(bp
, BNX2_MISC_COMMAND
);
4680 val
= BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA
|
4681 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP
;
4683 pci_write_config_dword(bp
->pdev
, BNX2_PCICFG_MISC_CONFIG
, val
);
4686 val
= BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ
|
4687 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA
|
4688 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP
;
4691 REG_WR(bp
, BNX2_PCICFG_MISC_CONFIG
, val
);
4693 /* Reading back any register after chip reset will hang the
4694 * bus on 5706 A0 and A1. The msleep below provides plenty
4695 * of margin for write posting.
4697 if ((CHIP_ID(bp
) == CHIP_ID_5706_A0
) ||
4698 (CHIP_ID(bp
) == CHIP_ID_5706_A1
))
4701 /* Reset takes approximate 30 usec */
4702 for (i
= 0; i
< 10; i
++) {
4703 val
= REG_RD(bp
, BNX2_PCICFG_MISC_CONFIG
);
4704 if ((val
& (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ
|
4705 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY
)) == 0)
4710 if (val
& (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ
|
4711 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY
)) {
4712 printk(KERN_ERR PFX
"Chip reset did not complete\n");
4717 /* Make sure byte swapping is properly configured. */
4718 val
= REG_RD(bp
, BNX2_PCI_SWAP_DIAG0
);
4719 if (val
!= 0x01020304) {
4720 printk(KERN_ERR PFX
"Chip not in correct endian mode\n");
4724 /* Wait for the firmware to finish its initialization. */
4725 rc
= bnx2_fw_sync(bp
, BNX2_DRV_MSG_DATA_WAIT1
| reset_code
, 1, 0);
4729 spin_lock_bh(&bp
->phy_lock
);
4730 old_port
= bp
->phy_port
;
4731 bnx2_init_fw_cap(bp
);
4732 if ((bp
->phy_flags
& BNX2_PHY_FLAG_REMOTE_PHY_CAP
) &&
4733 old_port
!= bp
->phy_port
)
4734 bnx2_set_default_remote_link(bp
);
4735 spin_unlock_bh(&bp
->phy_lock
);
4737 if (CHIP_ID(bp
) == CHIP_ID_5706_A0
) {
4738 /* Adjust the voltage regular to two steps lower. The default
4739 * of this register is 0x0000000e. */
4740 REG_WR(bp
, BNX2_MISC_VREG_CONTROL
, 0x000000fa);
4742 /* Remove bad rbuf memory from the free pool. */
4743 rc
= bnx2_alloc_bad_rbuf(bp
);
4746 if (bp
->flags
& BNX2_FLAG_USING_MSIX
)
4747 bnx2_setup_msix_tbl(bp
);
4753 bnx2_init_chip(struct bnx2
*bp
)
4758 /* Make sure the interrupt is not active. */
4759 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
, BNX2_PCICFG_INT_ACK_CMD_MASK_INT
);
4761 val
= BNX2_DMA_CONFIG_DATA_BYTE_SWAP
|
4762 BNX2_DMA_CONFIG_DATA_WORD_SWAP
|
4764 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP
|
4766 BNX2_DMA_CONFIG_CNTL_WORD_SWAP
|
4767 DMA_READ_CHANS
<< 12 |
4768 DMA_WRITE_CHANS
<< 16;
4770 val
|= (0x2 << 20) | (1 << 11);
4772 if ((bp
->flags
& BNX2_FLAG_PCIX
) && (bp
->bus_speed_mhz
== 133))
4775 if ((CHIP_NUM(bp
) == CHIP_NUM_5706
) &&
4776 (CHIP_ID(bp
) != CHIP_ID_5706_A0
) && !(bp
->flags
& BNX2_FLAG_PCIX
))
4777 val
|= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA
;
4779 REG_WR(bp
, BNX2_DMA_CONFIG
, val
);
4781 if (CHIP_ID(bp
) == CHIP_ID_5706_A0
) {
4782 val
= REG_RD(bp
, BNX2_TDMA_CONFIG
);
4783 val
|= BNX2_TDMA_CONFIG_ONE_DMA
;
4784 REG_WR(bp
, BNX2_TDMA_CONFIG
, val
);
4787 if (bp
->flags
& BNX2_FLAG_PCIX
) {
4790 pci_read_config_word(bp
->pdev
, bp
->pcix_cap
+ PCI_X_CMD
,
4792 pci_write_config_word(bp
->pdev
, bp
->pcix_cap
+ PCI_X_CMD
,
4793 val16
& ~PCI_X_CMD_ERO
);
4796 REG_WR(bp
, BNX2_MISC_ENABLE_SET_BITS
,
4797 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE
|
4798 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE
|
4799 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE
);
4801 /* Initialize context mapping and zero out the quick contexts. The
4802 * context block must have already been enabled. */
4803 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
4804 rc
= bnx2_init_5709_context(bp
);
4808 bnx2_init_context(bp
);
4810 if ((rc
= bnx2_init_cpus(bp
)) != 0)
4813 bnx2_init_nvram(bp
);
4815 bnx2_set_mac_addr(bp
, bp
->dev
->dev_addr
, 0);
4817 val
= REG_RD(bp
, BNX2_MQ_CONFIG
);
4818 val
&= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE
;
4819 val
|= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256
;
4820 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
4821 val
|= BNX2_MQ_CONFIG_BIN_MQ_MODE
;
4822 if (CHIP_REV(bp
) == CHIP_REV_Ax
)
4823 val
|= BNX2_MQ_CONFIG_HALT_DIS
;
4826 REG_WR(bp
, BNX2_MQ_CONFIG
, val
);
4828 val
= 0x10000 + (MAX_CID_CNT
* MB_KERNEL_CTX_SIZE
);
4829 REG_WR(bp
, BNX2_MQ_KNL_BYP_WIND_START
, val
);
4830 REG_WR(bp
, BNX2_MQ_KNL_WIND_END
, val
);
4832 val
= (BCM_PAGE_BITS
- 8) << 24;
4833 REG_WR(bp
, BNX2_RV2P_CONFIG
, val
);
4835 /* Configure page size. */
4836 val
= REG_RD(bp
, BNX2_TBDR_CONFIG
);
4837 val
&= ~BNX2_TBDR_CONFIG_PAGE_SIZE
;
4838 val
|= (BCM_PAGE_BITS
- 8) << 24 | 0x40;
4839 REG_WR(bp
, BNX2_TBDR_CONFIG
, val
);
4841 val
= bp
->mac_addr
[0] +
4842 (bp
->mac_addr
[1] << 8) +
4843 (bp
->mac_addr
[2] << 16) +
4845 (bp
->mac_addr
[4] << 8) +
4846 (bp
->mac_addr
[5] << 16);
4847 REG_WR(bp
, BNX2_EMAC_BACKOFF_SEED
, val
);
4849 /* Program the MTU. Also include 4 bytes for CRC32. */
4851 val
= mtu
+ ETH_HLEN
+ ETH_FCS_LEN
;
4852 if (val
> (MAX_ETHERNET_PACKET_SIZE
+ 4))
4853 val
|= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA
;
4854 REG_WR(bp
, BNX2_EMAC_RX_MTU_SIZE
, val
);
4859 bnx2_reg_wr_ind(bp
, BNX2_RBUF_CONFIG
, BNX2_RBUF_CONFIG_VAL(mtu
));
4860 bnx2_reg_wr_ind(bp
, BNX2_RBUF_CONFIG2
, BNX2_RBUF_CONFIG2_VAL(mtu
));
4861 bnx2_reg_wr_ind(bp
, BNX2_RBUF_CONFIG3
, BNX2_RBUF_CONFIG3_VAL(mtu
));
4863 for (i
= 0; i
< BNX2_MAX_MSIX_VEC
; i
++)
4864 bp
->bnx2_napi
[i
].last_status_idx
= 0;
4866 bp
->idle_chk_status_idx
= 0xffff;
4868 bp
->rx_mode
= BNX2_EMAC_RX_MODE_SORT_MODE
;
4870 /* Set up how to generate a link change interrupt. */
4871 REG_WR(bp
, BNX2_EMAC_ATTENTION_ENA
, BNX2_EMAC_ATTENTION_ENA_LINK
);
4873 REG_WR(bp
, BNX2_HC_STATUS_ADDR_L
,
4874 (u64
) bp
->status_blk_mapping
& 0xffffffff);
4875 REG_WR(bp
, BNX2_HC_STATUS_ADDR_H
, (u64
) bp
->status_blk_mapping
>> 32);
4877 REG_WR(bp
, BNX2_HC_STATISTICS_ADDR_L
,
4878 (u64
) bp
->stats_blk_mapping
& 0xffffffff);
4879 REG_WR(bp
, BNX2_HC_STATISTICS_ADDR_H
,
4880 (u64
) bp
->stats_blk_mapping
>> 32);
4882 REG_WR(bp
, BNX2_HC_TX_QUICK_CONS_TRIP
,
4883 (bp
->tx_quick_cons_trip_int
<< 16) | bp
->tx_quick_cons_trip
);
4885 REG_WR(bp
, BNX2_HC_RX_QUICK_CONS_TRIP
,
4886 (bp
->rx_quick_cons_trip_int
<< 16) | bp
->rx_quick_cons_trip
);
4888 REG_WR(bp
, BNX2_HC_COMP_PROD_TRIP
,
4889 (bp
->comp_prod_trip_int
<< 16) | bp
->comp_prod_trip
);
4891 REG_WR(bp
, BNX2_HC_TX_TICKS
, (bp
->tx_ticks_int
<< 16) | bp
->tx_ticks
);
4893 REG_WR(bp
, BNX2_HC_RX_TICKS
, (bp
->rx_ticks_int
<< 16) | bp
->rx_ticks
);
4895 REG_WR(bp
, BNX2_HC_COM_TICKS
,
4896 (bp
->com_ticks_int
<< 16) | bp
->com_ticks
);
4898 REG_WR(bp
, BNX2_HC_CMD_TICKS
,
4899 (bp
->cmd_ticks_int
<< 16) | bp
->cmd_ticks
);
4901 if (CHIP_NUM(bp
) == CHIP_NUM_5708
)
4902 REG_WR(bp
, BNX2_HC_STATS_TICKS
, 0);
4904 REG_WR(bp
, BNX2_HC_STATS_TICKS
, bp
->stats_ticks
);
4905 REG_WR(bp
, BNX2_HC_STAT_COLLECT_TICKS
, 0xbb8); /* 3ms */
4907 if (CHIP_ID(bp
) == CHIP_ID_5706_A1
)
4908 val
= BNX2_HC_CONFIG_COLLECT_STATS
;
4910 val
= BNX2_HC_CONFIG_RX_TMR_MODE
| BNX2_HC_CONFIG_TX_TMR_MODE
|
4911 BNX2_HC_CONFIG_COLLECT_STATS
;
4914 if (bp
->irq_nvecs
> 1) {
4915 REG_WR(bp
, BNX2_HC_MSIX_BIT_VECTOR
,
4916 BNX2_HC_MSIX_BIT_VECTOR_VAL
);
4918 val
|= BNX2_HC_CONFIG_SB_ADDR_INC_128B
;
4921 if (bp
->flags
& BNX2_FLAG_ONE_SHOT_MSI
)
4922 val
|= BNX2_HC_CONFIG_ONE_SHOT
;
4924 REG_WR(bp
, BNX2_HC_CONFIG
, val
);
4926 for (i
= 1; i
< bp
->irq_nvecs
; i
++) {
4927 u32 base
= ((i
- 1) * BNX2_HC_SB_CONFIG_SIZE
) +
4928 BNX2_HC_SB_CONFIG_1
;
4931 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE
|
4932 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE
|
4933 BNX2_HC_SB_CONFIG_1_ONE_SHOT
);
4935 REG_WR(bp
, base
+ BNX2_HC_TX_QUICK_CONS_TRIP_OFF
,
4936 (bp
->tx_quick_cons_trip_int
<< 16) |
4937 bp
->tx_quick_cons_trip
);
4939 REG_WR(bp
, base
+ BNX2_HC_TX_TICKS_OFF
,
4940 (bp
->tx_ticks_int
<< 16) | bp
->tx_ticks
);
4942 REG_WR(bp
, base
+ BNX2_HC_RX_QUICK_CONS_TRIP_OFF
,
4943 (bp
->rx_quick_cons_trip_int
<< 16) |
4944 bp
->rx_quick_cons_trip
);
4946 REG_WR(bp
, base
+ BNX2_HC_RX_TICKS_OFF
,
4947 (bp
->rx_ticks_int
<< 16) | bp
->rx_ticks
);
4950 /* Clear internal stats counters. */
4951 REG_WR(bp
, BNX2_HC_COMMAND
, BNX2_HC_COMMAND_CLR_STAT_NOW
);
4953 REG_WR(bp
, BNX2_HC_ATTN_BITS_ENABLE
, STATUS_ATTN_EVENTS
);
4955 /* Initialize the receive filter. */
4956 bnx2_set_rx_mode(bp
->dev
);
4958 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
4959 val
= REG_RD(bp
, BNX2_MISC_NEW_CORE_CTL
);
4960 val
|= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE
;
4961 REG_WR(bp
, BNX2_MISC_NEW_CORE_CTL
, val
);
4963 rc
= bnx2_fw_sync(bp
, BNX2_DRV_MSG_DATA_WAIT2
| BNX2_DRV_MSG_CODE_RESET
,
4966 REG_WR(bp
, BNX2_MISC_ENABLE_SET_BITS
, BNX2_MISC_ENABLE_DEFAULT
);
4967 REG_RD(bp
, BNX2_MISC_ENABLE_SET_BITS
);
4971 bp
->hc_cmd
= REG_RD(bp
, BNX2_HC_COMMAND
);
4977 bnx2_clear_ring_states(struct bnx2
*bp
)
4979 struct bnx2_napi
*bnapi
;
4980 struct bnx2_tx_ring_info
*txr
;
4981 struct bnx2_rx_ring_info
*rxr
;
4984 for (i
= 0; i
< BNX2_MAX_MSIX_VEC
; i
++) {
4985 bnapi
= &bp
->bnx2_napi
[i
];
4986 txr
= &bnapi
->tx_ring
;
4987 rxr
= &bnapi
->rx_ring
;
4990 txr
->hw_tx_cons
= 0;
4991 rxr
->rx_prod_bseq
= 0;
4994 rxr
->rx_pg_prod
= 0;
4995 rxr
->rx_pg_cons
= 0;
5000 bnx2_init_tx_context(struct bnx2
*bp
, u32 cid
, struct bnx2_tx_ring_info
*txr
)
5002 u32 val
, offset0
, offset1
, offset2
, offset3
;
5003 u32 cid_addr
= GET_CID_ADDR(cid
);
5005 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
5006 offset0
= BNX2_L2CTX_TYPE_XI
;
5007 offset1
= BNX2_L2CTX_CMD_TYPE_XI
;
5008 offset2
= BNX2_L2CTX_TBDR_BHADDR_HI_XI
;
5009 offset3
= BNX2_L2CTX_TBDR_BHADDR_LO_XI
;
5011 offset0
= BNX2_L2CTX_TYPE
;
5012 offset1
= BNX2_L2CTX_CMD_TYPE
;
5013 offset2
= BNX2_L2CTX_TBDR_BHADDR_HI
;
5014 offset3
= BNX2_L2CTX_TBDR_BHADDR_LO
;
5016 val
= BNX2_L2CTX_TYPE_TYPE_L2
| BNX2_L2CTX_TYPE_SIZE_L2
;
5017 bnx2_ctx_wr(bp
, cid_addr
, offset0
, val
);
5019 val
= BNX2_L2CTX_CMD_TYPE_TYPE_L2
| (8 << 16);
5020 bnx2_ctx_wr(bp
, cid_addr
, offset1
, val
);
5022 val
= (u64
) txr
->tx_desc_mapping
>> 32;
5023 bnx2_ctx_wr(bp
, cid_addr
, offset2
, val
);
5025 val
= (u64
) txr
->tx_desc_mapping
& 0xffffffff;
5026 bnx2_ctx_wr(bp
, cid_addr
, offset3
, val
);
5030 bnx2_init_tx_ring(struct bnx2
*bp
, int ring_num
)
5034 struct bnx2_napi
*bnapi
;
5035 struct bnx2_tx_ring_info
*txr
;
5037 bnapi
= &bp
->bnx2_napi
[ring_num
];
5038 txr
= &bnapi
->tx_ring
;
5043 cid
= TX_TSS_CID
+ ring_num
- 1;
5045 bp
->tx_wake_thresh
= bp
->tx_ring_size
/ 2;
5047 txbd
= &txr
->tx_desc_ring
[MAX_TX_DESC_CNT
];
5049 txbd
->tx_bd_haddr_hi
= (u64
) txr
->tx_desc_mapping
>> 32;
5050 txbd
->tx_bd_haddr_lo
= (u64
) txr
->tx_desc_mapping
& 0xffffffff;
5053 txr
->tx_prod_bseq
= 0;
5055 txr
->tx_bidx_addr
= MB_GET_CID_ADDR(cid
) + BNX2_L2CTX_TX_HOST_BIDX
;
5056 txr
->tx_bseq_addr
= MB_GET_CID_ADDR(cid
) + BNX2_L2CTX_TX_HOST_BSEQ
;
5058 bnx2_init_tx_context(bp
, cid
, txr
);
5062 bnx2_init_rxbd_rings(struct rx_bd
*rx_ring
[], dma_addr_t dma
[], u32 buf_size
,
5068 for (i
= 0; i
< num_rings
; i
++) {
5071 rxbd
= &rx_ring
[i
][0];
5072 for (j
= 0; j
< MAX_RX_DESC_CNT
; j
++, rxbd
++) {
5073 rxbd
->rx_bd_len
= buf_size
;
5074 rxbd
->rx_bd_flags
= RX_BD_FLAGS_START
| RX_BD_FLAGS_END
;
5076 if (i
== (num_rings
- 1))
5080 rxbd
->rx_bd_haddr_hi
= (u64
) dma
[j
] >> 32;
5081 rxbd
->rx_bd_haddr_lo
= (u64
) dma
[j
] & 0xffffffff;
5086 bnx2_init_rx_ring(struct bnx2
*bp
, int ring_num
)
5089 u16 prod
, ring_prod
;
5090 u32 cid
, rx_cid_addr
, val
;
5091 struct bnx2_napi
*bnapi
= &bp
->bnx2_napi
[ring_num
];
5092 struct bnx2_rx_ring_info
*rxr
= &bnapi
->rx_ring
;
5097 cid
= RX_RSS_CID
+ ring_num
- 1;
5099 rx_cid_addr
= GET_CID_ADDR(cid
);
5101 bnx2_init_rxbd_rings(rxr
->rx_desc_ring
, rxr
->rx_desc_mapping
,
5102 bp
->rx_buf_use_size
, bp
->rx_max_ring
);
5104 bnx2_init_rx_context(bp
, cid
);
5106 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
5107 val
= REG_RD(bp
, BNX2_MQ_MAP_L2_5
);
5108 REG_WR(bp
, BNX2_MQ_MAP_L2_5
, val
| BNX2_MQ_MAP_L2_5_ARM
);
5111 bnx2_ctx_wr(bp
, rx_cid_addr
, BNX2_L2CTX_PG_BUF_SIZE
, 0);
5112 if (bp
->rx_pg_ring_size
) {
5113 bnx2_init_rxbd_rings(rxr
->rx_pg_desc_ring
,
5114 rxr
->rx_pg_desc_mapping
,
5115 PAGE_SIZE
, bp
->rx_max_pg_ring
);
5116 val
= (bp
->rx_buf_use_size
<< 16) | PAGE_SIZE
;
5117 bnx2_ctx_wr(bp
, rx_cid_addr
, BNX2_L2CTX_PG_BUF_SIZE
, val
);
5118 bnx2_ctx_wr(bp
, rx_cid_addr
, BNX2_L2CTX_RBDC_KEY
,
5119 BNX2_L2CTX_RBDC_JUMBO_KEY
- ring_num
);
5121 val
= (u64
) rxr
->rx_pg_desc_mapping
[0] >> 32;
5122 bnx2_ctx_wr(bp
, rx_cid_addr
, BNX2_L2CTX_NX_PG_BDHADDR_HI
, val
);
5124 val
= (u64
) rxr
->rx_pg_desc_mapping
[0] & 0xffffffff;
5125 bnx2_ctx_wr(bp
, rx_cid_addr
, BNX2_L2CTX_NX_PG_BDHADDR_LO
, val
);
5127 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
5128 REG_WR(bp
, BNX2_MQ_MAP_L2_3
, BNX2_MQ_MAP_L2_3_DEFAULT
);
5131 val
= (u64
) rxr
->rx_desc_mapping
[0] >> 32;
5132 bnx2_ctx_wr(bp
, rx_cid_addr
, BNX2_L2CTX_NX_BDHADDR_HI
, val
);
5134 val
= (u64
) rxr
->rx_desc_mapping
[0] & 0xffffffff;
5135 bnx2_ctx_wr(bp
, rx_cid_addr
, BNX2_L2CTX_NX_BDHADDR_LO
, val
);
5137 ring_prod
= prod
= rxr
->rx_pg_prod
;
5138 for (i
= 0; i
< bp
->rx_pg_ring_size
; i
++) {
5139 if (bnx2_alloc_rx_page(bp
, rxr
, ring_prod
) < 0)
5141 prod
= NEXT_RX_BD(prod
);
5142 ring_prod
= RX_PG_RING_IDX(prod
);
5144 rxr
->rx_pg_prod
= prod
;
5146 ring_prod
= prod
= rxr
->rx_prod
;
5147 for (i
= 0; i
< bp
->rx_ring_size
; i
++) {
5148 if (bnx2_alloc_rx_skb(bp
, rxr
, ring_prod
) < 0)
5150 prod
= NEXT_RX_BD(prod
);
5151 ring_prod
= RX_RING_IDX(prod
);
5153 rxr
->rx_prod
= prod
;
5155 rxr
->rx_bidx_addr
= MB_GET_CID_ADDR(cid
) + BNX2_L2CTX_HOST_BDIDX
;
5156 rxr
->rx_bseq_addr
= MB_GET_CID_ADDR(cid
) + BNX2_L2CTX_HOST_BSEQ
;
5157 rxr
->rx_pg_bidx_addr
= MB_GET_CID_ADDR(cid
) + BNX2_L2CTX_HOST_PG_BDIDX
;
5159 REG_WR16(bp
, rxr
->rx_pg_bidx_addr
, rxr
->rx_pg_prod
);
5160 REG_WR16(bp
, rxr
->rx_bidx_addr
, prod
);
5162 REG_WR(bp
, rxr
->rx_bseq_addr
, rxr
->rx_prod_bseq
);
5166 bnx2_init_all_rings(struct bnx2
*bp
)
5171 bnx2_clear_ring_states(bp
);
5173 REG_WR(bp
, BNX2_TSCH_TSS_CFG
, 0);
5174 for (i
= 0; i
< bp
->num_tx_rings
; i
++)
5175 bnx2_init_tx_ring(bp
, i
);
5177 if (bp
->num_tx_rings
> 1)
5178 REG_WR(bp
, BNX2_TSCH_TSS_CFG
, ((bp
->num_tx_rings
- 1) << 24) |
5181 REG_WR(bp
, BNX2_RLUP_RSS_CONFIG
, 0);
5182 bnx2_reg_wr_ind(bp
, BNX2_RXP_SCRATCH_RSS_TBL_SZ
, 0);
5184 for (i
= 0; i
< bp
->num_rx_rings
; i
++)
5185 bnx2_init_rx_ring(bp
, i
);
5187 if (bp
->num_rx_rings
> 1) {
5189 u8
*tbl
= (u8
*) &tbl_32
;
5191 bnx2_reg_wr_ind(bp
, BNX2_RXP_SCRATCH_RSS_TBL_SZ
,
5192 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES
);
5194 for (i
= 0; i
< BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES
; i
++) {
5195 tbl
[i
% 4] = i
% (bp
->num_rx_rings
- 1);
5198 BNX2_RXP_SCRATCH_RSS_TBL
+ i
,
5199 cpu_to_be32(tbl_32
));
5202 val
= BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI
|
5203 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI
;
5205 REG_WR(bp
, BNX2_RLUP_RSS_CONFIG
, val
);
5210 static u32
bnx2_find_max_ring(u32 ring_size
, u32 max_size
)
5212 u32 max
, num_rings
= 1;
5214 while (ring_size
> MAX_RX_DESC_CNT
) {
5215 ring_size
-= MAX_RX_DESC_CNT
;
5218 /* round to next power of 2 */
5220 while ((max
& num_rings
) == 0)
5223 if (num_rings
!= max
)
5230 bnx2_set_rx_ring_size(struct bnx2
*bp
, u32 size
)
5232 u32 rx_size
, rx_space
, jumbo_size
;
5234 /* 8 for CRC and VLAN */
5235 rx_size
= bp
->dev
->mtu
+ ETH_HLEN
+ BNX2_RX_OFFSET
+ 8;
5237 rx_space
= SKB_DATA_ALIGN(rx_size
+ BNX2_RX_ALIGN
) + NET_SKB_PAD
+
5238 sizeof(struct skb_shared_info
);
5240 bp
->rx_copy_thresh
= BNX2_RX_COPY_THRESH
;
5241 bp
->rx_pg_ring_size
= 0;
5242 bp
->rx_max_pg_ring
= 0;
5243 bp
->rx_max_pg_ring_idx
= 0;
5244 if ((rx_space
> PAGE_SIZE
) && !(bp
->flags
& BNX2_FLAG_JUMBO_BROKEN
)) {
5245 int pages
= PAGE_ALIGN(bp
->dev
->mtu
- 40) >> PAGE_SHIFT
;
5247 jumbo_size
= size
* pages
;
5248 if (jumbo_size
> MAX_TOTAL_RX_PG_DESC_CNT
)
5249 jumbo_size
= MAX_TOTAL_RX_PG_DESC_CNT
;
5251 bp
->rx_pg_ring_size
= jumbo_size
;
5252 bp
->rx_max_pg_ring
= bnx2_find_max_ring(jumbo_size
,
5254 bp
->rx_max_pg_ring_idx
= (bp
->rx_max_pg_ring
* RX_DESC_CNT
) - 1;
5255 rx_size
= BNX2_RX_COPY_THRESH
+ BNX2_RX_OFFSET
;
5256 bp
->rx_copy_thresh
= 0;
5259 bp
->rx_buf_use_size
= rx_size
;
5261 bp
->rx_buf_size
= bp
->rx_buf_use_size
+ BNX2_RX_ALIGN
;
5262 bp
->rx_jumbo_thresh
= rx_size
- BNX2_RX_OFFSET
;
5263 bp
->rx_ring_size
= size
;
5264 bp
->rx_max_ring
= bnx2_find_max_ring(size
, MAX_RX_RINGS
);
5265 bp
->rx_max_ring_idx
= (bp
->rx_max_ring
* RX_DESC_CNT
) - 1;
5269 bnx2_free_tx_skbs(struct bnx2
*bp
)
5273 for (i
= 0; i
< bp
->num_tx_rings
; i
++) {
5274 struct bnx2_napi
*bnapi
= &bp
->bnx2_napi
[i
];
5275 struct bnx2_tx_ring_info
*txr
= &bnapi
->tx_ring
;
5278 if (txr
->tx_buf_ring
== NULL
)
5281 for (j
= 0; j
< TX_DESC_CNT
; ) {
5282 struct sw_tx_bd
*tx_buf
= &txr
->tx_buf_ring
[j
];
5283 struct sk_buff
*skb
= tx_buf
->skb
;
5290 skb_dma_unmap(&bp
->pdev
->dev
, skb
, DMA_TO_DEVICE
);
5294 j
+= skb_shinfo(skb
)->nr_frags
+ 1;
5301 bnx2_free_rx_skbs(struct bnx2
*bp
)
5305 for (i
= 0; i
< bp
->num_rx_rings
; i
++) {
5306 struct bnx2_napi
*bnapi
= &bp
->bnx2_napi
[i
];
5307 struct bnx2_rx_ring_info
*rxr
= &bnapi
->rx_ring
;
5310 if (rxr
->rx_buf_ring
== NULL
)
5313 for (j
= 0; j
< bp
->rx_max_ring_idx
; j
++) {
5314 struct sw_bd
*rx_buf
= &rxr
->rx_buf_ring
[j
];
5315 struct sk_buff
*skb
= rx_buf
->skb
;
5320 pci_unmap_single(bp
->pdev
,
5321 pci_unmap_addr(rx_buf
, mapping
),
5322 bp
->rx_buf_use_size
,
5323 PCI_DMA_FROMDEVICE
);
5329 for (j
= 0; j
< bp
->rx_max_pg_ring_idx
; j
++)
5330 bnx2_free_rx_page(bp
, rxr
, j
);
/* Free every buffer the driver holds in both TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5342 bnx2_reset_nic(struct bnx2
*bp
, u32 reset_code
)
5346 rc
= bnx2_reset_chip(bp
, reset_code
);
5351 if ((rc
= bnx2_init_chip(bp
)) != 0)
5354 bnx2_init_all_rings(bp
);
5359 bnx2_init_nic(struct bnx2
*bp
, int reset_phy
)
5363 if ((rc
= bnx2_reset_nic(bp
, BNX2_DRV_MSG_CODE_RESET
)) != 0)
5366 spin_lock_bh(&bp
->phy_lock
);
5367 bnx2_init_phy(bp
, reset_phy
);
5369 if (bp
->phy_flags
& BNX2_PHY_FLAG_REMOTE_PHY_CAP
)
5370 bnx2_remote_phy_event(bp
);
5371 spin_unlock_bh(&bp
->phy_lock
);
5376 bnx2_shutdown_chip(struct bnx2
*bp
)
5380 if (bp
->flags
& BNX2_FLAG_NO_WOL
)
5381 reset_code
= BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN
;
5383 reset_code
= BNX2_DRV_MSG_CODE_SUSPEND_WOL
;
5385 reset_code
= BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL
;
5387 return bnx2_reset_chip(bp
, reset_code
);
5391 bnx2_test_registers(struct bnx2
*bp
)
5395 static const struct {
5398 #define BNX2_FL_NOT_5709 1
5402 { 0x006c, 0, 0x00000000, 0x0000003f },
5403 { 0x0090, 0, 0xffffffff, 0x00000000 },
5404 { 0x0094, 0, 0x00000000, 0x00000000 },
5406 { 0x0404, BNX2_FL_NOT_5709
, 0x00003f00, 0x00000000 },
5407 { 0x0418, BNX2_FL_NOT_5709
, 0x00000000, 0xffffffff },
5408 { 0x041c, BNX2_FL_NOT_5709
, 0x00000000, 0xffffffff },
5409 { 0x0420, BNX2_FL_NOT_5709
, 0x00000000, 0x80ffffff },
5410 { 0x0424, BNX2_FL_NOT_5709
, 0x00000000, 0x00000000 },
5411 { 0x0428, BNX2_FL_NOT_5709
, 0x00000000, 0x00000001 },
5412 { 0x0450, BNX2_FL_NOT_5709
, 0x00000000, 0x0000ffff },
5413 { 0x0454, BNX2_FL_NOT_5709
, 0x00000000, 0xffffffff },
5414 { 0x0458, BNX2_FL_NOT_5709
, 0x00000000, 0xffffffff },
5416 { 0x0808, BNX2_FL_NOT_5709
, 0x00000000, 0xffffffff },
5417 { 0x0854, BNX2_FL_NOT_5709
, 0x00000000, 0xffffffff },
5418 { 0x0868, BNX2_FL_NOT_5709
, 0x00000000, 0x77777777 },
5419 { 0x086c, BNX2_FL_NOT_5709
, 0x00000000, 0x77777777 },
5420 { 0x0870, BNX2_FL_NOT_5709
, 0x00000000, 0x77777777 },
5421 { 0x0874, BNX2_FL_NOT_5709
, 0x00000000, 0x77777777 },
5423 { 0x0c00, BNX2_FL_NOT_5709
, 0x00000000, 0x00000001 },
5424 { 0x0c04, BNX2_FL_NOT_5709
, 0x00000000, 0x03ff0001 },
5425 { 0x0c08, BNX2_FL_NOT_5709
, 0x0f0ff073, 0x00000000 },
5427 { 0x1000, 0, 0x00000000, 0x00000001 },
5428 { 0x1004, BNX2_FL_NOT_5709
, 0x00000000, 0x000f0001 },
5430 { 0x1408, 0, 0x01c00800, 0x00000000 },
5431 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5432 { 0x14a8, 0, 0x00000000, 0x000001ff },
5433 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5434 { 0x14b0, 0, 0x00000002, 0x00000001 },
5435 { 0x14b8, 0, 0x00000000, 0x00000000 },
5436 { 0x14c0, 0, 0x00000000, 0x00000009 },
5437 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5438 { 0x14cc, 0, 0x00000000, 0x00000001 },
5439 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5441 { 0x1800, 0, 0x00000000, 0x00000001 },
5442 { 0x1804, 0, 0x00000000, 0x00000003 },
5444 { 0x2800, 0, 0x00000000, 0x00000001 },
5445 { 0x2804, 0, 0x00000000, 0x00003f01 },
5446 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5447 { 0x2810, 0, 0xffff0000, 0x00000000 },
5448 { 0x2814, 0, 0xffff0000, 0x00000000 },
5449 { 0x2818, 0, 0xffff0000, 0x00000000 },
5450 { 0x281c, 0, 0xffff0000, 0x00000000 },
5451 { 0x2834, 0, 0xffffffff, 0x00000000 },
5452 { 0x2840, 0, 0x00000000, 0xffffffff },
5453 { 0x2844, 0, 0x00000000, 0xffffffff },
5454 { 0x2848, 0, 0xffffffff, 0x00000000 },
5455 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5457 { 0x2c00, 0, 0x00000000, 0x00000011 },
5458 { 0x2c04, 0, 0x00000000, 0x00030007 },
5460 { 0x3c00, 0, 0x00000000, 0x00000001 },
5461 { 0x3c04, 0, 0x00000000, 0x00070000 },
5462 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5463 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5464 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5465 { 0x3c14, 0, 0x00000000, 0xffffffff },
5466 { 0x3c18, 0, 0x00000000, 0xffffffff },
5467 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5468 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5470 { 0x5004, 0, 0x00000000, 0x0000007f },
5471 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5473 { 0x5c00, 0, 0x00000000, 0x00000001 },
5474 { 0x5c04, 0, 0x00000000, 0x0003000f },
5475 { 0x5c08, 0, 0x00000003, 0x00000000 },
5476 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5477 { 0x5c10, 0, 0x00000000, 0xffffffff },
5478 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5479 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5480 { 0x5c88, 0, 0x00000000, 0x00077373 },
5481 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5483 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5484 { 0x680c, 0, 0xffffffff, 0x00000000 },
5485 { 0x6810, 0, 0xffffffff, 0x00000000 },
5486 { 0x6814, 0, 0xffffffff, 0x00000000 },
5487 { 0x6818, 0, 0xffffffff, 0x00000000 },
5488 { 0x681c, 0, 0xffffffff, 0x00000000 },
5489 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5490 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5491 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5492 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5493 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5494 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5495 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5496 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5497 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5498 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5499 { 0x684c, 0, 0xffffffff, 0x00000000 },
5500 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5501 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5502 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5503 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5504 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5505 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5507 { 0xffff, 0, 0x00000000, 0x00000000 },
5512 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
5515 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
5516 u32 offset
, rw_mask
, ro_mask
, save_val
, val
;
5517 u16 flags
= reg_tbl
[i
].flags
;
5519 if (is_5709
&& (flags
& BNX2_FL_NOT_5709
))
5522 offset
= (u32
) reg_tbl
[i
].offset
;
5523 rw_mask
= reg_tbl
[i
].rw_mask
;
5524 ro_mask
= reg_tbl
[i
].ro_mask
;
5526 save_val
= readl(bp
->regview
+ offset
);
5528 writel(0, bp
->regview
+ offset
);
5530 val
= readl(bp
->regview
+ offset
);
5531 if ((val
& rw_mask
) != 0) {
5535 if ((val
& ro_mask
) != (save_val
& ro_mask
)) {
5539 writel(0xffffffff, bp
->regview
+ offset
);
5541 val
= readl(bp
->regview
+ offset
);
5542 if ((val
& rw_mask
) != rw_mask
) {
5546 if ((val
& ro_mask
) != (save_val
& ro_mask
)) {
5550 writel(save_val
, bp
->regview
+ offset
);
5554 writel(save_val
, bp
->regview
+ offset
);
5562 bnx2_do_mem_test(struct bnx2
*bp
, u32 start
, u32 size
)
5564 static const u32 test_pattern
[] = { 0x00000000, 0xffffffff, 0x55555555,
5565 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5568 for (i
= 0; i
< sizeof(test_pattern
) / 4; i
++) {
5571 for (offset
= 0; offset
< size
; offset
+= 4) {
5573 bnx2_reg_wr_ind(bp
, start
+ offset
, test_pattern
[i
]);
5575 if (bnx2_reg_rd_ind(bp
, start
+ offset
) !=
5585 bnx2_test_memory(struct bnx2
*bp
)
5589 static struct mem_entry
{
5592 } mem_tbl_5706
[] = {
5593 { 0x60000, 0x4000 },
5594 { 0xa0000, 0x3000 },
5595 { 0xe0000, 0x4000 },
5596 { 0x120000, 0x4000 },
5597 { 0x1a0000, 0x4000 },
5598 { 0x160000, 0x4000 },
5602 { 0x60000, 0x4000 },
5603 { 0xa0000, 0x3000 },
5604 { 0xe0000, 0x4000 },
5605 { 0x120000, 0x4000 },
5606 { 0x1a0000, 0x4000 },
5609 struct mem_entry
*mem_tbl
;
5611 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
5612 mem_tbl
= mem_tbl_5709
;
5614 mem_tbl
= mem_tbl_5706
;
5616 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
5617 if ((ret
= bnx2_do_mem_test(bp
, mem_tbl
[i
].offset
,
5618 mem_tbl
[i
].len
)) != 0) {
5626 #define BNX2_MAC_LOOPBACK 0
5627 #define BNX2_PHY_LOOPBACK 1
5630 bnx2_run_loopback(struct bnx2
*bp
, int loopback_mode
)
5632 unsigned int pkt_size
, num_pkts
, i
;
5633 struct sk_buff
*skb
, *rx_skb
;
5634 unsigned char *packet
;
5635 u16 rx_start_idx
, rx_idx
;
5638 struct sw_bd
*rx_buf
;
5639 struct l2_fhdr
*rx_hdr
;
5641 struct bnx2_napi
*bnapi
= &bp
->bnx2_napi
[0], *tx_napi
;
5642 struct bnx2_tx_ring_info
*txr
= &bnapi
->tx_ring
;
5643 struct bnx2_rx_ring_info
*rxr
= &bnapi
->rx_ring
;
5647 txr
= &tx_napi
->tx_ring
;
5648 rxr
= &bnapi
->rx_ring
;
5649 if (loopback_mode
== BNX2_MAC_LOOPBACK
) {
5650 bp
->loopback
= MAC_LOOPBACK
;
5651 bnx2_set_mac_loopback(bp
);
5653 else if (loopback_mode
== BNX2_PHY_LOOPBACK
) {
5654 if (bp
->phy_flags
& BNX2_PHY_FLAG_REMOTE_PHY_CAP
)
5657 bp
->loopback
= PHY_LOOPBACK
;
5658 bnx2_set_phy_loopback(bp
);
5663 pkt_size
= min(bp
->dev
->mtu
+ ETH_HLEN
, bp
->rx_jumbo_thresh
- 4);
5664 skb
= netdev_alloc_skb(bp
->dev
, pkt_size
);
5667 packet
= skb_put(skb
, pkt_size
);
5668 memcpy(packet
, bp
->dev
->dev_addr
, 6);
5669 memset(packet
+ 6, 0x0, 8);
5670 for (i
= 14; i
< pkt_size
; i
++)
5671 packet
[i
] = (unsigned char) (i
& 0xff);
5673 if (skb_dma_map(&bp
->pdev
->dev
, skb
, DMA_TO_DEVICE
)) {
5677 map
= skb_shinfo(skb
)->dma_head
;
5679 REG_WR(bp
, BNX2_HC_COMMAND
,
5680 bp
->hc_cmd
| BNX2_HC_COMMAND_COAL_NOW_WO_INT
);
5682 REG_RD(bp
, BNX2_HC_COMMAND
);
5685 rx_start_idx
= bnx2_get_hw_rx_cons(bnapi
);
5689 txbd
= &txr
->tx_desc_ring
[TX_RING_IDX(txr
->tx_prod
)];
5691 txbd
->tx_bd_haddr_hi
= (u64
) map
>> 32;
5692 txbd
->tx_bd_haddr_lo
= (u64
) map
& 0xffffffff;
5693 txbd
->tx_bd_mss_nbytes
= pkt_size
;
5694 txbd
->tx_bd_vlan_tag_flags
= TX_BD_FLAGS_START
| TX_BD_FLAGS_END
;
5697 txr
->tx_prod
= NEXT_TX_BD(txr
->tx_prod
);
5698 txr
->tx_prod_bseq
+= pkt_size
;
5700 REG_WR16(bp
, txr
->tx_bidx_addr
, txr
->tx_prod
);
5701 REG_WR(bp
, txr
->tx_bseq_addr
, txr
->tx_prod_bseq
);
5705 REG_WR(bp
, BNX2_HC_COMMAND
,
5706 bp
->hc_cmd
| BNX2_HC_COMMAND_COAL_NOW_WO_INT
);
5708 REG_RD(bp
, BNX2_HC_COMMAND
);
5712 skb_dma_unmap(&bp
->pdev
->dev
, skb
, DMA_TO_DEVICE
);
5715 if (bnx2_get_hw_tx_cons(tx_napi
) != txr
->tx_prod
)
5716 goto loopback_test_done
;
5718 rx_idx
= bnx2_get_hw_rx_cons(bnapi
);
5719 if (rx_idx
!= rx_start_idx
+ num_pkts
) {
5720 goto loopback_test_done
;
5723 rx_buf
= &rxr
->rx_buf_ring
[rx_start_idx
];
5724 rx_skb
= rx_buf
->skb
;
5726 rx_hdr
= (struct l2_fhdr
*) rx_skb
->data
;
5727 skb_reserve(rx_skb
, BNX2_RX_OFFSET
);
5729 pci_dma_sync_single_for_cpu(bp
->pdev
,
5730 pci_unmap_addr(rx_buf
, mapping
),
5731 bp
->rx_buf_size
, PCI_DMA_FROMDEVICE
);
5733 if (rx_hdr
->l2_fhdr_status
&
5734 (L2_FHDR_ERRORS_BAD_CRC
|
5735 L2_FHDR_ERRORS_PHY_DECODE
|
5736 L2_FHDR_ERRORS_ALIGNMENT
|
5737 L2_FHDR_ERRORS_TOO_SHORT
|
5738 L2_FHDR_ERRORS_GIANT_FRAME
)) {
5740 goto loopback_test_done
;
5743 if ((rx_hdr
->l2_fhdr_pkt_len
- 4) != pkt_size
) {
5744 goto loopback_test_done
;
5747 for (i
= 14; i
< pkt_size
; i
++) {
5748 if (*(rx_skb
->data
+ i
) != (unsigned char) (i
& 0xff)) {
5749 goto loopback_test_done
;
5760 #define BNX2_MAC_LOOPBACK_FAILED 1
5761 #define BNX2_PHY_LOOPBACK_FAILED 2
5762 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5763 BNX2_PHY_LOOPBACK_FAILED)
5766 bnx2_test_loopback(struct bnx2
*bp
)
5770 if (!netif_running(bp
->dev
))
5771 return BNX2_LOOPBACK_FAILED
;
5773 bnx2_reset_nic(bp
, BNX2_DRV_MSG_CODE_RESET
);
5774 spin_lock_bh(&bp
->phy_lock
);
5775 bnx2_init_phy(bp
, 1);
5776 spin_unlock_bh(&bp
->phy_lock
);
5777 if (bnx2_run_loopback(bp
, BNX2_MAC_LOOPBACK
))
5778 rc
|= BNX2_MAC_LOOPBACK_FAILED
;
5779 if (bnx2_run_loopback(bp
, BNX2_PHY_LOOPBACK
))
5780 rc
|= BNX2_PHY_LOOPBACK_FAILED
;
5784 #define NVRAM_SIZE 0x200
5785 #define CRC32_RESIDUAL 0xdebb20e3
5788 bnx2_test_nvram(struct bnx2
*bp
)
5790 __be32 buf
[NVRAM_SIZE
/ 4];
5791 u8
*data
= (u8
*) buf
;
5795 if ((rc
= bnx2_nvram_read(bp
, 0, data
, 4)) != 0)
5796 goto test_nvram_done
;
5798 magic
= be32_to_cpu(buf
[0]);
5799 if (magic
!= 0x669955aa) {
5801 goto test_nvram_done
;
5804 if ((rc
= bnx2_nvram_read(bp
, 0x100, data
, NVRAM_SIZE
)) != 0)
5805 goto test_nvram_done
;
5807 csum
= ether_crc_le(0x100, data
);
5808 if (csum
!= CRC32_RESIDUAL
) {
5810 goto test_nvram_done
;
5813 csum
= ether_crc_le(0x100, data
+ 0x100);
5814 if (csum
!= CRC32_RESIDUAL
) {
5823 bnx2_test_link(struct bnx2
*bp
)
5827 if (!netif_running(bp
->dev
))
5830 if (bp
->phy_flags
& BNX2_PHY_FLAG_REMOTE_PHY_CAP
) {
5835 spin_lock_bh(&bp
->phy_lock
);
5836 bnx2_enable_bmsr1(bp
);
5837 bnx2_read_phy(bp
, bp
->mii_bmsr1
, &bmsr
);
5838 bnx2_read_phy(bp
, bp
->mii_bmsr1
, &bmsr
);
5839 bnx2_disable_bmsr1(bp
);
5840 spin_unlock_bh(&bp
->phy_lock
);
5842 if (bmsr
& BMSR_LSTATUS
) {
5849 bnx2_test_intr(struct bnx2
*bp
)
5854 if (!netif_running(bp
->dev
))
5857 status_idx
= REG_RD(bp
, BNX2_PCICFG_INT_ACK_CMD
) & 0xffff;
5859 /* This register is not touched during run-time. */
5860 REG_WR(bp
, BNX2_HC_COMMAND
, bp
->hc_cmd
| BNX2_HC_COMMAND_COAL_NOW
);
5861 REG_RD(bp
, BNX2_HC_COMMAND
);
5863 for (i
= 0; i
< 10; i
++) {
5864 if ((REG_RD(bp
, BNX2_PCICFG_INT_ACK_CMD
) & 0xffff) !=
5870 msleep_interruptible(10);
5878 /* Determining link for parallel detection. */
5880 bnx2_5706_serdes_has_link(struct bnx2
*bp
)
5882 u32 mode_ctl
, an_dbg
, exp
;
5884 if (bp
->phy_flags
& BNX2_PHY_FLAG_NO_PARALLEL
)
5887 bnx2_write_phy(bp
, MII_BNX2_MISC_SHADOW
, MISC_SHDW_MODE_CTL
);
5888 bnx2_read_phy(bp
, MII_BNX2_MISC_SHADOW
, &mode_ctl
);
5890 if (!(mode_ctl
& MISC_SHDW_MODE_CTL_SIG_DET
))
5893 bnx2_write_phy(bp
, MII_BNX2_MISC_SHADOW
, MISC_SHDW_AN_DBG
);
5894 bnx2_read_phy(bp
, MII_BNX2_MISC_SHADOW
, &an_dbg
);
5895 bnx2_read_phy(bp
, MII_BNX2_MISC_SHADOW
, &an_dbg
);
5897 if (an_dbg
& (MISC_SHDW_AN_DBG_NOSYNC
| MISC_SHDW_AN_DBG_RUDI_INVALID
))
5900 bnx2_write_phy(bp
, MII_BNX2_DSP_ADDRESS
, MII_EXPAND_REG1
);
5901 bnx2_read_phy(bp
, MII_BNX2_DSP_RW_PORT
, &exp
);
5902 bnx2_read_phy(bp
, MII_BNX2_DSP_RW_PORT
, &exp
);
5904 if (exp
& MII_EXPAND_REG1_RUDI_C
) /* receiving CONFIG */
5911 bnx2_5706_serdes_timer(struct bnx2
*bp
)
5915 spin_lock(&bp
->phy_lock
);
5916 if (bp
->serdes_an_pending
) {
5917 bp
->serdes_an_pending
--;
5919 } else if ((bp
->link_up
== 0) && (bp
->autoneg
& AUTONEG_SPEED
)) {
5922 bp
->current_interval
= BNX2_TIMER_INTERVAL
;
5924 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
5926 if (bmcr
& BMCR_ANENABLE
) {
5927 if (bnx2_5706_serdes_has_link(bp
)) {
5928 bmcr
&= ~BMCR_ANENABLE
;
5929 bmcr
|= BMCR_SPEED1000
| BMCR_FULLDPLX
;
5930 bnx2_write_phy(bp
, bp
->mii_bmcr
, bmcr
);
5931 bp
->phy_flags
|= BNX2_PHY_FLAG_PARALLEL_DETECT
;
5935 else if ((bp
->link_up
) && (bp
->autoneg
& AUTONEG_SPEED
) &&
5936 (bp
->phy_flags
& BNX2_PHY_FLAG_PARALLEL_DETECT
)) {
5939 bnx2_write_phy(bp
, 0x17, 0x0f01);
5940 bnx2_read_phy(bp
, 0x15, &phy2
);
5944 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
5945 bmcr
|= BMCR_ANENABLE
;
5946 bnx2_write_phy(bp
, bp
->mii_bmcr
, bmcr
);
5948 bp
->phy_flags
&= ~BNX2_PHY_FLAG_PARALLEL_DETECT
;
5951 bp
->current_interval
= BNX2_TIMER_INTERVAL
;
5956 bnx2_write_phy(bp
, MII_BNX2_MISC_SHADOW
, MISC_SHDW_AN_DBG
);
5957 bnx2_read_phy(bp
, MII_BNX2_MISC_SHADOW
, &val
);
5958 bnx2_read_phy(bp
, MII_BNX2_MISC_SHADOW
, &val
);
5960 if (bp
->link_up
&& (val
& MISC_SHDW_AN_DBG_NOSYNC
)) {
5961 if (!(bp
->phy_flags
& BNX2_PHY_FLAG_FORCED_DOWN
)) {
5962 bnx2_5706s_force_link_dn(bp
, 1);
5963 bp
->phy_flags
|= BNX2_PHY_FLAG_FORCED_DOWN
;
5966 } else if (!bp
->link_up
&& !(val
& MISC_SHDW_AN_DBG_NOSYNC
))
5969 spin_unlock(&bp
->phy_lock
);
5973 bnx2_5708_serdes_timer(struct bnx2
*bp
)
5975 if (bp
->phy_flags
& BNX2_PHY_FLAG_REMOTE_PHY_CAP
)
5978 if ((bp
->phy_flags
& BNX2_PHY_FLAG_2_5G_CAPABLE
) == 0) {
5979 bp
->serdes_an_pending
= 0;
5983 spin_lock(&bp
->phy_lock
);
5984 if (bp
->serdes_an_pending
)
5985 bp
->serdes_an_pending
--;
5986 else if ((bp
->link_up
== 0) && (bp
->autoneg
& AUTONEG_SPEED
)) {
5989 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
5990 if (bmcr
& BMCR_ANENABLE
) {
5991 bnx2_enable_forced_2g5(bp
);
5992 bp
->current_interval
= BNX2_SERDES_FORCED_TIMEOUT
;
5994 bnx2_disable_forced_2g5(bp
);
5995 bp
->serdes_an_pending
= 2;
5996 bp
->current_interval
= BNX2_TIMER_INTERVAL
;
6000 bp
->current_interval
= BNX2_TIMER_INTERVAL
;
6002 spin_unlock(&bp
->phy_lock
);
6006 bnx2_timer(unsigned long data
)
6008 struct bnx2
*bp
= (struct bnx2
*) data
;
6010 if (!netif_running(bp
->dev
))
6013 if (atomic_read(&bp
->intr_sem
) != 0)
6014 goto bnx2_restart_timer
;
6016 if ((bp
->flags
& (BNX2_FLAG_USING_MSI
| BNX2_FLAG_ONE_SHOT_MSI
)) ==
6017 BNX2_FLAG_USING_MSI
)
6018 bnx2_chk_missed_msi(bp
);
6020 bnx2_send_heart_beat(bp
);
6022 bp
->stats_blk
->stat_FwRxDrop
=
6023 bnx2_reg_rd_ind(bp
, BNX2_FW_RX_DROP_COUNT
);
6025 /* workaround occasional corrupted counters */
6026 if (CHIP_NUM(bp
) == CHIP_NUM_5708
&& bp
->stats_ticks
)
6027 REG_WR(bp
, BNX2_HC_COMMAND
, bp
->hc_cmd
|
6028 BNX2_HC_COMMAND_STATS_NOW
);
6030 if (bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) {
6031 if (CHIP_NUM(bp
) == CHIP_NUM_5706
)
6032 bnx2_5706_serdes_timer(bp
);
6034 bnx2_5708_serdes_timer(bp
);
6038 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
6042 bnx2_request_irq(struct bnx2
*bp
)
6044 unsigned long flags
;
6045 struct bnx2_irq
*irq
;
6048 if (bp
->flags
& BNX2_FLAG_USING_MSI_OR_MSIX
)
6051 flags
= IRQF_SHARED
;
6053 for (i
= 0; i
< bp
->irq_nvecs
; i
++) {
6054 irq
= &bp
->irq_tbl
[i
];
6055 rc
= request_irq(irq
->vector
, irq
->handler
, flags
, irq
->name
,
6065 bnx2_free_irq(struct bnx2
*bp
)
6067 struct bnx2_irq
*irq
;
6070 for (i
= 0; i
< bp
->irq_nvecs
; i
++) {
6071 irq
= &bp
->irq_tbl
[i
];
6073 free_irq(irq
->vector
, &bp
->bnx2_napi
[i
]);
6076 if (bp
->flags
& BNX2_FLAG_USING_MSI
)
6077 pci_disable_msi(bp
->pdev
);
6078 else if (bp
->flags
& BNX2_FLAG_USING_MSIX
)
6079 pci_disable_msix(bp
->pdev
);
6081 bp
->flags
&= ~(BNX2_FLAG_USING_MSI_OR_MSIX
| BNX2_FLAG_ONE_SHOT_MSI
);
6085 bnx2_enable_msix(struct bnx2
*bp
, int msix_vecs
)
6088 struct msix_entry msix_ent
[BNX2_MAX_MSIX_VEC
];
6089 struct net_device
*dev
= bp
->dev
;
6090 const int len
= sizeof(bp
->irq_tbl
[0].name
);
6092 bnx2_setup_msix_tbl(bp
);
6093 REG_WR(bp
, BNX2_PCI_MSIX_CONTROL
, BNX2_MAX_MSIX_HW_VEC
- 1);
6094 REG_WR(bp
, BNX2_PCI_MSIX_TBL_OFF_BIR
, BNX2_PCI_GRC_WINDOW2_BASE
);
6095 REG_WR(bp
, BNX2_PCI_MSIX_PBA_OFF_BIT
, BNX2_PCI_GRC_WINDOW3_BASE
);
6097 for (i
= 0; i
< BNX2_MAX_MSIX_VEC
; i
++) {
6098 msix_ent
[i
].entry
= i
;
6099 msix_ent
[i
].vector
= 0;
6102 rc
= pci_enable_msix(bp
->pdev
, msix_ent
, BNX2_MAX_MSIX_VEC
);
6106 bp
->irq_nvecs
= msix_vecs
;
6107 bp
->flags
|= BNX2_FLAG_USING_MSIX
| BNX2_FLAG_ONE_SHOT_MSI
;
6108 for (i
= 0; i
< BNX2_MAX_MSIX_VEC
; i
++) {
6109 bp
->irq_tbl
[i
].vector
= msix_ent
[i
].vector
;
6110 snprintf(bp
->irq_tbl
[i
].name
, len
, "%s-%d", dev
->name
, i
);
6111 bp
->irq_tbl
[i
].handler
= bnx2_msi_1shot
;
6116 bnx2_setup_int_mode(struct bnx2
*bp
, int dis_msi
)
6118 int cpus
= num_online_cpus();
6119 int msix_vecs
= min(cpus
+ 1, RX_MAX_RINGS
);
6121 bp
->irq_tbl
[0].handler
= bnx2_interrupt
;
6122 strcpy(bp
->irq_tbl
[0].name
, bp
->dev
->name
);
6124 bp
->irq_tbl
[0].vector
= bp
->pdev
->irq
;
6126 if ((bp
->flags
& BNX2_FLAG_MSIX_CAP
) && !dis_msi
&& cpus
> 1)
6127 bnx2_enable_msix(bp
, msix_vecs
);
6129 if ((bp
->flags
& BNX2_FLAG_MSI_CAP
) && !dis_msi
&&
6130 !(bp
->flags
& BNX2_FLAG_USING_MSIX
)) {
6131 if (pci_enable_msi(bp
->pdev
) == 0) {
6132 bp
->flags
|= BNX2_FLAG_USING_MSI
;
6133 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
6134 bp
->flags
|= BNX2_FLAG_ONE_SHOT_MSI
;
6135 bp
->irq_tbl
[0].handler
= bnx2_msi_1shot
;
6137 bp
->irq_tbl
[0].handler
= bnx2_msi
;
6139 bp
->irq_tbl
[0].vector
= bp
->pdev
->irq
;
6143 bp
->num_tx_rings
= rounddown_pow_of_two(bp
->irq_nvecs
);
6144 bp
->dev
->real_num_tx_queues
= bp
->num_tx_rings
;
6146 bp
->num_rx_rings
= bp
->irq_nvecs
;
6149 /* Called with rtnl_lock */
6151 bnx2_open(struct net_device
*dev
)
6153 struct bnx2
*bp
= netdev_priv(dev
);
6156 netif_carrier_off(dev
);
6158 bnx2_set_power_state(bp
, PCI_D0
);
6159 bnx2_disable_int(bp
);
6161 bnx2_setup_int_mode(bp
, disable_msi
);
6162 bnx2_napi_enable(bp
);
6163 rc
= bnx2_alloc_mem(bp
);
6167 rc
= bnx2_request_irq(bp
);
6171 rc
= bnx2_init_nic(bp
, 1);
6175 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
6177 atomic_set(&bp
->intr_sem
, 0);
6179 bnx2_enable_int(bp
);
6181 if (bp
->flags
& BNX2_FLAG_USING_MSI
) {
6182 /* Test MSI to make sure it is working
6183 * If MSI test fails, go back to INTx mode
6185 if (bnx2_test_intr(bp
) != 0) {
6186 printk(KERN_WARNING PFX
"%s: No interrupt was generated"
6187 " using MSI, switching to INTx mode. Please"
6188 " report this failure to the PCI maintainer"
6189 " and include system chipset information.\n",
6192 bnx2_disable_int(bp
);
6195 bnx2_setup_int_mode(bp
, 1);
6197 rc
= bnx2_init_nic(bp
, 0);
6200 rc
= bnx2_request_irq(bp
);
6203 del_timer_sync(&bp
->timer
);
6206 bnx2_enable_int(bp
);
6209 if (bp
->flags
& BNX2_FLAG_USING_MSI
)
6210 printk(KERN_INFO PFX
"%s: using MSI\n", dev
->name
);
6211 else if (bp
->flags
& BNX2_FLAG_USING_MSIX
)
6212 printk(KERN_INFO PFX
"%s: using MSIX\n", dev
->name
);
6214 netif_tx_start_all_queues(dev
);
6219 bnx2_napi_disable(bp
);
6227 bnx2_reset_task(struct work_struct
*work
)
6229 struct bnx2
*bp
= container_of(work
, struct bnx2
, reset_task
);
6231 if (!netif_running(bp
->dev
))
6234 bnx2_netif_stop(bp
);
6236 bnx2_init_nic(bp
, 1);
6238 atomic_set(&bp
->intr_sem
, 1);
6239 bnx2_netif_start(bp
);
6243 bnx2_tx_timeout(struct net_device
*dev
)
6245 struct bnx2
*bp
= netdev_priv(dev
);
6247 /* This allows the netif to be shutdown gracefully before resetting */
6248 schedule_work(&bp
->reset_task
);
6252 /* Called with rtnl_lock */
6254 bnx2_vlan_rx_register(struct net_device
*dev
, struct vlan_group
*vlgrp
)
6256 struct bnx2
*bp
= netdev_priv(dev
);
6258 bnx2_netif_stop(bp
);
6261 bnx2_set_rx_mode(dev
);
6262 if (bp
->flags
& BNX2_FLAG_CAN_KEEP_VLAN
)
6263 bnx2_fw_sync(bp
, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE
, 0, 1);
6265 bnx2_netif_start(bp
);
6269 /* Called with netif_tx_lock.
6270 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6271 * netif_wake_queue().
6274 bnx2_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
6276 struct bnx2
*bp
= netdev_priv(dev
);
6279 struct sw_tx_bd
*tx_buf
;
6280 u32 len
, vlan_tag_flags
, last_frag
, mss
;
6281 u16 prod
, ring_prod
;
6283 struct bnx2_napi
*bnapi
;
6284 struct bnx2_tx_ring_info
*txr
;
6285 struct netdev_queue
*txq
;
6286 struct skb_shared_info
*sp
;
6288 /* Determine which tx ring we will be placed on */
6289 i
= skb_get_queue_mapping(skb
);
6290 bnapi
= &bp
->bnx2_napi
[i
];
6291 txr
= &bnapi
->tx_ring
;
6292 txq
= netdev_get_tx_queue(dev
, i
);
6294 if (unlikely(bnx2_tx_avail(bp
, txr
) <
6295 (skb_shinfo(skb
)->nr_frags
+ 1))) {
6296 netif_tx_stop_queue(txq
);
6297 printk(KERN_ERR PFX
"%s: BUG! Tx ring full when queue awake!\n",
6300 return NETDEV_TX_BUSY
;
6302 len
= skb_headlen(skb
);
6303 prod
= txr
->tx_prod
;
6304 ring_prod
= TX_RING_IDX(prod
);
6307 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
6308 vlan_tag_flags
|= TX_BD_FLAGS_TCP_UDP_CKSUM
;
6312 if (bp
->vlgrp
&& vlan_tx_tag_present(skb
)) {
6314 (TX_BD_FLAGS_VLAN_TAG
| (vlan_tx_tag_get(skb
) << 16));
6317 if ((mss
= skb_shinfo(skb
)->gso_size
)) {
6321 vlan_tag_flags
|= TX_BD_FLAGS_SW_LSO
;
6323 tcp_opt_len
= tcp_optlen(skb
);
6325 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV6
) {
6326 u32 tcp_off
= skb_transport_offset(skb
) -
6327 sizeof(struct ipv6hdr
) - ETH_HLEN
;
6329 vlan_tag_flags
|= ((tcp_opt_len
>> 2) << 8) |
6330 TX_BD_FLAGS_SW_FLAGS
;
6331 if (likely(tcp_off
== 0))
6332 vlan_tag_flags
&= ~TX_BD_FLAGS_TCP6_OFF0_MSK
;
6335 vlan_tag_flags
|= ((tcp_off
& 0x3) <<
6336 TX_BD_FLAGS_TCP6_OFF0_SHL
) |
6337 ((tcp_off
& 0x10) <<
6338 TX_BD_FLAGS_TCP6_OFF4_SHL
);
6339 mss
|= (tcp_off
& 0xc) << TX_BD_TCP6_OFF2_SHL
;
6343 if (tcp_opt_len
|| (iph
->ihl
> 5)) {
6344 vlan_tag_flags
|= ((iph
->ihl
- 5) +
6345 (tcp_opt_len
>> 2)) << 8;
6351 if (skb_dma_map(&bp
->pdev
->dev
, skb
, DMA_TO_DEVICE
)) {
6353 return NETDEV_TX_OK
;
6356 sp
= skb_shinfo(skb
);
6357 mapping
= sp
->dma_head
;
6359 tx_buf
= &txr
->tx_buf_ring
[ring_prod
];
6362 txbd
= &txr
->tx_desc_ring
[ring_prod
];
6364 txbd
->tx_bd_haddr_hi
= (u64
) mapping
>> 32;
6365 txbd
->tx_bd_haddr_lo
= (u64
) mapping
& 0xffffffff;
6366 txbd
->tx_bd_mss_nbytes
= len
| (mss
<< 16);
6367 txbd
->tx_bd_vlan_tag_flags
= vlan_tag_flags
| TX_BD_FLAGS_START
;
6369 last_frag
= skb_shinfo(skb
)->nr_frags
;
6370 tx_buf
->nr_frags
= last_frag
;
6371 tx_buf
->is_gso
= skb_is_gso(skb
);
6373 for (i
= 0; i
< last_frag
; i
++) {
6374 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
6376 prod
= NEXT_TX_BD(prod
);
6377 ring_prod
= TX_RING_IDX(prod
);
6378 txbd
= &txr
->tx_desc_ring
[ring_prod
];
6381 mapping
= sp
->dma_maps
[i
];
6383 txbd
->tx_bd_haddr_hi
= (u64
) mapping
>> 32;
6384 txbd
->tx_bd_haddr_lo
= (u64
) mapping
& 0xffffffff;
6385 txbd
->tx_bd_mss_nbytes
= len
| (mss
<< 16);
6386 txbd
->tx_bd_vlan_tag_flags
= vlan_tag_flags
;
6389 txbd
->tx_bd_vlan_tag_flags
|= TX_BD_FLAGS_END
;
6391 prod
= NEXT_TX_BD(prod
);
6392 txr
->tx_prod_bseq
+= skb
->len
;
6394 REG_WR16(bp
, txr
->tx_bidx_addr
, prod
);
6395 REG_WR(bp
, txr
->tx_bseq_addr
, txr
->tx_prod_bseq
);
6399 txr
->tx_prod
= prod
;
6401 if (unlikely(bnx2_tx_avail(bp
, txr
) <= MAX_SKB_FRAGS
)) {
6402 netif_tx_stop_queue(txq
);
6403 if (bnx2_tx_avail(bp
, txr
) > bp
->tx_wake_thresh
)
6404 netif_tx_wake_queue(txq
);
6407 return NETDEV_TX_OK
;
6410 /* Called with rtnl_lock */
6412 bnx2_close(struct net_device
*dev
)
6414 struct bnx2
*bp
= netdev_priv(dev
);
6416 cancel_work_sync(&bp
->reset_task
);
6418 bnx2_disable_int_sync(bp
);
6419 bnx2_napi_disable(bp
);
6420 del_timer_sync(&bp
->timer
);
6421 bnx2_shutdown_chip(bp
);
6426 netif_carrier_off(bp
->dev
);
6427 bnx2_set_power_state(bp
, PCI_D3hot
);
6431 #define GET_NET_STATS64(ctr) \
6432 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6433 (unsigned long) (ctr##_lo)
6435 #define GET_NET_STATS32(ctr) \
6438 #if (BITS_PER_LONG == 64)
6439 #define GET_NET_STATS GET_NET_STATS64
6441 #define GET_NET_STATS GET_NET_STATS32
6444 static struct net_device_stats
*
6445 bnx2_get_stats(struct net_device
*dev
)
6447 struct bnx2
*bp
= netdev_priv(dev
);
6448 struct statistics_block
*stats_blk
= bp
->stats_blk
;
6449 struct net_device_stats
*net_stats
= &dev
->stats
;
6451 if (bp
->stats_blk
== NULL
) {
6454 net_stats
->rx_packets
=
6455 GET_NET_STATS(stats_blk
->stat_IfHCInUcastPkts
) +
6456 GET_NET_STATS(stats_blk
->stat_IfHCInMulticastPkts
) +
6457 GET_NET_STATS(stats_blk
->stat_IfHCInBroadcastPkts
);
6459 net_stats
->tx_packets
=
6460 GET_NET_STATS(stats_blk
->stat_IfHCOutUcastPkts
) +
6461 GET_NET_STATS(stats_blk
->stat_IfHCOutMulticastPkts
) +
6462 GET_NET_STATS(stats_blk
->stat_IfHCOutBroadcastPkts
);
6464 net_stats
->rx_bytes
=
6465 GET_NET_STATS(stats_blk
->stat_IfHCInOctets
);
6467 net_stats
->tx_bytes
=
6468 GET_NET_STATS(stats_blk
->stat_IfHCOutOctets
);
6470 net_stats
->multicast
=
6471 GET_NET_STATS(stats_blk
->stat_IfHCOutMulticastPkts
);
6473 net_stats
->collisions
=
6474 (unsigned long) stats_blk
->stat_EtherStatsCollisions
;
6476 net_stats
->rx_length_errors
=
6477 (unsigned long) (stats_blk
->stat_EtherStatsUndersizePkts
+
6478 stats_blk
->stat_EtherStatsOverrsizePkts
);
6480 net_stats
->rx_over_errors
=
6481 (unsigned long) stats_blk
->stat_IfInMBUFDiscards
;
6483 net_stats
->rx_frame_errors
=
6484 (unsigned long) stats_blk
->stat_Dot3StatsAlignmentErrors
;
6486 net_stats
->rx_crc_errors
=
6487 (unsigned long) stats_blk
->stat_Dot3StatsFCSErrors
;
6489 net_stats
->rx_errors
= net_stats
->rx_length_errors
+
6490 net_stats
->rx_over_errors
+ net_stats
->rx_frame_errors
+
6491 net_stats
->rx_crc_errors
;
6493 net_stats
->tx_aborted_errors
=
6494 (unsigned long) (stats_blk
->stat_Dot3StatsExcessiveCollisions
+
6495 stats_blk
->stat_Dot3StatsLateCollisions
);
6497 if ((CHIP_NUM(bp
) == CHIP_NUM_5706
) ||
6498 (CHIP_ID(bp
) == CHIP_ID_5708_A0
))
6499 net_stats
->tx_carrier_errors
= 0;
6501 net_stats
->tx_carrier_errors
=
6503 stats_blk
->stat_Dot3StatsCarrierSenseErrors
;
6506 net_stats
->tx_errors
=
6508 stats_blk
->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6510 net_stats
->tx_aborted_errors
+
6511 net_stats
->tx_carrier_errors
;
6513 net_stats
->rx_missed_errors
=
6514 (unsigned long) (stats_blk
->stat_IfInMBUFDiscards
+
6515 stats_blk
->stat_FwRxDrop
);
6520 /* All ethtool functions called with rtnl_lock */
6523 bnx2_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
6525 struct bnx2
*bp
= netdev_priv(dev
);
6526 int support_serdes
= 0, support_copper
= 0;
6528 cmd
->supported
= SUPPORTED_Autoneg
;
6529 if (bp
->phy_flags
& BNX2_PHY_FLAG_REMOTE_PHY_CAP
) {
6532 } else if (bp
->phy_port
== PORT_FIBRE
)
6537 if (support_serdes
) {
6538 cmd
->supported
|= SUPPORTED_1000baseT_Full
|
6540 if (bp
->phy_flags
& BNX2_PHY_FLAG_2_5G_CAPABLE
)
6541 cmd
->supported
|= SUPPORTED_2500baseX_Full
;
6544 if (support_copper
) {
6545 cmd
->supported
|= SUPPORTED_10baseT_Half
|
6546 SUPPORTED_10baseT_Full
|
6547 SUPPORTED_100baseT_Half
|
6548 SUPPORTED_100baseT_Full
|
6549 SUPPORTED_1000baseT_Full
|
6554 spin_lock_bh(&bp
->phy_lock
);
6555 cmd
->port
= bp
->phy_port
;
6556 cmd
->advertising
= bp
->advertising
;
6558 if (bp
->autoneg
& AUTONEG_SPEED
) {
6559 cmd
->autoneg
= AUTONEG_ENABLE
;
6562 cmd
->autoneg
= AUTONEG_DISABLE
;
6565 if (netif_carrier_ok(dev
)) {
6566 cmd
->speed
= bp
->line_speed
;
6567 cmd
->duplex
= bp
->duplex
;
6573 spin_unlock_bh(&bp
->phy_lock
);
6575 cmd
->transceiver
= XCVR_INTERNAL
;
6576 cmd
->phy_address
= bp
->phy_addr
;
6582 bnx2_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
6584 struct bnx2
*bp
= netdev_priv(dev
);
6585 u8 autoneg
= bp
->autoneg
;
6586 u8 req_duplex
= bp
->req_duplex
;
6587 u16 req_line_speed
= bp
->req_line_speed
;
6588 u32 advertising
= bp
->advertising
;
6591 spin_lock_bh(&bp
->phy_lock
);
6593 if (cmd
->port
!= PORT_TP
&& cmd
->port
!= PORT_FIBRE
)
6594 goto err_out_unlock
;
6596 if (cmd
->port
!= bp
->phy_port
&&
6597 !(bp
->phy_flags
& BNX2_PHY_FLAG_REMOTE_PHY_CAP
))
6598 goto err_out_unlock
;
6600 /* If device is down, we can store the settings only if the user
6601 * is setting the currently active port.
6603 if (!netif_running(dev
) && cmd
->port
!= bp
->phy_port
)
6604 goto err_out_unlock
;
6606 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
6607 autoneg
|= AUTONEG_SPEED
;
6609 cmd
->advertising
&= ETHTOOL_ALL_COPPER_SPEED
;
6611 /* allow advertising 1 speed */
6612 if ((cmd
->advertising
== ADVERTISED_10baseT_Half
) ||
6613 (cmd
->advertising
== ADVERTISED_10baseT_Full
) ||
6614 (cmd
->advertising
== ADVERTISED_100baseT_Half
) ||
6615 (cmd
->advertising
== ADVERTISED_100baseT_Full
)) {
6617 if (cmd
->port
== PORT_FIBRE
)
6618 goto err_out_unlock
;
6620 advertising
= cmd
->advertising
;
6622 } else if (cmd
->advertising
== ADVERTISED_2500baseX_Full
) {
6623 if (!(bp
->phy_flags
& BNX2_PHY_FLAG_2_5G_CAPABLE
) ||
6624 (cmd
->port
== PORT_TP
))
6625 goto err_out_unlock
;
6626 } else if (cmd
->advertising
== ADVERTISED_1000baseT_Full
)
6627 advertising
= cmd
->advertising
;
6628 else if (cmd
->advertising
== ADVERTISED_1000baseT_Half
)
6629 goto err_out_unlock
;
6631 if (cmd
->port
== PORT_FIBRE
)
6632 advertising
= ETHTOOL_ALL_FIBRE_SPEED
;
6634 advertising
= ETHTOOL_ALL_COPPER_SPEED
;
6636 advertising
|= ADVERTISED_Autoneg
;
6639 if (cmd
->port
== PORT_FIBRE
) {
6640 if ((cmd
->speed
!= SPEED_1000
&&
6641 cmd
->speed
!= SPEED_2500
) ||
6642 (cmd
->duplex
!= DUPLEX_FULL
))
6643 goto err_out_unlock
;
6645 if (cmd
->speed
== SPEED_2500
&&
6646 !(bp
->phy_flags
& BNX2_PHY_FLAG_2_5G_CAPABLE
))
6647 goto err_out_unlock
;
6649 else if (cmd
->speed
== SPEED_1000
|| cmd
->speed
== SPEED_2500
)
6650 goto err_out_unlock
;
6652 autoneg
&= ~AUTONEG_SPEED
;
6653 req_line_speed
= cmd
->speed
;
6654 req_duplex
= cmd
->duplex
;
6658 bp
->autoneg
= autoneg
;
6659 bp
->advertising
= advertising
;
6660 bp
->req_line_speed
= req_line_speed
;
6661 bp
->req_duplex
= req_duplex
;
6664 /* If device is down, the new settings will be picked up when it is
6667 if (netif_running(dev
))
6668 err
= bnx2_setup_phy(bp
, cmd
->port
);
6671 spin_unlock_bh(&bp
->phy_lock
);
6677 bnx2_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
6679 struct bnx2
*bp
= netdev_priv(dev
);
6681 strcpy(info
->driver
, DRV_MODULE_NAME
);
6682 strcpy(info
->version
, DRV_MODULE_VERSION
);
6683 strcpy(info
->bus_info
, pci_name(bp
->pdev
));
6684 strcpy(info
->fw_version
, bp
->fw_version
);
6687 #define BNX2_REGDUMP_LEN (32 * 1024)
6690 bnx2_get_regs_len(struct net_device
*dev
)
6692 return BNX2_REGDUMP_LEN
;
6696 bnx2_get_regs(struct net_device
*dev
, struct ethtool_regs
*regs
, void *_p
)
6698 u32
*p
= _p
, i
, offset
;
6700 struct bnx2
*bp
= netdev_priv(dev
);
6701 u32 reg_boundaries
[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6702 0x0800, 0x0880, 0x0c00, 0x0c10,
6703 0x0c30, 0x0d08, 0x1000, 0x101c,
6704 0x1040, 0x1048, 0x1080, 0x10a4,
6705 0x1400, 0x1490, 0x1498, 0x14f0,
6706 0x1500, 0x155c, 0x1580, 0x15dc,
6707 0x1600, 0x1658, 0x1680, 0x16d8,
6708 0x1800, 0x1820, 0x1840, 0x1854,
6709 0x1880, 0x1894, 0x1900, 0x1984,
6710 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6711 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6712 0x2000, 0x2030, 0x23c0, 0x2400,
6713 0x2800, 0x2820, 0x2830, 0x2850,
6714 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6715 0x3c00, 0x3c94, 0x4000, 0x4010,
6716 0x4080, 0x4090, 0x43c0, 0x4458,
6717 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6718 0x4fc0, 0x5010, 0x53c0, 0x5444,
6719 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6720 0x5fc0, 0x6000, 0x6400, 0x6428,
6721 0x6800, 0x6848, 0x684c, 0x6860,
6722 0x6888, 0x6910, 0x8000 };
6726 memset(p
, 0, BNX2_REGDUMP_LEN
);
6728 if (!netif_running(bp
->dev
))
6732 offset
= reg_boundaries
[0];
6734 while (offset
< BNX2_REGDUMP_LEN
) {
6735 *p
++ = REG_RD(bp
, offset
);
6737 if (offset
== reg_boundaries
[i
+ 1]) {
6738 offset
= reg_boundaries
[i
+ 2];
6739 p
= (u32
*) (orig_p
+ offset
);
/* ethtool .get_wol handler: report Wake-on-LAN capability/state.
 * If the board cannot support WoL (BNX2_FLAG_NO_WOL) nothing is
 * advertised; otherwise only WAKE_MAGIC is supported, and wolopts
 * reflects whether it is currently enabled.  The SecureOn password
 * area is always zeroed -- the hardware has no such feature here.
 * NOTE(review): lossy extraction; some lines of the original function
 * (the else arm, closing braces) are missing from this chunk.
 */
6746 bnx2_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
6748 struct bnx2
*bp
= netdev_priv(dev
);
6750 if (bp
->flags
& BNX2_FLAG_NO_WOL
) {
6755 wol
->supported
= WAKE_MAGIC
;
6757 wol
->wolopts
= WAKE_MAGIC
;
6761 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
/* ethtool .set_wol handler: accept only WAKE_MAGIC (any other wolopts
 * bit is rejected -- the dropped line after the first `if` presumably
 * returned -EINVAL).  Enabling magic-packet wake is also refused when
 * the board is flagged BNX2_FLAG_NO_WOL.
 * NOTE(review): lossy extraction; return statements and the enable/
 * disable assignments are missing from this chunk.
 */
6765 bnx2_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
6767 struct bnx2
*bp
= netdev_priv(dev
);
6769 if (wol
->wolopts
& ~WAKE_MAGIC
)
6772 if (wol
->wolopts
& WAKE_MAGIC
) {
6773 if (bp
->flags
& BNX2_FLAG_NO_WOL
)
6785 bnx2_nway_reset(struct net_device
*dev
)
6787 struct bnx2
*bp
= netdev_priv(dev
);
6790 if (!netif_running(dev
))
6793 if (!(bp
->autoneg
& AUTONEG_SPEED
)) {
6797 spin_lock_bh(&bp
->phy_lock
);
6799 if (bp
->phy_flags
& BNX2_PHY_FLAG_REMOTE_PHY_CAP
) {
6802 rc
= bnx2_setup_remote_phy(bp
, bp
->phy_port
);
6803 spin_unlock_bh(&bp
->phy_lock
);
6807 /* Force a link down visible on the other side */
6808 if (bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) {
6809 bnx2_write_phy(bp
, bp
->mii_bmcr
, BMCR_LOOPBACK
);
6810 spin_unlock_bh(&bp
->phy_lock
);
6814 spin_lock_bh(&bp
->phy_lock
);
6816 bp
->current_interval
= BNX2_SERDES_AN_TIMEOUT
;
6817 bp
->serdes_an_pending
= 1;
6818 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
6821 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
6822 bmcr
&= ~BMCR_LOOPBACK
;
6823 bnx2_write_phy(bp
, bp
->mii_bmcr
, bmcr
| BMCR_ANRESTART
| BMCR_ANENABLE
);
6825 spin_unlock_bh(&bp
->phy_lock
);
/* ethtool .get_link handler.
 * NOTE(review): only the prologue survives in this extraction; the
 * return of the link state (presumably bp->link_up) was dropped.
 */
6831 bnx2_get_link(struct net_device
*dev
)
6833 struct bnx2
*bp
= netdev_priv(dev
);
/* ethtool .get_eeprom_len handler: size of the NVRAM/flash device, or
 * 0 (dropped line) when no flash was detected (bp->flash_info == NULL).
 */
6839 bnx2_get_eeprom_len(struct net_device
*dev
)
6841 struct bnx2
*bp
= netdev_priv(dev
);
6843 if (bp
->flash_info
== NULL
)
6846 return (int) bp
->flash_size
;
/* ethtool .get_eeprom handler: read eeprom->len bytes from NVRAM at
 * eeprom->offset into the caller's buffer.  Requires the interface to
 * be up (NVRAM access needs the chip powered/initialized); offset/len
 * bounds were already validated by the ethtool core.
 * NOTE(review): lossy extraction; the `eebuf` parameter line and the
 * final return are missing from this chunk.
 */
6850 bnx2_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
,
6853 struct bnx2
*bp
= netdev_priv(dev
);
6856 if (!netif_running(dev
))
6859 /* parameters already validated in ethtool_get_eeprom */
6861 rc
= bnx2_nvram_read(bp
, eeprom
->offset
, eebuf
, eeprom
->len
);
/* ethtool .set_eeprom handler: mirror image of bnx2_get_eeprom --
 * write eeprom->len bytes to NVRAM at eeprom->offset.  Requires the
 * interface to be up; bounds already validated by the ethtool core.
 * NOTE(review): lossy extraction; `eebuf` parameter line and final
 * return are missing from this chunk.
 */
6867 bnx2_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
,
6870 struct bnx2
*bp
= netdev_priv(dev
);
6873 if (!netif_running(dev
))
6876 /* parameters already validated in ethtool_set_eeprom */
6878 rc
= bnx2_nvram_write(bp
, eeprom
->offset
, eebuf
, eeprom
->len
);
/* ethtool .get_coalesce handler: zero the whole struct first (fields
 * this driver does not support stay 0), then copy the current host
 * coalescing parameters out of the bnx2 softc.  "_irq" variants are
 * the values used while an interrupt is pending; the plain variants
 * apply otherwise.
 */
6884 bnx2_get_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*coal
)
6886 struct bnx2
*bp
= netdev_priv(dev
);
6888 memset(coal
, 0, sizeof(struct ethtool_coalesce
));
6890 coal
->rx_coalesce_usecs
= bp
->rx_ticks
;
6891 coal
->rx_max_coalesced_frames
= bp
->rx_quick_cons_trip
;
6892 coal
->rx_coalesce_usecs_irq
= bp
->rx_ticks_int
;
6893 coal
->rx_max_coalesced_frames_irq
= bp
->rx_quick_cons_trip_int
;
6895 coal
->tx_coalesce_usecs
= bp
->tx_ticks
;
6896 coal
->tx_max_coalesced_frames
= bp
->tx_quick_cons_trip
;
6897 coal
->tx_coalesce_usecs_irq
= bp
->tx_ticks_int
;
6898 coal
->tx_max_coalesced_frames_irq
= bp
->tx_quick_cons_trip_int
;
6900 coal
->stats_block_coalesce_usecs
= bp
->stats_ticks
;
6906 bnx2_set_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*coal
)
6908 struct bnx2
*bp
= netdev_priv(dev
);
6910 bp
->rx_ticks
= (u16
) coal
->rx_coalesce_usecs
;
6911 if (bp
->rx_ticks
> 0x3ff) bp
->rx_ticks
= 0x3ff;
6913 bp
->rx_quick_cons_trip
= (u16
) coal
->rx_max_coalesced_frames
;
6914 if (bp
->rx_quick_cons_trip
> 0xff) bp
->rx_quick_cons_trip
= 0xff;
6916 bp
->rx_ticks_int
= (u16
) coal
->rx_coalesce_usecs_irq
;
6917 if (bp
->rx_ticks_int
> 0x3ff) bp
->rx_ticks_int
= 0x3ff;
6919 bp
->rx_quick_cons_trip_int
= (u16
) coal
->rx_max_coalesced_frames_irq
;
6920 if (bp
->rx_quick_cons_trip_int
> 0xff)
6921 bp
->rx_quick_cons_trip_int
= 0xff;
6923 bp
->tx_ticks
= (u16
) coal
->tx_coalesce_usecs
;
6924 if (bp
->tx_ticks
> 0x3ff) bp
->tx_ticks
= 0x3ff;
6926 bp
->tx_quick_cons_trip
= (u16
) coal
->tx_max_coalesced_frames
;
6927 if (bp
->tx_quick_cons_trip
> 0xff) bp
->tx_quick_cons_trip
= 0xff;
6929 bp
->tx_ticks_int
= (u16
) coal
->tx_coalesce_usecs_irq
;
6930 if (bp
->tx_ticks_int
> 0x3ff) bp
->tx_ticks_int
= 0x3ff;
6932 bp
->tx_quick_cons_trip_int
= (u16
) coal
->tx_max_coalesced_frames_irq
;
6933 if (bp
->tx_quick_cons_trip_int
> 0xff) bp
->tx_quick_cons_trip_int
=
6936 bp
->stats_ticks
= coal
->stats_block_coalesce_usecs
;
6937 if (CHIP_NUM(bp
) == CHIP_NUM_5708
) {
6938 if (bp
->stats_ticks
!= 0 && bp
->stats_ticks
!= USEC_PER_SEC
)
6939 bp
->stats_ticks
= USEC_PER_SEC
;
6941 if (bp
->stats_ticks
> BNX2_HC_STATS_TICKS_HC_STAT_TICKS
)
6942 bp
->stats_ticks
= BNX2_HC_STATS_TICKS_HC_STAT_TICKS
;
6943 bp
->stats_ticks
&= BNX2_HC_STATS_TICKS_HC_STAT_TICKS
;
6945 if (netif_running(bp
->dev
)) {
6946 bnx2_netif_stop(bp
);
6947 bnx2_init_nic(bp
, 0);
6948 bnx2_netif_start(bp
);
/* ethtool .get_ringparam handler: report the hardware ring limits
 * (MAX_TOTAL_RX_DESC_CNT / MAX_TOTAL_RX_PG_DESC_CNT / MAX_TX_DESC_CNT)
 * and the currently configured ring sizes.  The "jumbo" ring maps to
 * the rx page ring used for large packets; there is no mini ring.
 */
6955 bnx2_get_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
6957 struct bnx2
*bp
= netdev_priv(dev
);
6959 ering
->rx_max_pending
= MAX_TOTAL_RX_DESC_CNT
;
6960 ering
->rx_mini_max_pending
= 0;
6961 ering
->rx_jumbo_max_pending
= MAX_TOTAL_RX_PG_DESC_CNT
;
6963 ering
->rx_pending
= bp
->rx_ring_size
;
6964 ering
->rx_mini_pending
= 0;
6965 ering
->rx_jumbo_pending
= bp
->rx_pg_ring_size
;
6967 ering
->tx_max_pending
= MAX_TX_DESC_CNT
;
6968 ering
->tx_pending
= bp
->tx_ring_size
;
/* Apply new rx/tx ring sizes.  If the interface is up: stop traffic,
 * reset the chip, (dropped lines: free the old descriptor memory),
 * record the new sizes, reallocate, re-init and restart.  If the
 * interface is down only the sizes are recorded; the rings are built
 * at the next open.
 * NOTE(review): lossy extraction -- the free/error-handling lines and
 * the return are missing from this chunk.
 */
6972 bnx2_change_ring_size(struct bnx2
*bp
, u32 rx
, u32 tx
)
6974 if (netif_running(bp
->dev
)) {
6975 bnx2_netif_stop(bp
);
6976 bnx2_reset_chip(bp
, BNX2_DRV_MSG_CODE_RESET
);
6981 bnx2_set_rx_ring_size(bp
, rx
);
6982 bp
->tx_ring_size
= tx
;
6984 if (netif_running(bp
->dev
)) {
6987 rc
= bnx2_alloc_mem(bp
);
6990 bnx2_init_nic(bp
, 0);
6991 bnx2_netif_start(bp
);
/* ethtool .set_ringparam handler: validate the requested sizes (rx
 * must fit the hardware maximum; tx must fit the ring AND exceed
 * MAX_SKB_FRAGS so a maximally-fragmented skb can always be queued),
 * then delegate to bnx2_change_ring_size.
 * NOTE(review): lossy extraction; the -EINVAL return inside the
 * validation branch was dropped.
 */
6997 bnx2_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
6999 struct bnx2
*bp
= netdev_priv(dev
);
7002 if ((ering
->rx_pending
> MAX_TOTAL_RX_DESC_CNT
) ||
7003 (ering
->tx_pending
> MAX_TX_DESC_CNT
) ||
7004 (ering
->tx_pending
<= MAX_SKB_FRAGS
)) {
7008 rc
= bnx2_change_ring_size(bp
, ering
->rx_pending
, ering
->tx_pending
);
/* ethtool .get_pauseparam handler: translate the driver's autoneg and
 * flow-control bitmasks into the three boolean fields ethtool expects.
 */
7013 bnx2_get_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
7015 struct bnx2
*bp
= netdev_priv(dev
);
7017 epause
->autoneg
= ((bp
->autoneg
& AUTONEG_FLOW_CTRL
) != 0);
7018 epause
->rx_pause
= ((bp
->flow_ctrl
& FLOW_CTRL_RX
) != 0);
7019 epause
->tx_pause
= ((bp
->flow_ctrl
& FLOW_CTRL_TX
) != 0);
/* ethtool .set_pauseparam handler: rebuild req_flow_ctrl from the
 * requested rx/tx pause booleans, set or clear AUTONEG_FLOW_CTRL, and
 * if the interface is up re-run PHY setup (under phy_lock) so the new
 * pause configuration takes effect immediately.
 * NOTE(review): lossy extraction; the else keyword before 7037 and the
 * final return were dropped.
 */
7023 bnx2_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
7025 struct bnx2
*bp
= netdev_priv(dev
);
7027 bp
->req_flow_ctrl
= 0;
7028 if (epause
->rx_pause
)
7029 bp
->req_flow_ctrl
|= FLOW_CTRL_RX
;
7030 if (epause
->tx_pause
)
7031 bp
->req_flow_ctrl
|= FLOW_CTRL_TX
;
7033 if (epause
->autoneg
) {
7034 bp
->autoneg
|= AUTONEG_FLOW_CTRL
;
7037 bp
->autoneg
&= ~AUTONEG_FLOW_CTRL
;
7040 if (netif_running(dev
)) {
7041 spin_lock_bh(&bp
->phy_lock
);
7042 bnx2_setup_phy(bp
, bp
->phy_port
);
7043 spin_unlock_bh(&bp
->phy_lock
);
/* ethtool .get_rx_csum handler.
 * NOTE(review): only the prologue survives in this extraction; the
 * return of the rx checksum setting was dropped.
 */
7050 bnx2_get_rx_csum(struct net_device
*dev
)
7052 struct bnx2
*bp
= netdev_priv(dev
);
/* ethtool .set_rx_csum handler.
 * NOTE(review): only the prologue survives in this extraction; the
 * assignment of the new rx checksum setting was dropped.
 */
7058 bnx2_set_rx_csum(struct net_device
*dev
, u32 data
)
7060 struct bnx2
*bp
= netdev_priv(dev
);
/* ethtool .set_tso handler: when enabling, turn on TSO and TSO-ECN and
 * additionally TSO6 on 5709 chips (the only family with IPv6 TSO
 * support); when disabling, clear the TSO feature bits.
 * NOTE(review): lossy extraction; the if (data)/else framing and the
 * tail of the feature-clear expression were dropped.
 */
7067 bnx2_set_tso(struct net_device
*dev
, u32 data
)
7069 struct bnx2
*bp
= netdev_priv(dev
);
7072 dev
->features
|= NETIF_F_TSO
| NETIF_F_TSO_ECN
;
7073 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
7074 dev
->features
|= NETIF_F_TSO6
;
7076 dev
->features
&= ~(NETIF_F_TSO
| NETIF_F_TSO6
|
7081 #define BNX2_NUM_STATS 46
7084 char string
[ETH_GSTRING_LEN
];
7085 } bnx2_stats_str_arr
[BNX2_NUM_STATS
] = {
7087 { "rx_error_bytes" },
7089 { "tx_error_bytes" },
7090 { "rx_ucast_packets" },
7091 { "rx_mcast_packets" },
7092 { "rx_bcast_packets" },
7093 { "tx_ucast_packets" },
7094 { "tx_mcast_packets" },
7095 { "tx_bcast_packets" },
7096 { "tx_mac_errors" },
7097 { "tx_carrier_errors" },
7098 { "rx_crc_errors" },
7099 { "rx_align_errors" },
7100 { "tx_single_collisions" },
7101 { "tx_multi_collisions" },
7103 { "tx_excess_collisions" },
7104 { "tx_late_collisions" },
7105 { "tx_total_collisions" },
7108 { "rx_undersize_packets" },
7109 { "rx_oversize_packets" },
7110 { "rx_64_byte_packets" },
7111 { "rx_65_to_127_byte_packets" },
7112 { "rx_128_to_255_byte_packets" },
7113 { "rx_256_to_511_byte_packets" },
7114 { "rx_512_to_1023_byte_packets" },
7115 { "rx_1024_to_1522_byte_packets" },
7116 { "rx_1523_to_9022_byte_packets" },
7117 { "tx_64_byte_packets" },
7118 { "tx_65_to_127_byte_packets" },
7119 { "tx_128_to_255_byte_packets" },
7120 { "tx_256_to_511_byte_packets" },
7121 { "tx_512_to_1023_byte_packets" },
7122 { "tx_1024_to_1522_byte_packets" },
7123 { "tx_1523_to_9022_byte_packets" },
7124 { "rx_xon_frames" },
7125 { "rx_xoff_frames" },
7126 { "tx_xon_frames" },
7127 { "tx_xoff_frames" },
7128 { "rx_mac_ctrl_frames" },
7129 { "rx_filtered_packets" },
7131 { "rx_fw_discards" },
7134 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7136 static const unsigned long bnx2_stats_offset_arr
[BNX2_NUM_STATS
] = {
7137 STATS_OFFSET32(stat_IfHCInOctets_hi
),
7138 STATS_OFFSET32(stat_IfHCInBadOctets_hi
),
7139 STATS_OFFSET32(stat_IfHCOutOctets_hi
),
7140 STATS_OFFSET32(stat_IfHCOutBadOctets_hi
),
7141 STATS_OFFSET32(stat_IfHCInUcastPkts_hi
),
7142 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi
),
7143 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi
),
7144 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi
),
7145 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi
),
7146 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi
),
7147 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors
),
7148 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors
),
7149 STATS_OFFSET32(stat_Dot3StatsFCSErrors
),
7150 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors
),
7151 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames
),
7152 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames
),
7153 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions
),
7154 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions
),
7155 STATS_OFFSET32(stat_Dot3StatsLateCollisions
),
7156 STATS_OFFSET32(stat_EtherStatsCollisions
),
7157 STATS_OFFSET32(stat_EtherStatsFragments
),
7158 STATS_OFFSET32(stat_EtherStatsJabbers
),
7159 STATS_OFFSET32(stat_EtherStatsUndersizePkts
),
7160 STATS_OFFSET32(stat_EtherStatsOverrsizePkts
),
7161 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets
),
7162 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets
),
7163 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets
),
7164 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets
),
7165 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets
),
7166 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets
),
7167 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets
),
7168 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets
),
7169 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets
),
7170 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets
),
7171 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets
),
7172 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets
),
7173 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets
),
7174 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets
),
7175 STATS_OFFSET32(stat_XonPauseFramesReceived
),
7176 STATS_OFFSET32(stat_XoffPauseFramesReceived
),
7177 STATS_OFFSET32(stat_OutXonSent
),
7178 STATS_OFFSET32(stat_OutXoffSent
),
7179 STATS_OFFSET32(stat_MacControlFramesReceived
),
7180 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards
),
7181 STATS_OFFSET32(stat_IfInMBUFDiscards
),
7182 STATS_OFFSET32(stat_FwRxDrop
),
7185 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7186 * skipped because of errata.
7188 static u8 bnx2_5706_stats_len_arr
[BNX2_NUM_STATS
] = {
7189 8,0,8,8,8,8,8,8,8,8,
7190 4,0,4,4,4,4,4,4,4,4,
7191 4,4,4,4,4,4,4,4,4,4,
7192 4,4,4,4,4,4,4,4,4,4,
7196 static u8 bnx2_5708_stats_len_arr
[BNX2_NUM_STATS
] = {
7197 8,0,8,8,8,8,8,8,8,8,
7198 4,4,4,4,4,4,4,4,4,4,
7199 4,4,4,4,4,4,4,4,4,4,
7200 4,4,4,4,4,4,4,4,4,4,
7204 #define BNX2_NUM_TESTS 6
7207 char string
[ETH_GSTRING_LEN
];
7208 } bnx2_tests_str_arr
[BNX2_NUM_TESTS
] = {
7209 { "register_test (offline)" },
7210 { "memory_test (offline)" },
7211 { "loopback_test (offline)" },
7212 { "nvram_test (online)" },
7213 { "interrupt_test (online)" },
7214 { "link_test (online)" },
/* ethtool .get_sset_count handler: number of self-test entries or
 * statistics entries depending on the requested string set (the
 * switch/case lines were dropped in this extraction).
 */
7218 bnx2_get_sset_count(struct net_device
*dev
, int sset
)
7222 return BNX2_NUM_TESTS
;
7224 return BNX2_NUM_STATS
;
7231 bnx2_self_test(struct net_device
*dev
, struct ethtool_test
*etest
, u64
*buf
)
7233 struct bnx2
*bp
= netdev_priv(dev
);
7235 bnx2_set_power_state(bp
, PCI_D0
);
7237 memset(buf
, 0, sizeof(u64
) * BNX2_NUM_TESTS
);
7238 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
7241 bnx2_netif_stop(bp
);
7242 bnx2_reset_chip(bp
, BNX2_DRV_MSG_CODE_DIAG
);
7245 if (bnx2_test_registers(bp
) != 0) {
7247 etest
->flags
|= ETH_TEST_FL_FAILED
;
7249 if (bnx2_test_memory(bp
) != 0) {
7251 etest
->flags
|= ETH_TEST_FL_FAILED
;
7253 if ((buf
[2] = bnx2_test_loopback(bp
)) != 0)
7254 etest
->flags
|= ETH_TEST_FL_FAILED
;
7256 if (!netif_running(bp
->dev
))
7257 bnx2_shutdown_chip(bp
);
7259 bnx2_init_nic(bp
, 1);
7260 bnx2_netif_start(bp
);
7263 /* wait for link up */
7264 for (i
= 0; i
< 7; i
++) {
7267 msleep_interruptible(1000);
7271 if (bnx2_test_nvram(bp
) != 0) {
7273 etest
->flags
|= ETH_TEST_FL_FAILED
;
7275 if (bnx2_test_intr(bp
) != 0) {
7277 etest
->flags
|= ETH_TEST_FL_FAILED
;
7280 if (bnx2_test_link(bp
) != 0) {
7282 etest
->flags
|= ETH_TEST_FL_FAILED
;
7285 if (!netif_running(bp
->dev
))
7286 bnx2_set_power_state(bp
, PCI_D3hot
);
/* ethtool .get_strings handler: copy either the statistics-name table
 * or the self-test-name table into the caller's buffer, selected by
 * stringset (the ETH_SS_* case labels were dropped in this extraction).
 */
7290 bnx2_get_strings(struct net_device
*dev
, u32 stringset
, u8
*buf
)
7292 switch (stringset
) {
7294 memcpy(buf
, bnx2_stats_str_arr
,
7295 sizeof(bnx2_stats_str_arr
));
7298 memcpy(buf
, bnx2_tests_str_arr
,
7299 sizeof(bnx2_tests_str_arr
));
7305 bnx2_get_ethtool_stats(struct net_device
*dev
,
7306 struct ethtool_stats
*stats
, u64
*buf
)
7308 struct bnx2
*bp
= netdev_priv(dev
);
7310 u32
*hw_stats
= (u32
*) bp
->stats_blk
;
7311 u8
*stats_len_arr
= NULL
;
7313 if (hw_stats
== NULL
) {
7314 memset(buf
, 0, sizeof(u64
) * BNX2_NUM_STATS
);
7318 if ((CHIP_ID(bp
) == CHIP_ID_5706_A0
) ||
7319 (CHIP_ID(bp
) == CHIP_ID_5706_A1
) ||
7320 (CHIP_ID(bp
) == CHIP_ID_5706_A2
) ||
7321 (CHIP_ID(bp
) == CHIP_ID_5708_A0
))
7322 stats_len_arr
= bnx2_5706_stats_len_arr
;
7324 stats_len_arr
= bnx2_5708_stats_len_arr
;
7326 for (i
= 0; i
< BNX2_NUM_STATS
; i
++) {
7327 if (stats_len_arr
[i
] == 0) {
7328 /* skip this counter */
7332 if (stats_len_arr
[i
] == 4) {
7333 /* 4-byte counter */
7335 *(hw_stats
+ bnx2_stats_offset_arr
[i
]);
7338 /* 8-byte counter */
7339 buf
[i
] = (((u64
) *(hw_stats
+
7340 bnx2_stats_offset_arr
[i
])) << 32) +
7341 *(hw_stats
+ bnx2_stats_offset_arr
[i
] + 1);
7346 bnx2_phys_id(struct net_device
*dev
, u32 data
)
7348 struct bnx2
*bp
= netdev_priv(dev
);
7352 bnx2_set_power_state(bp
, PCI_D0
);
7357 save
= REG_RD(bp
, BNX2_MISC_CFG
);
7358 REG_WR(bp
, BNX2_MISC_CFG
, BNX2_MISC_CFG_LEDMODE_MAC
);
7360 for (i
= 0; i
< (data
* 2); i
++) {
7362 REG_WR(bp
, BNX2_EMAC_LED
, BNX2_EMAC_LED_OVERRIDE
);
7365 REG_WR(bp
, BNX2_EMAC_LED
, BNX2_EMAC_LED_OVERRIDE
|
7366 BNX2_EMAC_LED_1000MB_OVERRIDE
|
7367 BNX2_EMAC_LED_100MB_OVERRIDE
|
7368 BNX2_EMAC_LED_10MB_OVERRIDE
|
7369 BNX2_EMAC_LED_TRAFFIC_OVERRIDE
|
7370 BNX2_EMAC_LED_TRAFFIC
);
7372 msleep_interruptible(500);
7373 if (signal_pending(current
))
7376 REG_WR(bp
, BNX2_EMAC_LED
, 0);
7377 REG_WR(bp
, BNX2_MISC_CFG
, save
);
7379 if (!netif_running(dev
))
7380 bnx2_set_power_state(bp
, PCI_D3hot
);
/* ethtool .set_tx_csum handler: 5709 chips can checksum IPv6 as well,
 * so use the generic IPv4+IPv6 helper there; older chips get the
 * IPv4-only helper.
 */
7386 bnx2_set_tx_csum(struct net_device
*dev
, u32 data
)
7388 struct bnx2
*bp
= netdev_priv(dev
);
7390 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
7391 return (ethtool_op_set_tx_ipv6_csum(dev
, data
));
7393 return (ethtool_op_set_tx_csum(dev
, data
));
/* ethtool operations table: wires each ethtool request to the bnx2_*
 * handlers defined above; installed on the netdev in bnx2_init_one.
 */
7396 static const struct ethtool_ops bnx2_ethtool_ops
= {
7397 .get_settings
= bnx2_get_settings
,
7398 .set_settings
= bnx2_set_settings
,
7399 .get_drvinfo
= bnx2_get_drvinfo
,
7400 .get_regs_len
= bnx2_get_regs_len
,
7401 .get_regs
= bnx2_get_regs
,
7402 .get_wol
= bnx2_get_wol
,
7403 .set_wol
= bnx2_set_wol
,
7404 .nway_reset
= bnx2_nway_reset
,
7405 .get_link
= bnx2_get_link
,
7406 .get_eeprom_len
= bnx2_get_eeprom_len
,
7407 .get_eeprom
= bnx2_get_eeprom
,
7408 .set_eeprom
= bnx2_set_eeprom
,
7409 .get_coalesce
= bnx2_get_coalesce
,
7410 .set_coalesce
= bnx2_set_coalesce
,
7411 .get_ringparam
= bnx2_get_ringparam
,
7412 .set_ringparam
= bnx2_set_ringparam
,
7413 .get_pauseparam
= bnx2_get_pauseparam
,
7414 .set_pauseparam
= bnx2_set_pauseparam
,
7415 .get_rx_csum
= bnx2_get_rx_csum
,
7416 .set_rx_csum
= bnx2_set_rx_csum
,
7417 .set_tx_csum
= bnx2_set_tx_csum
,
7418 .set_sg
= ethtool_op_set_sg
,
7419 .set_tso
= bnx2_set_tso
,
7420 .self_test
= bnx2_self_test
,
7421 .get_strings
= bnx2_get_strings
,
7422 .phys_id
= bnx2_phys_id
,
7423 .get_ethtool_stats
= bnx2_get_ethtool_stats
,
7424 .get_sset_count
= bnx2_get_sset_count
,
7427 /* Called with rtnl_lock */
7429 bnx2_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
7431 struct mii_ioctl_data
*data
= if_mii(ifr
);
7432 struct bnx2
*bp
= netdev_priv(dev
);
7437 data
->phy_id
= bp
->phy_addr
;
7443 if (bp
->phy_flags
& BNX2_PHY_FLAG_REMOTE_PHY_CAP
)
7446 if (!netif_running(dev
))
7449 spin_lock_bh(&bp
->phy_lock
);
7450 err
= bnx2_read_phy(bp
, data
->reg_num
& 0x1f, &mii_regval
);
7451 spin_unlock_bh(&bp
->phy_lock
);
7453 data
->val_out
= mii_regval
;
7459 if (!capable(CAP_NET_ADMIN
))
7462 if (bp
->phy_flags
& BNX2_PHY_FLAG_REMOTE_PHY_CAP
)
7465 if (!netif_running(dev
))
7468 spin_lock_bh(&bp
->phy_lock
);
7469 err
= bnx2_write_phy(bp
, data
->reg_num
& 0x1f, data
->val_in
);
7470 spin_unlock_bh(&bp
->phy_lock
);
7481 /* Called with rtnl_lock */
/* net_device_ops .ndo_set_mac_address: validate the new Ethernet
 * address, copy it into dev->dev_addr, and if the interface is running
 * program it into the MAC immediately.  Invalid addresses are rejected
 * (the error return after the validity check was dropped in this
 * extraction).
 */
7483 bnx2_change_mac_addr(struct net_device
*dev
, void *p
)
7485 struct sockaddr
*addr
= p
;
7486 struct bnx2
*bp
= netdev_priv(dev
);
7488 if (!is_valid_ether_addr(addr
->sa_data
))
7491 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
7492 if (netif_running(dev
))
7493 bnx2_set_mac_addr(bp
, bp
->dev
->dev_addr
, 0);
7498 /* Called with rtnl_lock */
/* net_device_ops .ndo_change_mtu: reject frames larger than the jumbo
 * maximum or smaller than the minimum Ethernet frame (ETH_HLEN added
 * to account for the header), then rebuild the rings at their current
 * sizes so buffer sizing picks up the new MTU.  The dev->mtu
 * assignment between the check and the return was dropped in this
 * extraction.
 */
7500 bnx2_change_mtu(struct net_device
*dev
, int new_mtu
)
7502 struct bnx2
*bp
= netdev_priv(dev
);
7504 if (((new_mtu
+ ETH_HLEN
) > MAX_ETHERNET_JUMBO_PACKET_SIZE
) ||
7505 ((new_mtu
+ ETH_HLEN
) < MIN_ETHERNET_PACKET_SIZE
))
7509 return (bnx2_change_ring_size(bp
, bp
->rx_ring_size
, bp
->tx_ring_size
));
7512 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll controller: with interrupts that may be masked (netconsole,
 * kgdb-over-ethernet), manually invoke the interrupt handler for every
 * allocated vector, with that IRQ disabled around the call to avoid
 * re-entry.
 */
7514 poll_bnx2(struct net_device
*dev
)
7516 struct bnx2
*bp
= netdev_priv(dev
);
7519 for (i
= 0; i
< bp
->irq_nvecs
; i
++) {
7520 disable_irq(bp
->irq_tbl
[i
].vector
);
7521 bnx2_interrupt(bp
->irq_tbl
[i
].vector
, &bp
->bnx2_napi
[i
]);
7522 enable_irq(bp
->irq_tbl
[i
].vector
);
7527 static void __devinit
7528 bnx2_get_5709_media(struct bnx2
*bp
)
7530 u32 val
= REG_RD(bp
, BNX2_MISC_DUAL_MEDIA_CTRL
);
7531 u32 bond_id
= val
& BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID
;
7534 if (bond_id
== BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C
)
7536 else if (bond_id
== BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S
) {
7537 bp
->phy_flags
|= BNX2_PHY_FLAG_SERDES
;
7541 if (val
& BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE
)
7542 strap
= (val
& BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL
) >> 21;
7544 strap
= (val
& BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP
) >> 8;
7546 if (PCI_FUNC(bp
->pdev
->devfn
) == 0) {
7551 bp
->phy_flags
|= BNX2_PHY_FLAG_SERDES
;
7559 bp
->phy_flags
|= BNX2_PHY_FLAG_SERDES
;
7565 static void __devinit
7566 bnx2_get_pci_speed(struct bnx2
*bp
)
7570 reg
= REG_RD(bp
, BNX2_PCICFG_MISC_STATUS
);
7571 if (reg
& BNX2_PCICFG_MISC_STATUS_PCIX_DET
) {
7574 bp
->flags
|= BNX2_FLAG_PCIX
;
7576 clkreg
= REG_RD(bp
, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS
);
7578 clkreg
&= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET
;
7580 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ
:
7581 bp
->bus_speed_mhz
= 133;
7584 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ
:
7585 bp
->bus_speed_mhz
= 100;
7588 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ
:
7589 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ
:
7590 bp
->bus_speed_mhz
= 66;
7593 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ
:
7594 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ
:
7595 bp
->bus_speed_mhz
= 50;
7598 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW
:
7599 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ
:
7600 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ
:
7601 bp
->bus_speed_mhz
= 33;
7606 if (reg
& BNX2_PCICFG_MISC_STATUS_M66EN
)
7607 bp
->bus_speed_mhz
= 66;
7609 bp
->bus_speed_mhz
= 33;
7612 if (reg
& BNX2_PCICFG_MISC_STATUS_32BIT_DET
)
7613 bp
->flags
|= BNX2_FLAG_PCI_32BIT
;
7617 static int __devinit
7618 bnx2_init_board(struct pci_dev
*pdev
, struct net_device
*dev
)
7621 unsigned long mem_len
;
7624 u64 dma_mask
, persist_dma_mask
;
7626 SET_NETDEV_DEV(dev
, &pdev
->dev
);
7627 bp
= netdev_priv(dev
);
7632 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7633 rc
= pci_enable_device(pdev
);
7635 dev_err(&pdev
->dev
, "Cannot enable PCI device, aborting.\n");
7639 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
)) {
7641 "Cannot find PCI device base address, aborting.\n");
7643 goto err_out_disable
;
7646 rc
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
7648 dev_err(&pdev
->dev
, "Cannot obtain PCI resources, aborting.\n");
7649 goto err_out_disable
;
7652 pci_set_master(pdev
);
7653 pci_save_state(pdev
);
7655 bp
->pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
7656 if (bp
->pm_cap
== 0) {
7658 "Cannot find power management capability, aborting.\n");
7660 goto err_out_release
;
7666 spin_lock_init(&bp
->phy_lock
);
7667 spin_lock_init(&bp
->indirect_lock
);
7669 mutex_init(&bp
->cnic_lock
);
7671 INIT_WORK(&bp
->reset_task
, bnx2_reset_task
);
7673 dev
->base_addr
= dev
->mem_start
= pci_resource_start(pdev
, 0);
7674 mem_len
= MB_GET_CID_ADDR(TX_TSS_CID
+ TX_MAX_TSS_RINGS
+ 1);
7675 dev
->mem_end
= dev
->mem_start
+ mem_len
;
7676 dev
->irq
= pdev
->irq
;
7678 bp
->regview
= ioremap_nocache(dev
->base_addr
, mem_len
);
7681 dev_err(&pdev
->dev
, "Cannot map register space, aborting.\n");
7683 goto err_out_release
;
7686 /* Configure byte swap and enable write to the reg_window registers.
7687 * Rely on CPU to do target byte swapping on big endian systems
7688 * The chip's target access swapping will not swap all accesses
7690 pci_write_config_dword(bp
->pdev
, BNX2_PCICFG_MISC_CONFIG
,
7691 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA
|
7692 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP
);
7694 bnx2_set_power_state(bp
, PCI_D0
);
7696 bp
->chip_id
= REG_RD(bp
, BNX2_MISC_ID
);
7698 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
7699 if (pci_find_capability(pdev
, PCI_CAP_ID_EXP
) == 0) {
7701 "Cannot find PCIE capability, aborting.\n");
7705 bp
->flags
|= BNX2_FLAG_PCIE
;
7706 if (CHIP_REV(bp
) == CHIP_REV_Ax
)
7707 bp
->flags
|= BNX2_FLAG_JUMBO_BROKEN
;
7709 bp
->pcix_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PCIX
);
7710 if (bp
->pcix_cap
== 0) {
7712 "Cannot find PCIX capability, aborting.\n");
7718 if (CHIP_NUM(bp
) == CHIP_NUM_5709
&& CHIP_REV(bp
) != CHIP_REV_Ax
) {
7719 if (pci_find_capability(pdev
, PCI_CAP_ID_MSIX
))
7720 bp
->flags
|= BNX2_FLAG_MSIX_CAP
;
7723 if (CHIP_ID(bp
) != CHIP_ID_5706_A0
&& CHIP_ID(bp
) != CHIP_ID_5706_A1
) {
7724 if (pci_find_capability(pdev
, PCI_CAP_ID_MSI
))
7725 bp
->flags
|= BNX2_FLAG_MSI_CAP
;
7728 /* 5708 cannot support DMA addresses > 40-bit. */
7729 if (CHIP_NUM(bp
) == CHIP_NUM_5708
)
7730 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(40);
7732 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(64);
7734 /* Configure DMA attributes. */
7735 if (pci_set_dma_mask(pdev
, dma_mask
) == 0) {
7736 dev
->features
|= NETIF_F_HIGHDMA
;
7737 rc
= pci_set_consistent_dma_mask(pdev
, persist_dma_mask
);
7740 "pci_set_consistent_dma_mask failed, aborting.\n");
7743 } else if ((rc
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32))) != 0) {
7744 dev_err(&pdev
->dev
, "System does not support DMA, aborting.\n");
7748 if (!(bp
->flags
& BNX2_FLAG_PCIE
))
7749 bnx2_get_pci_speed(bp
);
7751 /* 5706A0 may falsely detect SERR and PERR. */
7752 if (CHIP_ID(bp
) == CHIP_ID_5706_A0
) {
7753 reg
= REG_RD(bp
, PCI_COMMAND
);
7754 reg
&= ~(PCI_COMMAND_SERR
| PCI_COMMAND_PARITY
);
7755 REG_WR(bp
, PCI_COMMAND
, reg
);
7757 else if ((CHIP_ID(bp
) == CHIP_ID_5706_A1
) &&
7758 !(bp
->flags
& BNX2_FLAG_PCIX
)) {
7761 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7765 bnx2_init_nvram(bp
);
7767 reg
= bnx2_reg_rd_ind(bp
, BNX2_SHM_HDR_SIGNATURE
);
7769 if ((reg
& BNX2_SHM_HDR_SIGNATURE_SIG_MASK
) ==
7770 BNX2_SHM_HDR_SIGNATURE_SIG
) {
7771 u32 off
= PCI_FUNC(pdev
->devfn
) << 2;
7773 bp
->shmem_base
= bnx2_reg_rd_ind(bp
, BNX2_SHM_HDR_ADDR_0
+ off
);
7775 bp
->shmem_base
= HOST_VIEW_SHMEM_BASE
;
7777 /* Get the permanent MAC address. First we need to make sure the
7778 * firmware is actually running.
7780 reg
= bnx2_shmem_rd(bp
, BNX2_DEV_INFO_SIGNATURE
);
7782 if ((reg
& BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK
) !=
7783 BNX2_DEV_INFO_SIGNATURE_MAGIC
) {
7784 dev_err(&pdev
->dev
, "Firmware not running, aborting.\n");
7789 reg
= bnx2_shmem_rd(bp
, BNX2_DEV_INFO_BC_REV
);
7790 for (i
= 0, j
= 0; i
< 3; i
++) {
7793 num
= (u8
) (reg
>> (24 - (i
* 8)));
7794 for (k
= 100, skip0
= 1; k
>= 1; num
%= k
, k
/= 10) {
7795 if (num
>= k
|| !skip0
|| k
== 1) {
7796 bp
->fw_version
[j
++] = (num
/ k
) + '0';
7801 bp
->fw_version
[j
++] = '.';
7803 reg
= bnx2_shmem_rd(bp
, BNX2_PORT_FEATURE
);
7804 if (reg
& BNX2_PORT_FEATURE_WOL_ENABLED
)
7807 if (reg
& BNX2_PORT_FEATURE_ASF_ENABLED
) {
7808 bp
->flags
|= BNX2_FLAG_ASF_ENABLE
;
7810 for (i
= 0; i
< 30; i
++) {
7811 reg
= bnx2_shmem_rd(bp
, BNX2_BC_STATE_CONDITION
);
7812 if (reg
& BNX2_CONDITION_MFW_RUN_MASK
)
7817 reg
= bnx2_shmem_rd(bp
, BNX2_BC_STATE_CONDITION
);
7818 reg
&= BNX2_CONDITION_MFW_RUN_MASK
;
7819 if (reg
!= BNX2_CONDITION_MFW_RUN_UNKNOWN
&&
7820 reg
!= BNX2_CONDITION_MFW_RUN_NONE
) {
7821 u32 addr
= bnx2_shmem_rd(bp
, BNX2_MFW_VER_PTR
);
7823 bp
->fw_version
[j
++] = ' ';
7824 for (i
= 0; i
< 3; i
++) {
7825 reg
= bnx2_reg_rd_ind(bp
, addr
+ i
* 4);
7827 memcpy(&bp
->fw_version
[j
], ®
, 4);
7832 reg
= bnx2_shmem_rd(bp
, BNX2_PORT_HW_CFG_MAC_UPPER
);
7833 bp
->mac_addr
[0] = (u8
) (reg
>> 8);
7834 bp
->mac_addr
[1] = (u8
) reg
;
7836 reg
= bnx2_shmem_rd(bp
, BNX2_PORT_HW_CFG_MAC_LOWER
);
7837 bp
->mac_addr
[2] = (u8
) (reg
>> 24);
7838 bp
->mac_addr
[3] = (u8
) (reg
>> 16);
7839 bp
->mac_addr
[4] = (u8
) (reg
>> 8);
7840 bp
->mac_addr
[5] = (u8
) reg
;
7842 bp
->tx_ring_size
= MAX_TX_DESC_CNT
;
7843 bnx2_set_rx_ring_size(bp
, 255);
7847 bp
->tx_quick_cons_trip_int
= 20;
7848 bp
->tx_quick_cons_trip
= 20;
7849 bp
->tx_ticks_int
= 80;
7852 bp
->rx_quick_cons_trip_int
= 6;
7853 bp
->rx_quick_cons_trip
= 6;
7854 bp
->rx_ticks_int
= 18;
7857 bp
->stats_ticks
= USEC_PER_SEC
& BNX2_HC_STATS_TICKS_HC_STAT_TICKS
;
7859 bp
->current_interval
= BNX2_TIMER_INTERVAL
;
7863 /* Disable WOL support if we are running on a SERDES chip. */
7864 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
7865 bnx2_get_5709_media(bp
);
7866 else if (CHIP_BOND_ID(bp
) & CHIP_BOND_ID_SERDES_BIT
)
7867 bp
->phy_flags
|= BNX2_PHY_FLAG_SERDES
;
7869 bp
->phy_port
= PORT_TP
;
7870 if (bp
->phy_flags
& BNX2_PHY_FLAG_SERDES
) {
7871 bp
->phy_port
= PORT_FIBRE
;
7872 reg
= bnx2_shmem_rd(bp
, BNX2_SHARED_HW_CFG_CONFIG
);
7873 if (!(reg
& BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX
)) {
7874 bp
->flags
|= BNX2_FLAG_NO_WOL
;
7877 if (CHIP_NUM(bp
) == CHIP_NUM_5706
) {
7878 /* Don't do parallel detect on this board because of
7879 * some board problems. The link will not go down
7880 * if we do parallel detect.
7882 if (pdev
->subsystem_vendor
== PCI_VENDOR_ID_HP
&&
7883 pdev
->subsystem_device
== 0x310c)
7884 bp
->phy_flags
|= BNX2_PHY_FLAG_NO_PARALLEL
;
7887 if (reg
& BNX2_SHARED_HW_CFG_PHY_2_5G
)
7888 bp
->phy_flags
|= BNX2_PHY_FLAG_2_5G_CAPABLE
;
7890 } else if (CHIP_NUM(bp
) == CHIP_NUM_5706
||
7891 CHIP_NUM(bp
) == CHIP_NUM_5708
)
7892 bp
->phy_flags
|= BNX2_PHY_FLAG_CRC_FIX
;
7893 else if (CHIP_NUM(bp
) == CHIP_NUM_5709
&&
7894 (CHIP_REV(bp
) == CHIP_REV_Ax
||
7895 CHIP_REV(bp
) == CHIP_REV_Bx
))
7896 bp
->phy_flags
|= BNX2_PHY_FLAG_DIS_EARLY_DAC
;
7898 bnx2_init_fw_cap(bp
);
7900 if ((CHIP_ID(bp
) == CHIP_ID_5708_A0
) ||
7901 (CHIP_ID(bp
) == CHIP_ID_5708_B0
) ||
7902 (CHIP_ID(bp
) == CHIP_ID_5708_B1
) ||
7903 !(REG_RD(bp
, BNX2_PCI_CONFIG_3
) & BNX2_PCI_CONFIG_3_VAUX_PRESET
)) {
7904 bp
->flags
|= BNX2_FLAG_NO_WOL
;
7908 if (CHIP_ID(bp
) == CHIP_ID_5706_A0
) {
7909 bp
->tx_quick_cons_trip_int
=
7910 bp
->tx_quick_cons_trip
;
7911 bp
->tx_ticks_int
= bp
->tx_ticks
;
7912 bp
->rx_quick_cons_trip_int
=
7913 bp
->rx_quick_cons_trip
;
7914 bp
->rx_ticks_int
= bp
->rx_ticks
;
7915 bp
->comp_prod_trip_int
= bp
->comp_prod_trip
;
7916 bp
->com_ticks_int
= bp
->com_ticks
;
7917 bp
->cmd_ticks_int
= bp
->cmd_ticks
;
7920 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7922 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7923 * with byte enables disabled on the unused 32-bit word. This is legal
7924 * but causes problems on the AMD 8132 which will eventually stop
7925 * responding after a while.
7927 * AMD believes this incompatibility is unique to the 5706, and
7928 * prefers to locally disable MSI rather than globally disabling it.
7930 if (CHIP_NUM(bp
) == CHIP_NUM_5706
&& disable_msi
== 0) {
7931 struct pci_dev
*amd_8132
= NULL
;
7933 while ((amd_8132
= pci_get_device(PCI_VENDOR_ID_AMD
,
7934 PCI_DEVICE_ID_AMD_8132_BRIDGE
,
7937 if (amd_8132
->revision
>= 0x10 &&
7938 amd_8132
->revision
<= 0x13) {
7940 pci_dev_put(amd_8132
);
7946 bnx2_set_default_link(bp
);
7947 bp
->req_flow_ctrl
= FLOW_CTRL_RX
| FLOW_CTRL_TX
;
7949 init_timer(&bp
->timer
);
7950 bp
->timer
.expires
= RUN_AT(BNX2_TIMER_INTERVAL
);
7951 bp
->timer
.data
= (unsigned long) bp
;
7952 bp
->timer
.function
= bnx2_timer
;
7958 iounmap(bp
->regview
);
7963 pci_release_regions(pdev
);
7966 pci_disable_device(pdev
);
7967 pci_set_drvdata(pdev
, NULL
);
/* Format a human-readable description of the bus the NIC sits on
 * ("PCI Express", or "PCI[-X] {32,64}-bit <N>MHz") into the caller's
 * buffer; used once in the probe-time banner printk.
 * NOTE(review): lossy extraction; the `s = str` initialization, else
 * keywords and the return of str were dropped.
 */
7973 static char * __devinit
7974 bnx2_bus_string(struct bnx2
*bp
, char *str
)
7978 if (bp
->flags
& BNX2_FLAG_PCIE
) {
7979 s
+= sprintf(s
, "PCI Express");
7981 s
+= sprintf(s
, "PCI");
7982 if (bp
->flags
& BNX2_FLAG_PCIX
)
7983 s
+= sprintf(s
, "-X");
7984 if (bp
->flags
& BNX2_FLAG_PCI_32BIT
)
7985 s
+= sprintf(s
, " 32-bit");
7987 s
+= sprintf(s
, " 64-bit");
7988 s
+= sprintf(s
, " %dMHz", bp
->bus_speed_mhz
);
/* Register one NAPI context per possible MSI-X vector with a weight of
 * 64.  The first vector uses the default poll routine and the others
 * use bnx2_poll_msix (the branch selecting the default poll function
 * was dropped in this extraction).
 */
7993 static void __devinit
7994 bnx2_init_napi(struct bnx2
*bp
)
7998 for (i
= 0; i
< BNX2_MAX_MSIX_VEC
; i
++) {
7999 struct bnx2_napi
*bnapi
= &bp
->bnx2_napi
[i
];
8000 int (*poll
)(struct napi_struct
*, int);
8005 poll
= bnx2_poll_msix
;
8007 netif_napi_add(bp
->dev
, &bp
->bnx2_napi
[i
].napi
, poll
, 64);
/* net_device operations table: open/close, transmit, stats, rx-mode,
 * ioctl, MAC/MTU changes, tx timeout recovery, VLAN registration and
 * (when netpoll is configured) the poll controller.
 */
8012 static const struct net_device_ops bnx2_netdev_ops
= {
8013 .ndo_open
= bnx2_open
,
8014 .ndo_start_xmit
= bnx2_start_xmit
,
8015 .ndo_stop
= bnx2_close
,
8016 .ndo_get_stats
= bnx2_get_stats
,
8017 .ndo_set_rx_mode
= bnx2_set_rx_mode
,
8018 .ndo_do_ioctl
= bnx2_ioctl
,
8019 .ndo_validate_addr
= eth_validate_addr
,
8020 .ndo_set_mac_address
= bnx2_change_mac_addr
,
8021 .ndo_change_mtu
= bnx2_change_mtu
,
8022 .ndo_tx_timeout
= bnx2_tx_timeout
,
8024 .ndo_vlan_rx_register
= bnx2_vlan_rx_register
,
8026 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
8027 .ndo_poll_controller
= poll_bnx2
,
8031 static int __devinit
8032 bnx2_init_one(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
8034 static int version_printed
= 0;
8035 struct net_device
*dev
= NULL
;
8040 if (version_printed
++ == 0)
8041 printk(KERN_INFO
"%s", version
);
8043 /* dev zeroed in init_etherdev */
8044 dev
= alloc_etherdev_mq(sizeof(*bp
), TX_MAX_RINGS
);
8049 rc
= bnx2_init_board(pdev
, dev
);
8055 dev
->netdev_ops
= &bnx2_netdev_ops
;
8056 dev
->watchdog_timeo
= TX_TIMEOUT
;
8057 dev
->ethtool_ops
= &bnx2_ethtool_ops
;
8059 bp
= netdev_priv(dev
);
8062 pci_set_drvdata(pdev
, dev
);
8064 rc
= bnx2_request_firmware(bp
);
8068 memcpy(dev
->dev_addr
, bp
->mac_addr
, 6);
8069 memcpy(dev
->perm_addr
, bp
->mac_addr
, 6);
8071 dev
->features
|= NETIF_F_IP_CSUM
| NETIF_F_SG
;
8072 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
8073 dev
->features
|= NETIF_F_IPV6_CSUM
;
8076 dev
->features
|= NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
8078 dev
->features
|= NETIF_F_TSO
| NETIF_F_TSO_ECN
;
8079 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
8080 dev
->features
|= NETIF_F_TSO6
;
8082 if ((rc
= register_netdev(dev
))) {
8083 dev_err(&pdev
->dev
, "Cannot register net device\n");
8087 printk(KERN_INFO
"%s: %s (%c%d) %s found at mem %lx, "
8088 "IRQ %d, node addr %pM\n",
8090 board_info
[ent
->driver_data
].name
,
8091 ((CHIP_ID(bp
) & 0xf000) >> 12) + 'A',
8092 ((CHIP_ID(bp
) & 0x0ff0) >> 4),
8093 bnx2_bus_string(bp
, str
),
8095 bp
->pdev
->irq
, dev
->dev_addr
);
8100 if (bp
->mips_firmware
)
8101 release_firmware(bp
->mips_firmware
);
8102 if (bp
->rv2p_firmware
)
8103 release_firmware(bp
->rv2p_firmware
);
8106 iounmap(bp
->regview
);
8107 pci_release_regions(pdev
);
8108 pci_disable_device(pdev
);
8109 pci_set_drvdata(pdev
, NULL
);
8114 static void __devexit
8115 bnx2_remove_one(struct pci_dev
*pdev
)
8117 struct net_device
*dev
= pci_get_drvdata(pdev
);
8118 struct bnx2
*bp
= netdev_priv(dev
);
8120 flush_scheduled_work();
8122 unregister_netdev(dev
);
8124 if (bp
->mips_firmware
)
8125 release_firmware(bp
->mips_firmware
);
8126 if (bp
->rv2p_firmware
)
8127 release_firmware(bp
->rv2p_firmware
);
8130 iounmap(bp
->regview
);
8133 pci_release_regions(pdev
);
8134 pci_disable_device(pdev
);
8135 pci_set_drvdata(pdev
, NULL
);
8139 bnx2_suspend(struct pci_dev
*pdev
, pm_message_t state
)
8141 struct net_device
*dev
= pci_get_drvdata(pdev
);
8142 struct bnx2
*bp
= netdev_priv(dev
);
8144 /* PCI register 4 needs to be saved whether netif_running() or not.
8145 * MSI address and data need to be saved if using MSI and
8148 pci_save_state(pdev
);
8149 if (!netif_running(dev
))
8152 flush_scheduled_work();
8153 bnx2_netif_stop(bp
);
8154 netif_device_detach(dev
);
8155 del_timer_sync(&bp
->timer
);
8156 bnx2_shutdown_chip(bp
);
8158 bnx2_set_power_state(bp
, pci_choose_state(pdev
, state
));
8163 bnx2_resume(struct pci_dev
*pdev
)
8165 struct net_device
*dev
= pci_get_drvdata(pdev
);
8166 struct bnx2
*bp
= netdev_priv(dev
);
8168 pci_restore_state(pdev
);
8169 if (!netif_running(dev
))
8172 bnx2_set_power_state(bp
, PCI_D0
);
8173 netif_device_attach(dev
);
8174 bnx2_init_nic(bp
, 1);
8175 bnx2_netif_start(bp
);
8180 * bnx2_io_error_detected - called when PCI error is detected
8181 * @pdev: Pointer to PCI device
8182 * @state: The current pci connection state
8184 * This function is called after a PCI bus error affecting
8185 * this device has been detected.
8187 static pci_ers_result_t
bnx2_io_error_detected(struct pci_dev
*pdev
,
8188 pci_channel_state_t state
)
8190 struct net_device
*dev
= pci_get_drvdata(pdev
);
8191 struct bnx2
*bp
= netdev_priv(dev
);
8194 netif_device_detach(dev
);
8196 if (netif_running(dev
)) {
8197 bnx2_netif_stop(bp
);
8198 del_timer_sync(&bp
->timer
);
8199 bnx2_reset_nic(bp
, BNX2_DRV_MSG_CODE_RESET
);
8202 pci_disable_device(pdev
);
8205 /* Request a slot slot reset. */
8206 return PCI_ERS_RESULT_NEED_RESET
;
8210 * bnx2_io_slot_reset - called after the pci bus has been reset.
8211 * @pdev: Pointer to PCI device
8213 * Restart the card from scratch, as if from a cold-boot.
8215 static pci_ers_result_t
bnx2_io_slot_reset(struct pci_dev
*pdev
)
8217 struct net_device
*dev
= pci_get_drvdata(pdev
);
8218 struct bnx2
*bp
= netdev_priv(dev
);
8221 if (pci_enable_device(pdev
)) {
8223 "Cannot re-enable PCI device after reset.\n");
8225 return PCI_ERS_RESULT_DISCONNECT
;
8227 pci_set_master(pdev
);
8228 pci_restore_state(pdev
);
8230 if (netif_running(dev
)) {
8231 bnx2_set_power_state(bp
, PCI_D0
);
8232 bnx2_init_nic(bp
, 1);
8236 return PCI_ERS_RESULT_RECOVERED
;
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
}
8259 static struct pci_error_handlers bnx2_err_handler
= {
8260 .error_detected
= bnx2_io_error_detected
,
8261 .slot_reset
= bnx2_io_slot_reset
,
8262 .resume
= bnx2_io_resume
,
8265 static struct pci_driver bnx2_pci_driver
= {
8266 .name
= DRV_MODULE_NAME
,
8267 .id_table
= bnx2_pci_tbl
,
8268 .probe
= bnx2_init_one
,
8269 .remove
= __devexit_p(bnx2_remove_one
),
8270 .suspend
= bnx2_suspend
,
8271 .resume
= bnx2_resume
,
8272 .err_handler
= &bnx2_err_handler
,
8275 static int __init
bnx2_init(void)
8277 return pci_register_driver(&bnx2_pci_driver
);
8280 static void __exit
bnx2_cleanup(void)
8282 pci_unregister_driver(&bnx2_pci_driver
);
module_init(bnx2_init);
module_exit(bnx2_cleanup);