/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#include "tg3.h"
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			117
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"January 25, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	((tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	((tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
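
/*
 * Illustration of the comment above: with TG3_TX_RING_SIZE a power of
 * two, NEXT_TX() can wrap an index with a mask instead of a modulo,
 * e.g. (511 + 1) & (512 - 1) == 0, which is the same as (511 + 1) % 512.
 */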
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
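
/*
 * Rough sketch of how the threshold is consumed by the receive path
 * (simplified here for illustration): frames no longer than
 * TG3_RX_COPY_THRESH(tp) are copied into a freshly allocated skb so the
 * original DMA buffer can be recycled, while larger frames are unmapped
 * and handed up the stack as-is, roughly:
 *
 *	if (len > TG3_RX_COPY_THRESH(tp)) {
 *		... unmap the buffer and pass it up ...
 *	} else {
 *		... copy len bytes into a small skb and recycle the buffer ...
 *	}
 */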
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
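
/*
 * Usage sketch (simplified): the tx completion path only restarts a
 * stopped queue once at least this many descriptors are free again,
 * using tg3_tx_avail() defined later in the driver, roughly:
 *
 *	if (netif_queue_stopped(tp->dev) &&
 *	    tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 *		netif_wake_queue(tp->dev);
 */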
#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;

	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
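
/*
 * Usage sketch: the rest of the driver goes through these macros so the
 * access method (direct, indirect or flushed) can be swapped per chip,
 * for example:
 *
 *	tw32(GRC_MODE, tp->grc_mode);            posted write
 *	tw32_f(MAC_MODE, tp->mac_mode);          write plus read-back flush
 *	tw32_wait_f(GRC_LOCAL_CTRL, val, 100);   flush plus a 100 usec wait
 */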
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int ret = 0, i, off;
	u32 status, req, gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK  |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1039 static int tg3_mdio_init(struct tg3
*tp
)
1043 struct phy_device
*phydev
;
1045 if (tp
->tg3_flags3
& TG3_FLG3_5717_PLUS
) {
1048 tp
->phy_addr
= PCI_FUNC(tp
->pdev
->devfn
) + 1;
1050 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5717_A0
)
1051 is_serdes
= tr32(SG_DIG_STATUS
) & SG_DIG_IS_SERDES
;
1053 is_serdes
= tr32(TG3_CPMU_PHY_STRAP
) &
1054 TG3_CPMU_PHY_STRAP_IS_SERDES
;
1058 tp
->phy_addr
= TG3_PHY_MII_ADDR
;
1062 if (!(tp
->tg3_flags3
& TG3_FLG3_USE_PHYLIB
) ||
1063 (tp
->tg3_flags3
& TG3_FLG3_MDIOBUS_INITED
))
1066 tp
->mdio_bus
= mdiobus_alloc();
1067 if (tp
->mdio_bus
== NULL
)
1070 tp
->mdio_bus
->name
= "tg3 mdio bus";
1071 snprintf(tp
->mdio_bus
->id
, MII_BUS_ID_SIZE
, "%x",
1072 (tp
->pdev
->bus
->number
<< 8) | tp
->pdev
->devfn
);
1073 tp
->mdio_bus
->priv
= tp
;
1074 tp
->mdio_bus
->parent
= &tp
->pdev
->dev
;
1075 tp
->mdio_bus
->read
= &tg3_mdio_read
;
1076 tp
->mdio_bus
->write
= &tg3_mdio_write
;
1077 tp
->mdio_bus
->reset
= &tg3_mdio_reset
;
1078 tp
->mdio_bus
->phy_mask
= ~(1 << TG3_PHY_MII_ADDR
);
1079 tp
->mdio_bus
->irq
= &tp
->mdio_irq
[0];
1081 for (i
= 0; i
< PHY_MAX_ADDR
; i
++)
1082 tp
->mdio_bus
->irq
[i
] = PHY_POLL
;
1084 /* The bus registration will look for all the PHYs on the mdio bus.
1085 * Unfortunately, it does not ensure the PHY is powered up before
1086 * accessing the PHY ID registers. A chip reset is the
1087 * quickest way to bring the device back to an operational state..
1089 if (tg3_readphy(tp
, MII_BMCR
, ®
) || (reg
& BMCR_PDOWN
))
1092 i
= mdiobus_register(tp
->mdio_bus
);
1094 dev_warn(&tp
->pdev
->dev
, "mdiobus_reg failed (0x%x)\n", i
);
1095 mdiobus_free(tp
->mdio_bus
);
1099 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1101 if (!phydev
|| !phydev
->drv
) {
1102 dev_warn(&tp
->pdev
->dev
, "No PHY devices\n");
1103 mdiobus_unregister(tp
->mdio_bus
);
1104 mdiobus_free(tp
->mdio_bus
);
1108 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1109 case PHY_ID_BCM57780
:
1110 phydev
->interface
= PHY_INTERFACE_MODE_GMII
;
1111 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1113 case PHY_ID_BCM50610
:
1114 case PHY_ID_BCM50610M
:
1115 phydev
->dev_flags
|= PHY_BRCM_CLEAR_RGMII_MODE
|
1116 PHY_BRCM_RX_REFCLK_UNUSED
|
1117 PHY_BRCM_DIS_TXCRXC_NOENRGY
|
1118 PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1119 if (tp
->tg3_flags3
& TG3_FLG3_RGMII_INBAND_DISABLE
)
1120 phydev
->dev_flags
|= PHY_BRCM_STD_IBND_DISABLE
;
1121 if (tp
->tg3_flags3
& TG3_FLG3_RGMII_EXT_IBND_RX_EN
)
1122 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_RX_ENABLE
;
1123 if (tp
->tg3_flags3
& TG3_FLG3_RGMII_EXT_IBND_TX_EN
)
1124 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_TX_ENABLE
;
1126 case PHY_ID_RTL8211C
:
1127 phydev
->interface
= PHY_INTERFACE_MODE_RGMII
;
1129 case PHY_ID_RTL8201E
:
1130 case PHY_ID_BCMAC131
:
1131 phydev
->interface
= PHY_INTERFACE_MODE_MII
;
1132 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1133 tp
->phy_flags
|= TG3_PHYFLG_IS_FET
;
1137 tp
->tg3_flags3
|= TG3_FLG3_MDIOBUS_INITED
;
1139 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
1140 tg3_mdio_config_5785(tp
);
1145 static void tg3_mdio_fini(struct tg3
*tp
)
1147 if (tp
->tg3_flags3
& TG3_FLG3_MDIOBUS_INITED
) {
1148 tp
->tg3_flags3
&= ~TG3_FLG3_MDIOBUS_INITED
;
1149 mdiobus_unregister(tp
->mdio_bus
);
1150 mdiobus_free(tp
->mdio_bus
);
1154 static int tg3_phy_cl45_write(struct tg3
*tp
, u32 devad
, u32 addr
, u32 val
)
1158 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
1162 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
1166 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
1167 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
1171 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, val
);
1177 static int tg3_phy_cl45_read(struct tg3
*tp
, u32 devad
, u32 addr
, u32
*val
)
1181 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
1185 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
1189 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
1190 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
1194 err
= tg3_readphy(tp
, MII_TG3_MMD_ADDRESS
, val
);
1200 /* tp->lock is held. */
1201 static inline void tg3_generate_fw_event(struct tg3
*tp
)
1205 val
= tr32(GRC_RX_CPU_EVENT
);
1206 val
|= GRC_RX_CPU_DRIVER_EVENT
;
1207 tw32_f(GRC_RX_CPU_EVENT
, val
);
1209 tp
->last_event_jiffies
= jiffies
;
1212 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1214 /* tp->lock is held. */
1215 static void tg3_wait_for_event_ack(struct tg3
*tp
)
1218 unsigned int delay_cnt
;
1221 /* If enough time has passed, no wait is necessary. */
1222 time_remain
= (long)(tp
->last_event_jiffies
+ 1 +
1223 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC
)) -
1225 if (time_remain
< 0)
1228 /* Check if we can shorten the wait time. */
1229 delay_cnt
= jiffies_to_usecs(time_remain
);
1230 if (delay_cnt
> TG3_FW_EVENT_TIMEOUT_USEC
)
1231 delay_cnt
= TG3_FW_EVENT_TIMEOUT_USEC
;
1232 delay_cnt
= (delay_cnt
>> 3) + 1;
1234 for (i
= 0; i
< delay_cnt
; i
++) {
1235 if (!(tr32(GRC_RX_CPU_EVENT
) & GRC_RX_CPU_DRIVER_EVENT
))
1241 /* tp->lock is held. */
1242 static void tg3_ump_link_report(struct tg3
*tp
)
1247 if (!(tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) ||
1248 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))
1251 tg3_wait_for_event_ack(tp
);
1253 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_LINK_UPDATE
);
1255 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 14);
1258 if (!tg3_readphy(tp
, MII_BMCR
, ®
))
1260 if (!tg3_readphy(tp
, MII_BMSR
, ®
))
1261 val
|= (reg
& 0xffff);
1262 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
, val
);
1265 if (!tg3_readphy(tp
, MII_ADVERTISE
, ®
))
1267 if (!tg3_readphy(tp
, MII_LPA
, ®
))
1268 val
|= (reg
& 0xffff);
1269 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 4, val
);
1272 if (!(tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)) {
1273 if (!tg3_readphy(tp
, MII_CTRL1000
, ®
))
1275 if (!tg3_readphy(tp
, MII_STAT1000
, ®
))
1276 val
|= (reg
& 0xffff);
1278 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 8, val
);
1280 if (!tg3_readphy(tp
, MII_PHYADDR
, ®
))
1284 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 12, val
);
1286 tg3_generate_fw_event(tp
);
1289 static void tg3_link_report(struct tg3
*tp
)
1291 if (!netif_carrier_ok(tp
->dev
)) {
1292 netif_info(tp
, link
, tp
->dev
, "Link is down\n");
1293 tg3_ump_link_report(tp
);
1294 } else if (netif_msg_link(tp
)) {
1295 netdev_info(tp
->dev
, "Link is up at %d Mbps, %s duplex\n",
1296 (tp
->link_config
.active_speed
== SPEED_1000
?
1298 (tp
->link_config
.active_speed
== SPEED_100
?
1300 (tp
->link_config
.active_duplex
== DUPLEX_FULL
?
1303 netdev_info(tp
->dev
, "Flow control is %s for TX and %s for RX\n",
1304 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_TX
) ?
1306 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_RX
) ?
1308 tg3_ump_link_report(tp
);
1312 static u16
tg3_advert_flowctrl_1000T(u8 flow_ctrl
)
1316 if ((flow_ctrl
& FLOW_CTRL_TX
) && (flow_ctrl
& FLOW_CTRL_RX
))
1317 miireg
= ADVERTISE_PAUSE_CAP
;
1318 else if (flow_ctrl
& FLOW_CTRL_TX
)
1319 miireg
= ADVERTISE_PAUSE_ASYM
;
1320 else if (flow_ctrl
& FLOW_CTRL_RX
)
1321 miireg
= ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
;
1328 static u16
tg3_advert_flowctrl_1000X(u8 flow_ctrl
)
1332 if ((flow_ctrl
& FLOW_CTRL_TX
) && (flow_ctrl
& FLOW_CTRL_RX
))
1333 miireg
= ADVERTISE_1000XPAUSE
;
1334 else if (flow_ctrl
& FLOW_CTRL_TX
)
1335 miireg
= ADVERTISE_1000XPSE_ASYM
;
1336 else if (flow_ctrl
& FLOW_CTRL_RX
)
1337 miireg
= ADVERTISE_1000XPAUSE
| ADVERTISE_1000XPSE_ASYM
;
1344 static u8
tg3_resolve_flowctrl_1000X(u16 lcladv
, u16 rmtadv
)
1348 if (lcladv
& ADVERTISE_1000XPAUSE
) {
1349 if (lcladv
& ADVERTISE_1000XPSE_ASYM
) {
1350 if (rmtadv
& LPA_1000XPAUSE
)
1351 cap
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
1352 else if (rmtadv
& LPA_1000XPAUSE_ASYM
)
1355 if (rmtadv
& LPA_1000XPAUSE
)
1356 cap
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
1358 } else if (lcladv
& ADVERTISE_1000XPSE_ASYM
) {
1359 if ((rmtadv
& LPA_1000XPAUSE
) && (rmtadv
& LPA_1000XPAUSE_ASYM
))
1366 static void tg3_setup_flow_control(struct tg3
*tp
, u32 lcladv
, u32 rmtadv
)
1370 u32 old_rx_mode
= tp
->rx_mode
;
1371 u32 old_tx_mode
= tp
->tx_mode
;
1373 if (tp
->tg3_flags3
& TG3_FLG3_USE_PHYLIB
)
1374 autoneg
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]->autoneg
;
1376 autoneg
= tp
->link_config
.autoneg
;
1378 if (autoneg
== AUTONEG_ENABLE
&&
1379 (tp
->tg3_flags
& TG3_FLAG_PAUSE_AUTONEG
)) {
1380 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
1381 flowctrl
= tg3_resolve_flowctrl_1000X(lcladv
, rmtadv
);
1383 flowctrl
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
1385 flowctrl
= tp
->link_config
.flowctrl
;
1387 tp
->link_config
.active_flowctrl
= flowctrl
;
1389 if (flowctrl
& FLOW_CTRL_RX
)
1390 tp
->rx_mode
|= RX_MODE_FLOW_CTRL_ENABLE
;
1392 tp
->rx_mode
&= ~RX_MODE_FLOW_CTRL_ENABLE
;
1394 if (old_rx_mode
!= tp
->rx_mode
)
1395 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
1397 if (flowctrl
& FLOW_CTRL_TX
)
1398 tp
->tx_mode
|= TX_MODE_FLOW_CTRL_ENABLE
;
1400 tp
->tx_mode
&= ~TX_MODE_FLOW_CTRL_ENABLE
;
1402 if (old_tx_mode
!= tp
->tx_mode
)
1403 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
1406 static void tg3_adjust_link(struct net_device
*dev
)
1408 u8 oldflowctrl
, linkmesg
= 0;
1409 u32 mac_mode
, lcl_adv
, rmt_adv
;
1410 struct tg3
*tp
= netdev_priv(dev
);
1411 struct phy_device
*phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1413 spin_lock_bh(&tp
->lock
);
1415 mac_mode
= tp
->mac_mode
& ~(MAC_MODE_PORT_MODE_MASK
|
1416 MAC_MODE_HALF_DUPLEX
);
1418 oldflowctrl
= tp
->link_config
.active_flowctrl
;
1424 if (phydev
->speed
== SPEED_100
|| phydev
->speed
== SPEED_10
)
1425 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1426 else if (phydev
->speed
== SPEED_1000
||
1427 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
)
1428 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1430 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1432 if (phydev
->duplex
== DUPLEX_HALF
)
1433 mac_mode
|= MAC_MODE_HALF_DUPLEX
;
1435 lcl_adv
= tg3_advert_flowctrl_1000T(
1436 tp
->link_config
.flowctrl
);
1439 rmt_adv
= LPA_PAUSE_CAP
;
1440 if (phydev
->asym_pause
)
1441 rmt_adv
|= LPA_PAUSE_ASYM
;
1444 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
1446 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1448 if (mac_mode
!= tp
->mac_mode
) {
1449 tp
->mac_mode
= mac_mode
;
1450 tw32_f(MAC_MODE
, tp
->mac_mode
);
1454 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
) {
1455 if (phydev
->speed
== SPEED_10
)
1457 MAC_MI_STAT_10MBPS_MODE
|
1458 MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
1460 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
1463 if (phydev
->speed
== SPEED_1000
&& phydev
->duplex
== DUPLEX_HALF
)
1464 tw32(MAC_TX_LENGTHS
,
1465 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
1466 (6 << TX_LENGTHS_IPG_SHIFT
) |
1467 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
)));
1469 tw32(MAC_TX_LENGTHS
,
1470 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
1471 (6 << TX_LENGTHS_IPG_SHIFT
) |
1472 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
)));
1474 if ((phydev
->link
&& tp
->link_config
.active_speed
== SPEED_INVALID
) ||
1475 (!phydev
->link
&& tp
->link_config
.active_speed
!= SPEED_INVALID
) ||
1476 phydev
->speed
!= tp
->link_config
.active_speed
||
1477 phydev
->duplex
!= tp
->link_config
.active_duplex
||
1478 oldflowctrl
!= tp
->link_config
.active_flowctrl
)
1481 tp
->link_config
.active_speed
= phydev
->speed
;
1482 tp
->link_config
.active_duplex
= phydev
->duplex
;
1484 spin_unlock_bh(&tp
->lock
);
1487 tg3_link_report(tp
);
1490 static int tg3_phy_init(struct tg3
*tp
)
1492 struct phy_device
*phydev
;
1494 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
)
1497 /* Bring the PHY back to a known state. */
1500 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1502 /* Attach the MAC to the PHY. */
1503 phydev
= phy_connect(tp
->dev
, dev_name(&phydev
->dev
), tg3_adjust_link
,
1504 phydev
->dev_flags
, phydev
->interface
);
1505 if (IS_ERR(phydev
)) {
1506 dev_err(&tp
->pdev
->dev
, "Could not attach to PHY\n");
1507 return PTR_ERR(phydev
);
1510 /* Mask with MAC supported features. */
1511 switch (phydev
->interface
) {
1512 case PHY_INTERFACE_MODE_GMII
:
1513 case PHY_INTERFACE_MODE_RGMII
:
1514 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
1515 phydev
->supported
&= (PHY_GBIT_FEATURES
|
1517 SUPPORTED_Asym_Pause
);
1521 case PHY_INTERFACE_MODE_MII
:
1522 phydev
->supported
&= (PHY_BASIC_FEATURES
|
1524 SUPPORTED_Asym_Pause
);
1527 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
1531 tp
->phy_flags
|= TG3_PHYFLG_IS_CONNECTED
;
1533 phydev
->advertising
= phydev
->supported
;
1538 static void tg3_phy_start(struct tg3
*tp
)
1540 struct phy_device
*phydev
;
1542 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
1545 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1547 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
1548 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
1549 phydev
->speed
= tp
->link_config
.orig_speed
;
1550 phydev
->duplex
= tp
->link_config
.orig_duplex
;
1551 phydev
->autoneg
= tp
->link_config
.orig_autoneg
;
1552 phydev
->advertising
= tp
->link_config
.orig_advertising
;
1557 phy_start_aneg(phydev
);
1560 static void tg3_phy_stop(struct tg3
*tp
)
1562 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
1565 phy_stop(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
1568 static void tg3_phy_fini(struct tg3
*tp
)
1570 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
1571 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
1572 tp
->phy_flags
&= ~TG3_PHYFLG_IS_CONNECTED
;
1576 static int tg3_phydsp_read(struct tg3
*tp
, u32 reg
, u32
*val
)
1580 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
1582 err
= tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, val
);
1587 static int tg3_phydsp_write(struct tg3
*tp
, u32 reg
, u32 val
)
1591 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
1593 err
= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, val
);
1598 static void tg3_phy_fet_toggle_apd(struct tg3
*tp
, bool enable
)
1602 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
1605 tg3_writephy(tp
, MII_TG3_FET_TEST
,
1606 phytest
| MII_TG3_FET_SHADOW_EN
);
1607 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, &phy
)) {
1609 phy
|= MII_TG3_FET_SHDW_AUXSTAT2_APD
;
1611 phy
&= ~MII_TG3_FET_SHDW_AUXSTAT2_APD
;
1612 tg3_writephy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, phy
);
1614 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
1618 static void tg3_phy_toggle_apd(struct tg3
*tp
, bool enable
)
1622 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) ||
1623 ((tp
->tg3_flags3
& TG3_FLG3_5717_PLUS
) &&
1624 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)))
1627 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
1628 tg3_phy_fet_toggle_apd(tp
, enable
);
1632 reg
= MII_TG3_MISC_SHDW_WREN
|
1633 MII_TG3_MISC_SHDW_SCR5_SEL
|
1634 MII_TG3_MISC_SHDW_SCR5_LPED
|
1635 MII_TG3_MISC_SHDW_SCR5_DLPTLM
|
1636 MII_TG3_MISC_SHDW_SCR5_SDTL
|
1637 MII_TG3_MISC_SHDW_SCR5_C125OE
;
1638 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5784
|| !enable
)
1639 reg
|= MII_TG3_MISC_SHDW_SCR5_DLLAPD
;
1641 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, reg
);
1644 reg
= MII_TG3_MISC_SHDW_WREN
|
1645 MII_TG3_MISC_SHDW_APD_SEL
|
1646 MII_TG3_MISC_SHDW_APD_WKTM_84MS
;
1648 reg
|= MII_TG3_MISC_SHDW_APD_ENABLE
;
1650 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, reg
);
1653 static void tg3_phy_toggle_automdix(struct tg3
*tp
, int enable
)
1657 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) ||
1658 (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
1661 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
1664 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &ephy
)) {
1665 u32 reg
= MII_TG3_FET_SHDW_MISCCTRL
;
1667 tg3_writephy(tp
, MII_TG3_FET_TEST
,
1668 ephy
| MII_TG3_FET_SHADOW_EN
);
1669 if (!tg3_readphy(tp
, reg
, &phy
)) {
1671 phy
|= MII_TG3_FET_SHDW_MISCCTRL_MDIX
;
1673 phy
&= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX
;
1674 tg3_writephy(tp
, reg
, phy
);
1676 tg3_writephy(tp
, MII_TG3_FET_TEST
, ephy
);
1679 phy
= MII_TG3_AUXCTL_MISC_RDSEL_MISC
|
1680 MII_TG3_AUXCTL_SHDWSEL_MISC
;
1681 if (!tg3_writephy(tp
, MII_TG3_AUX_CTRL
, phy
) &&
1682 !tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &phy
)) {
1684 phy
|= MII_TG3_AUXCTL_MISC_FORCE_AMDIX
;
1686 phy
&= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX
;
1687 phy
|= MII_TG3_AUXCTL_MISC_WREN
;
1688 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, phy
);
1693 static void tg3_phy_set_wirespeed(struct tg3
*tp
)
1697 if (tp
->phy_flags
& TG3_PHYFLG_NO_ETH_WIRE_SPEED
)
1700 if (!tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x7007) &&
1701 !tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &val
))
1702 tg3_writephy(tp
, MII_TG3_AUX_CTRL
,
1703 (val
| (1 << 15) | (1 << 4)));
1706 static void tg3_phy_apply_otp(struct tg3
*tp
)
1715 /* Enable SM_DSP clock and tx 6dB coding. */
1716 phy
= MII_TG3_AUXCTL_SHDWSEL_AUXCTL
|
1717 MII_TG3_AUXCTL_ACTL_SMDSP_ENA
|
1718 MII_TG3_AUXCTL_ACTL_TX_6DB
;
1719 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, phy
);
1721 phy
= ((otp
& TG3_OTP_AGCTGT_MASK
) >> TG3_OTP_AGCTGT_SHIFT
);
1722 phy
|= MII_TG3_DSP_TAP1_AGCTGT_DFLT
;
1723 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP1
, phy
);
1725 phy
= ((otp
& TG3_OTP_HPFFLTR_MASK
) >> TG3_OTP_HPFFLTR_SHIFT
) |
1726 ((otp
& TG3_OTP_HPFOVER_MASK
) >> TG3_OTP_HPFOVER_SHIFT
);
1727 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH0
, phy
);
1729 phy
= ((otp
& TG3_OTP_LPFDIS_MASK
) >> TG3_OTP_LPFDIS_SHIFT
);
1730 phy
|= MII_TG3_DSP_AADJ1CH3_ADCCKADJ
;
1731 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH3
, phy
);
1733 phy
= ((otp
& TG3_OTP_VDAC_MASK
) >> TG3_OTP_VDAC_SHIFT
);
1734 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP75
, phy
);
1736 phy
= ((otp
& TG3_OTP_10BTAMP_MASK
) >> TG3_OTP_10BTAMP_SHIFT
);
1737 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP96
, phy
);
1739 phy
= ((otp
& TG3_OTP_ROFF_MASK
) >> TG3_OTP_ROFF_SHIFT
) |
1740 ((otp
& TG3_OTP_RCOFF_MASK
) >> TG3_OTP_RCOFF_SHIFT
);
1741 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP97
, phy
);
1743 /* Turn off SM_DSP clock. */
1744 phy
= MII_TG3_AUXCTL_SHDWSEL_AUXCTL
|
1745 MII_TG3_AUXCTL_ACTL_TX_6DB
;
1746 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, phy
);
1749 static void tg3_phy_eee_adjust(struct tg3
*tp
, u32 current_link_up
)
1753 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
1758 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
1759 current_link_up
== 1 &&
1760 tp
->link_config
.active_duplex
== DUPLEX_FULL
&&
1761 (tp
->link_config
.active_speed
== SPEED_100
||
1762 tp
->link_config
.active_speed
== SPEED_1000
)) {
1765 if (tp
->link_config
.active_speed
== SPEED_1000
)
1766 eeectl
= TG3_CPMU_EEE_CTRL_EXIT_16_5_US
;
1768 eeectl
= TG3_CPMU_EEE_CTRL_EXIT_36_US
;
1770 tw32(TG3_CPMU_EEE_CTRL
, eeectl
);
1772 tg3_phy_cl45_read(tp
, MDIO_MMD_AN
,
1773 TG3_CL45_D7_EEERES_STAT
, &val
);
1776 case TG3_CL45_D7_EEERES_STAT_LP_1000T
:
1777 switch (GET_ASIC_REV(tp
->pci_chip_rev_id
)) {
1780 case ASIC_REV_57765
:
1781 /* Enable SM_DSP clock and tx 6dB coding. */
1782 val
= MII_TG3_AUXCTL_SHDWSEL_AUXCTL
|
1783 MII_TG3_AUXCTL_ACTL_SMDSP_ENA
|
1784 MII_TG3_AUXCTL_ACTL_TX_6DB
;
1785 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, val
);
1787 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, 0x0000);
1789 /* Turn off SM_DSP clock. */
1790 val
= MII_TG3_AUXCTL_SHDWSEL_AUXCTL
|
1791 MII_TG3_AUXCTL_ACTL_TX_6DB
;
1792 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, val
);
1795 case TG3_CL45_D7_EEERES_STAT_LP_100TX
:
1800 if (!tp
->setlpicnt
) {
1801 val
= tr32(TG3_CPMU_EEE_MODE
);
1802 tw32(TG3_CPMU_EEE_MODE
, val
& ~TG3_CPMU_EEEMD_LPI_ENABLE
);
1806 static int tg3_wait_macro_done(struct tg3
*tp
)
1813 if (!tg3_readphy(tp
, MII_TG3_DSP_CONTROL
, &tmp32
)) {
1814 if ((tmp32
& 0x1000) == 0)
1824 static int tg3_phy_write_and_check_testpat(struct tg3
*tp
, int *resetp
)
1826 static const u32 test_pat
[4][6] = {
1827 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1828 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1829 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1830 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1834 for (chan
= 0; chan
< 4; chan
++) {
1837 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
1838 (chan
* 0x2000) | 0x0200);
1839 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0002);
1841 for (i
= 0; i
< 6; i
++)
1842 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
,
1845 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0202);
1846 if (tg3_wait_macro_done(tp
)) {
1851 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
1852 (chan
* 0x2000) | 0x0200);
1853 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0082);
1854 if (tg3_wait_macro_done(tp
)) {
1859 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0802);
1860 if (tg3_wait_macro_done(tp
)) {
1865 for (i
= 0; i
< 6; i
+= 2) {
1868 if (tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &low
) ||
1869 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &high
) ||
1870 tg3_wait_macro_done(tp
)) {
1876 if (low
!= test_pat
[chan
][i
] ||
1877 high
!= test_pat
[chan
][i
+1]) {
1878 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000b);
1879 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4001);
1880 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4005);
1890 static int tg3_phy_reset_chanpat(struct tg3
*tp
)
1894 for (chan
= 0; chan
< 4; chan
++) {
1897 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
1898 (chan
* 0x2000) | 0x0200);
1899 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0002);
1900 for (i
= 0; i
< 6; i
++)
1901 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x000);
1902 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0202);
1903 if (tg3_wait_macro_done(tp
))
1910 static int tg3_phy_reset_5703_4_5(struct tg3
*tp
)
1912 u32 reg32
, phy9_orig
;
1913 int retries
, do_phy_reset
, err
;
1919 err
= tg3_bmcr_reset(tp
);
1925 /* Disable transmitter and interrupt. */
1926 if (tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
))
1930 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
1932 /* Set full-duplex, 1000 mbps. */
1933 tg3_writephy(tp
, MII_BMCR
,
1934 BMCR_FULLDPLX
| TG3_BMCR_SPEED1000
);
1936 /* Set to master mode. */
1937 if (tg3_readphy(tp
, MII_TG3_CTRL
, &phy9_orig
))
1940 tg3_writephy(tp
, MII_TG3_CTRL
,
1941 (MII_TG3_CTRL_AS_MASTER
|
1942 MII_TG3_CTRL_ENABLE_AS_MASTER
));
1944 /* Enable SM_DSP_CLOCK and 6dB. */
1945 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
1947 /* Block the PHY control access. */
1948 tg3_phydsp_write(tp
, 0x8005, 0x0800);
1950 err
= tg3_phy_write_and_check_testpat(tp
, &do_phy_reset
);
1953 } while (--retries
);
1955 err
= tg3_phy_reset_chanpat(tp
);
1959 tg3_phydsp_write(tp
, 0x8005, 0x0000);
1961 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8200);
1962 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0000);
1964 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
1965 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
1966 /* Set Extended packet length bit for jumbo frames */
1967 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4400);
1969 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
1972 tg3_writephy(tp
, MII_TG3_CTRL
, phy9_orig
);
1974 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
)) {
1976 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
1983 /* This will reset the tigon3 PHY if there is no valid
1984 * link unless the FORCE argument is non-zero.
1986 static int tg3_phy_reset(struct tg3
*tp
)
1991 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
1992 val
= tr32(GRC_MISC_CFG
);
1993 tw32_f(GRC_MISC_CFG
, val
& ~GRC_MISC_CFG_EPHY_IDDQ
);
1996 err
= tg3_readphy(tp
, MII_BMSR
, &val
);
1997 err
|= tg3_readphy(tp
, MII_BMSR
, &val
);
2001 if (netif_running(tp
->dev
) && netif_carrier_ok(tp
->dev
)) {
2002 netif_carrier_off(tp
->dev
);
2003 tg3_link_report(tp
);
2006 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
2007 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
2008 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
2009 err
= tg3_phy_reset_5703_4_5(tp
);
2016 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
&&
2017 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5784_AX
) {
2018 cpmuctrl
= tr32(TG3_CPMU_CTRL
);
2019 if (cpmuctrl
& CPMU_CTRL_GPHY_10MB_RXONLY
)
2021 cpmuctrl
& ~CPMU_CTRL_GPHY_10MB_RXONLY
);
2024 err
= tg3_bmcr_reset(tp
);
2028 if (cpmuctrl
& CPMU_CTRL_GPHY_10MB_RXONLY
) {
2029 val
= MII_TG3_DSP_EXP8_AEDW
| MII_TG3_DSP_EXP8_REJ2MHz
;
2030 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP8
, val
);
2032 tw32(TG3_CPMU_CTRL
, cpmuctrl
);
2035 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
||
2036 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5761_AX
) {
2037 val
= tr32(TG3_CPMU_LSPD_1000MB_CLK
);
2038 if ((val
& CPMU_LSPD_1000MB_MACCLK_MASK
) ==
2039 CPMU_LSPD_1000MB_MACCLK_12_5
) {
2040 val
&= ~CPMU_LSPD_1000MB_MACCLK_MASK
;
2042 tw32_f(TG3_CPMU_LSPD_1000MB_CLK
, val
);
2046 if ((tp
->tg3_flags3
& TG3_FLG3_5717_PLUS
) &&
2047 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
))
2050 tg3_phy_apply_otp(tp
);
2052 if (tp
->phy_flags
& TG3_PHYFLG_ENABLE_APD
)
2053 tg3_phy_toggle_apd(tp
, true);
2055 tg3_phy_toggle_apd(tp
, false);
2058 if (tp
->phy_flags
& TG3_PHYFLG_ADC_BUG
) {
2059 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
2060 tg3_phydsp_write(tp
, 0x201f, 0x2aaa);
2061 tg3_phydsp_write(tp
, 0x000a, 0x0323);
2062 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
2064 if (tp
->phy_flags
& TG3_PHYFLG_5704_A0_BUG
) {
2065 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
2066 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
2068 if (tp
->phy_flags
& TG3_PHYFLG_BER_BUG
) {
2069 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
2070 tg3_phydsp_write(tp
, 0x000a, 0x310b);
2071 tg3_phydsp_write(tp
, 0x201f, 0x9506);
2072 tg3_phydsp_write(tp
, 0x401f, 0x14e2);
2073 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
2074 } else if (tp
->phy_flags
& TG3_PHYFLG_JITTER_BUG
) {
2075 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
2076 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
2077 if (tp
->phy_flags
& TG3_PHYFLG_ADJUST_TRIM
) {
2078 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x110b);
2079 tg3_writephy(tp
, MII_TG3_TEST1
,
2080 MII_TG3_TEST1_TRIM_EN
| 0x4);
2082 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x010b);
2083 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
2085 /* Set Extended packet length bit (bit 14) on all chips that */
2086 /* support jumbo frames */
2087 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
2088 /* Cannot do read-modify-write on 5401 */
2089 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4c20);
2090 } else if (tp
->tg3_flags
& TG3_FLAG_JUMBO_CAPABLE
) {
2091 /* Set bit 14 with read-modify-write to preserve other bits */
2092 if (!tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0007) &&
2093 !tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &val
))
2094 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, val
| 0x4000);
2097 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2098 * jumbo frames transmission.
2100 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_CAPABLE
) {
2101 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, &val
))
2102 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
2103 val
| MII_TG3_EXT_CTRL_FIFO_ELASTIC
);
2106 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
2107 /* adjust output voltage */
2108 tg3_writephy(tp
, MII_TG3_FET_PTEST
, 0x12);
2111 tg3_phy_toggle_automdix(tp
, 1);
2112 tg3_phy_set_wirespeed(tp
);
2116 static void tg3_frob_aux_power(struct tg3
*tp
)
2118 bool need_vaux
= false;
2120 /* The GPIOs do something completely different on 57765. */
2121 if ((tp
->tg3_flags2
& TG3_FLG2_IS_NIC
) == 0 ||
2122 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
2123 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
2126 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
2127 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
||
2128 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
) &&
2129 tp
->pdev_peer
!= tp
->pdev
) {
2130 struct net_device
*dev_peer
;
2132 dev_peer
= pci_get_drvdata(tp
->pdev_peer
);
2134 /* remove_one() may have been run on the peer. */
2136 struct tg3
*tp_peer
= netdev_priv(dev_peer
);
2138 if (tp_peer
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
)
2141 if ((tp_peer
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) ||
2142 (tp_peer
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))
2147 if ((tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) ||
2148 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))
2152 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
2153 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
2154 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2155 (GRC_LCLCTRL_GPIO_OE0
|
2156 GRC_LCLCTRL_GPIO_OE1
|
2157 GRC_LCLCTRL_GPIO_OE2
|
2158 GRC_LCLCTRL_GPIO_OUTPUT0
|
2159 GRC_LCLCTRL_GPIO_OUTPUT1
),
2161 } else if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761
||
2162 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761S
) {
2163 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2164 u32 grc_local_ctrl
= GRC_LCLCTRL_GPIO_OE0
|
2165 GRC_LCLCTRL_GPIO_OE1
|
2166 GRC_LCLCTRL_GPIO_OE2
|
2167 GRC_LCLCTRL_GPIO_OUTPUT0
|
2168 GRC_LCLCTRL_GPIO_OUTPUT1
|
2170 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
, 100);
2172 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT2
;
2173 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
, 100);
2175 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT0
;
2176 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
, 100);
2179 u32 grc_local_ctrl
= 0;
2181 /* Workaround to prevent overdrawing Amps. */
2182 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
2184 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
2185 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2186 grc_local_ctrl
, 100);
2189 /* On 5753 and variants, GPIO2 cannot be used. */
2190 no_gpio2
= tp
->nic_sram_data_cfg
&
2191 NIC_SRAM_DATA_CFG_NO_GPIO2
;
2193 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE0
|
2194 GRC_LCLCTRL_GPIO_OE1
|
2195 GRC_LCLCTRL_GPIO_OE2
|
2196 GRC_LCLCTRL_GPIO_OUTPUT1
|
2197 GRC_LCLCTRL_GPIO_OUTPUT2
;
2199 grc_local_ctrl
&= ~(GRC_LCLCTRL_GPIO_OE2
|
2200 GRC_LCLCTRL_GPIO_OUTPUT2
);
2202 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2203 grc_local_ctrl
, 100);
2205 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT0
;
2207 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2208 grc_local_ctrl
, 100);
2211 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT2
;
2212 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2213 grc_local_ctrl
, 100);
2217 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
2218 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) {
2219 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2220 (GRC_LCLCTRL_GPIO_OE1
|
2221 GRC_LCLCTRL_GPIO_OUTPUT1
), 100);
2223 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2224 GRC_LCLCTRL_GPIO_OE1
, 100);
2226 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2227 (GRC_LCLCTRL_GPIO_OE1
|
2228 GRC_LCLCTRL_GPIO_OUTPUT1
), 100);
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
2255 static void tg3_power_down_phy(struct tg3
*tp
, bool do_low_power
)
2259 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
2260 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
2261 u32 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
2262 u32 serdes_cfg
= tr32(MAC_SERDES_CFG
);
2265 SG_DIG_USING_HW_AUTONEG
| SG_DIG_SOFT_RESET
;
2266 tw32(SG_DIG_CTRL
, sg_dig_ctrl
);
2267 tw32(MAC_SERDES_CFG
, serdes_cfg
| (1 << 15));
2272 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
2274 val
= tr32(GRC_MISC_CFG
);
2275 tw32_f(GRC_MISC_CFG
, val
| GRC_MISC_CFG_EPHY_IDDQ
);
2278 } else if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
2280 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
2283 tg3_writephy(tp
, MII_ADVERTISE
, 0);
2284 tg3_writephy(tp
, MII_BMCR
,
2285 BMCR_ANENABLE
| BMCR_ANRESTART
);
2287 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2288 phytest
| MII_TG3_FET_SHADOW_EN
);
2289 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXMODE4
, &phy
)) {
2290 phy
|= MII_TG3_FET_SHDW_AUXMODE4_SBPD
;
2292 MII_TG3_FET_SHDW_AUXMODE4
,
2295 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
2298 } else if (do_low_power
) {
2299 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
2300 MII_TG3_EXT_CTRL_FORCE_LED_OFF
);
2302 tg3_writephy(tp
, MII_TG3_AUX_CTRL
,
2303 MII_TG3_AUXCTL_SHDWSEL_PWRCTL
|
2304 MII_TG3_AUXCTL_PCTL_100TX_LPWR
|
2305 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE
|
2306 MII_TG3_AUXCTL_PCTL_VREG_11V
);
2309 /* The PHY should not be powered down on some chips because
2312 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
2313 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
2314 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
&&
2315 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)))
2318 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
||
2319 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5761_AX
) {
2320 val
= tr32(TG3_CPMU_LSPD_1000MB_CLK
);
2321 val
&= ~CPMU_LSPD_1000MB_MACCLK_MASK
;
2322 val
|= CPMU_LSPD_1000MB_MACCLK_12_5
;
2323 tw32_f(TG3_CPMU_LSPD_1000MB_CLK
, val
);
2326 tg3_writephy(tp
, MII_BMCR
, BMCR_PDOWN
);
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}
/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
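/*
 * Illustrative sketch (not part of the driver): what "bytestream format"
 * means for callers of tg3_nvram_read_be32().  cpu_to_be32() stores the
 * most significant byte first regardless of host endianness, so the bytes
 * land in memory in the same order they appear in NVRAM.  The helper name
 * below is hypothetical.
 */
#if 0	/* example only */
static void example_be32_to_bytes(u32 v, u8 *dst)
{
	dst[0] = (v >> 24) & 0xff;	/* most significant byte first */
	dst[1] = (v >> 16) & 0xff;
	dst[2] = (v >>  8) & 0xff;
	dst[3] = v & 0xff;
}
#endif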
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
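/*
 * Illustrative sketch (not part of the driver): the MAC address is split
 * across two registers per address slot, the first two octets in the "high"
 * word and the remaining four in the "low" word, exactly as computed above.
 * The helper name is hypothetical.
 */
#if 0	/* example only */
static void example_pack_mac(const u8 mac[6], u32 *high, u32 *low)
{
	*high = (mac[0] << 8) | mac[1];
	*low  = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
}
#endif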
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}

static int tg3_power_up(struct tg3 *tp)
{
	tg3_enable_register_access(tp);

	pci_set_power_state(tp->pdev, PCI_D0);

	/* Switch out of Vaux if it is a NIC */
	if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

	return 0;
}
2584 static int tg3_power_down_prepare(struct tg3
*tp
)
2587 bool device_should_wake
, do_low_power
;
2589 tg3_enable_register_access(tp
);
2591 /* Restore the CLKREQ setting. */
2592 if (tp
->tg3_flags3
& TG3_FLG3_CLKREQ_BUG
) {
2595 pci_read_config_word(tp
->pdev
,
2596 tp
->pcie_cap
+ PCI_EXP_LNKCTL
,
2598 lnkctl
|= PCI_EXP_LNKCTL_CLKREQ_EN
;
2599 pci_write_config_word(tp
->pdev
,
2600 tp
->pcie_cap
+ PCI_EXP_LNKCTL
,
2604 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
2605 tw32(TG3PCI_MISC_HOST_CTRL
,
2606 misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
);
2608 device_should_wake
= device_may_wakeup(&tp
->pdev
->dev
) &&
2609 (tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
);
2611 if (tp
->tg3_flags3
& TG3_FLG3_USE_PHYLIB
) {
2612 do_low_power
= false;
2613 if ((tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) &&
2614 !(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
2615 struct phy_device
*phydev
;
2616 u32 phyid
, advertising
;
2618 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
2620 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
2622 tp
->link_config
.orig_speed
= phydev
->speed
;
2623 tp
->link_config
.orig_duplex
= phydev
->duplex
;
2624 tp
->link_config
.orig_autoneg
= phydev
->autoneg
;
2625 tp
->link_config
.orig_advertising
= phydev
->advertising
;
2627 advertising
= ADVERTISED_TP
|
2629 ADVERTISED_Autoneg
|
2630 ADVERTISED_10baseT_Half
;
2632 if ((tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) ||
2633 device_should_wake
) {
2634 if (tp
->tg3_flags
& TG3_FLAG_WOL_SPEED_100MB
)
2636 ADVERTISED_100baseT_Half
|
2637 ADVERTISED_100baseT_Full
|
2638 ADVERTISED_10baseT_Full
;
2640 advertising
|= ADVERTISED_10baseT_Full
;
2643 phydev
->advertising
= advertising
;
2645 phy_start_aneg(phydev
);
2647 phyid
= phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
;
2648 if (phyid
!= PHY_ID_BCMAC131
) {
2649 phyid
&= PHY_BCM_OUI_MASK
;
2650 if (phyid
== PHY_BCM_OUI_1
||
2651 phyid
== PHY_BCM_OUI_2
||
2652 phyid
== PHY_BCM_OUI_3
)
2653 do_low_power
= true;
2657 do_low_power
= true;
2659 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
2660 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
2661 tp
->link_config
.orig_speed
= tp
->link_config
.speed
;
2662 tp
->link_config
.orig_duplex
= tp
->link_config
.duplex
;
2663 tp
->link_config
.orig_autoneg
= tp
->link_config
.autoneg
;
2666 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
2667 tp
->link_config
.speed
= SPEED_10
;
2668 tp
->link_config
.duplex
= DUPLEX_HALF
;
2669 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
2670 tg3_setup_phy(tp
, 0);
2674 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
2677 val
= tr32(GRC_VCPU_EXT_CTRL
);
2678 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_DISABLE_WOL
);
2679 } else if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
2683 for (i
= 0; i
< 200; i
++) {
2684 tg3_read_mem(tp
, NIC_SRAM_FW_ASF_STATUS_MBOX
, &val
);
2685 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
2690 if (tp
->tg3_flags
& TG3_FLAG_WOL_CAP
)
2691 tg3_write_mem(tp
, NIC_SRAM_WOL_MBOX
, WOL_SIGNATURE
|
2692 WOL_DRV_STATE_SHUTDOWN
|
2696 if (device_should_wake
) {
2699 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
2701 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x5a);
2705 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
2706 mac_mode
= MAC_MODE_PORT_MODE_GMII
;
2708 mac_mode
= MAC_MODE_PORT_MODE_MII
;
2710 mac_mode
|= tp
->mac_mode
& MAC_MODE_LINK_POLARITY
;
2711 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
2713 u32 speed
= (tp
->tg3_flags
&
2714 TG3_FLAG_WOL_SPEED_100MB
) ?
2715 SPEED_100
: SPEED_10
;
2716 if (tg3_5700_link_polarity(tp
, speed
))
2717 mac_mode
|= MAC_MODE_LINK_POLARITY
;
2719 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
2722 mac_mode
= MAC_MODE_PORT_MODE_TBI
;
2725 if (!(tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
))
2726 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
2728 mac_mode
|= MAC_MODE_MAGIC_PKT_ENABLE
;
2729 if (((tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) &&
2730 !(tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)) &&
2731 ((tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) ||
2732 (tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
)))
2733 mac_mode
|= MAC_MODE_KEEP_FRAME_IN_WOL
;
2735 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
)
2736 mac_mode
|= MAC_MODE_APE_TX_EN
|
2737 MAC_MODE_APE_RX_EN
|
2738 MAC_MODE_TDE_ENABLE
;
2740 tw32_f(MAC_MODE
, mac_mode
);
2743 tw32_f(MAC_RX_MODE
, RX_MODE_ENABLE
);
2747 if (!(tp
->tg3_flags
& TG3_FLAG_WOL_SPEED_100MB
) &&
2748 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
2749 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)) {
2752 base_val
= tp
->pci_clock_ctrl
;
2753 base_val
|= (CLOCK_CTRL_RXCLK_DISABLE
|
2754 CLOCK_CTRL_TXCLK_DISABLE
);
2756 tw32_wait_f(TG3PCI_CLOCK_CTRL
, base_val
| CLOCK_CTRL_ALTCLK
|
2757 CLOCK_CTRL_PWRDOWN_PLL133
, 40);
2758 } else if ((tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) ||
2759 (tp
->tg3_flags
& TG3_FLAG_CPMU_PRESENT
) ||
2760 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)) {
2762 } else if (!((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
2763 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))) {
2764 u32 newbits1
, newbits2
;
2766 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
2767 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
2768 newbits1
= (CLOCK_CTRL_RXCLK_DISABLE
|
2769 CLOCK_CTRL_TXCLK_DISABLE
|
2771 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
2772 } else if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
2773 newbits1
= CLOCK_CTRL_625_CORE
;
2774 newbits2
= newbits1
| CLOCK_CTRL_ALTCLK
;
2776 newbits1
= CLOCK_CTRL_ALTCLK
;
2777 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
2780 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits1
,
2783 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits2
,
2786 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
2789 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
2790 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
2791 newbits3
= (CLOCK_CTRL_RXCLK_DISABLE
|
2792 CLOCK_CTRL_TXCLK_DISABLE
|
2793 CLOCK_CTRL_44MHZ_CORE
);
2795 newbits3
= CLOCK_CTRL_44MHZ_CORE
;
2798 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
2799 tp
->pci_clock_ctrl
| newbits3
, 40);
2803 if (!(device_should_wake
) &&
2804 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))
2805 tg3_power_down_phy(tp
, do_low_power
);
2807 tg3_frob_aux_power(tp
);
2809 /* Workaround for unstable PLL clock */
2810 if ((GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_AX
) ||
2811 (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_BX
)) {
2812 u32 val
= tr32(0x7d00);
2814 val
&= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2816 if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
2819 err
= tg3_nvram_lock(tp
);
2820 tg3_halt_cpu(tp
, RX_CPU_BASE
);
2822 tg3_nvram_unlock(tp
);
2826 tg3_write_sig_post_reset(tp
, RESET_KIND_SHUTDOWN
);
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val,
					 u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
2886 static void tg3_phy_copper_begin(struct tg3
*tp
)
2891 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
2892 /* Entering low power mode. Disable gigabit and
2893 * 100baseT advertisements.
2895 tg3_writephy(tp
, MII_TG3_CTRL
, 0);
2897 new_adv
= (ADVERTISE_10HALF
| ADVERTISE_10FULL
|
2898 ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
);
2899 if (tp
->tg3_flags
& TG3_FLAG_WOL_SPEED_100MB
)
2900 new_adv
|= (ADVERTISE_100HALF
| ADVERTISE_100FULL
);
2902 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
2903 } else if (tp
->link_config
.speed
== SPEED_INVALID
) {
2904 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
2905 tp
->link_config
.advertising
&=
2906 ~(ADVERTISED_1000baseT_Half
|
2907 ADVERTISED_1000baseT_Full
);
2909 new_adv
= ADVERTISE_CSMA
;
2910 if (tp
->link_config
.advertising
& ADVERTISED_10baseT_Half
)
2911 new_adv
|= ADVERTISE_10HALF
;
2912 if (tp
->link_config
.advertising
& ADVERTISED_10baseT_Full
)
2913 new_adv
|= ADVERTISE_10FULL
;
2914 if (tp
->link_config
.advertising
& ADVERTISED_100baseT_Half
)
2915 new_adv
|= ADVERTISE_100HALF
;
2916 if (tp
->link_config
.advertising
& ADVERTISED_100baseT_Full
)
2917 new_adv
|= ADVERTISE_100FULL
;
2919 new_adv
|= tg3_advert_flowctrl_1000T(tp
->link_config
.flowctrl
);
2921 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
2923 if (tp
->link_config
.advertising
&
2924 (ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
)) {
2926 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Half
)
2927 new_adv
|= MII_TG3_CTRL_ADV_1000_HALF
;
2928 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Full
)
2929 new_adv
|= MII_TG3_CTRL_ADV_1000_FULL
;
2930 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
) &&
2931 (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
2932 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
))
2933 new_adv
|= (MII_TG3_CTRL_AS_MASTER
|
2934 MII_TG3_CTRL_ENABLE_AS_MASTER
);
2935 tg3_writephy(tp
, MII_TG3_CTRL
, new_adv
);
2937 tg3_writephy(tp
, MII_TG3_CTRL
, 0);
2940 new_adv
= tg3_advert_flowctrl_1000T(tp
->link_config
.flowctrl
);
2941 new_adv
|= ADVERTISE_CSMA
;
2943 /* Asking for a specific link mode. */
2944 if (tp
->link_config
.speed
== SPEED_1000
) {
2945 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
2947 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
2948 new_adv
= MII_TG3_CTRL_ADV_1000_FULL
;
2950 new_adv
= MII_TG3_CTRL_ADV_1000_HALF
;
2951 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
2952 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
)
2953 new_adv
|= (MII_TG3_CTRL_AS_MASTER
|
2954 MII_TG3_CTRL_ENABLE_AS_MASTER
);
2956 if (tp
->link_config
.speed
== SPEED_100
) {
2957 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
2958 new_adv
|= ADVERTISE_100FULL
;
2960 new_adv
|= ADVERTISE_100HALF
;
2962 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
2963 new_adv
|= ADVERTISE_10FULL
;
2965 new_adv
|= ADVERTISE_10HALF
;
2967 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
2972 tg3_writephy(tp
, MII_TG3_CTRL
, new_adv
);
2975 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
) {
2978 tw32(TG3_CPMU_EEE_MODE
,
2979 tr32(TG3_CPMU_EEE_MODE
) & ~TG3_CPMU_EEEMD_LPI_ENABLE
);
2981 /* Enable SM_DSP clock and tx 6dB coding. */
2982 val
= MII_TG3_AUXCTL_SHDWSEL_AUXCTL
|
2983 MII_TG3_AUXCTL_ACTL_SMDSP_ENA
|
2984 MII_TG3_AUXCTL_ACTL_TX_6DB
;
2985 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, val
);
2987 switch (GET_ASIC_REV(tp
->pci_chip_rev_id
)) {
2989 case ASIC_REV_57765
:
2990 if (!tg3_phydsp_read(tp
, MII_TG3_DSP_CH34TP2
, &val
))
2991 tg3_phydsp_write(tp
, MII_TG3_DSP_CH34TP2
, val
|
2992 MII_TG3_DSP_CH34TP2_HIBW01
);
2995 val
= MII_TG3_DSP_TAP26_ALNOKO
|
2996 MII_TG3_DSP_TAP26_RMRXSTO
|
2997 MII_TG3_DSP_TAP26_OPCSINPT
;
2998 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, val
);
3002 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
3003 /* Advertise 100-BaseTX EEE ability */
3004 if (tp
->link_config
.advertising
&
3005 ADVERTISED_100baseT_Full
)
3006 val
|= MDIO_AN_EEE_ADV_100TX
;
3007 /* Advertise 1000-BaseT EEE ability */
3008 if (tp
->link_config
.advertising
&
3009 ADVERTISED_1000baseT_Full
)
3010 val
|= MDIO_AN_EEE_ADV_1000T
;
3012 tg3_phy_cl45_write(tp
, MDIO_MMD_AN
, MDIO_AN_EEE_ADV
, val
);
3014 /* Turn off SM_DSP clock. */
3015 val
= MII_TG3_AUXCTL_SHDWSEL_AUXCTL
|
3016 MII_TG3_AUXCTL_ACTL_TX_6DB
;
3017 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, val
);
3020 if (tp
->link_config
.autoneg
== AUTONEG_DISABLE
&&
3021 tp
->link_config
.speed
!= SPEED_INVALID
) {
3022 u32 bmcr
, orig_bmcr
;
3024 tp
->link_config
.active_speed
= tp
->link_config
.speed
;
3025 tp
->link_config
.active_duplex
= tp
->link_config
.duplex
;
3028 switch (tp
->link_config
.speed
) {
3034 bmcr
|= BMCR_SPEED100
;
3038 bmcr
|= TG3_BMCR_SPEED1000
;
3042 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
3043 bmcr
|= BMCR_FULLDPLX
;
3045 if (!tg3_readphy(tp
, MII_BMCR
, &orig_bmcr
) &&
3046 (bmcr
!= orig_bmcr
)) {
3047 tg3_writephy(tp
, MII_BMCR
, BMCR_LOOPBACK
);
3048 for (i
= 0; i
< 1500; i
++) {
3052 if (tg3_readphy(tp
, MII_BMSR
, &tmp
) ||
3053 tg3_readphy(tp
, MII_BMSR
, &tmp
))
3055 if (!(tmp
& BMSR_LSTATUS
)) {
3060 tg3_writephy(tp
, MII_BMCR
, bmcr
);
3064 tg3_writephy(tp
, MII_BMCR
,
3065 BMCR_ANENABLE
| BMCR_ANRESTART
);
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
{
	u32 adv_reg, all_mask = 0;

	if (mask & ADVERTISED_10baseT_Half)
		all_mask |= ADVERTISE_10HALF;
	if (mask & ADVERTISED_10baseT_Full)
		all_mask |= ADVERTISE_10FULL;
	if (mask & ADVERTISED_100baseT_Half)
		all_mask |= ADVERTISE_100HALF;
	if (mask & ADVERTISED_100baseT_Full)
		all_mask |= ADVERTISE_100FULL;

	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
		return 0;

	if ((adv_reg & all_mask) != all_mask)
		return 0;
	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		all_mask = 0;
		if (mask & ADVERTISED_1000baseT_Half)
			all_mask |= ADVERTISE_1000HALF;
		if (mask & ADVERTISED_1000baseT_Full)
			all_mask |= ADVERTISE_1000FULL;

		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
			return 0;

		if ((tg3_ctrl & all_mask) != all_mask)
			return 0;
	}
	return 1;
}
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
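/*
 * Illustrative sketch (not part of the driver): how a requested flow-control
 * setting maps onto the MII pause advertisement bits compared above.  This
 * follows the usual 802.3 convention (symmetric pause vs. asymmetric pause);
 * the helper name is hypothetical, and the driver's own mapping lives in
 * tg3_advert_flowctrl_1000T(), defined elsewhere in this file.
 */
#if 0	/* example only */
static u32 example_flowctrl_to_mii_adv(u8 flowctrl)
{
	u32 adv = 0;

	if ((flowctrl & FLOW_CTRL_TX) && (flowctrl & FLOW_CTRL_RX))
		adv = ADVERTISE_PAUSE_CAP;			/* symmetric  */
	else if (flowctrl & FLOW_CTRL_RX)
		adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; /* rx only  */
	else if (flowctrl & FLOW_CTRL_TX)
		adv = ADVERTISE_PAUSE_ASYM;			/* tx only    */

	return adv;
}
#endif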
3157 static int tg3_setup_copper_phy(struct tg3
*tp
, int force_reset
)
3159 int current_link_up
;
3161 u32 lcl_adv
, rmt_adv
;
3169 (MAC_STATUS_SYNC_CHANGED
|
3170 MAC_STATUS_CFG_CHANGED
|
3171 MAC_STATUS_MI_COMPLETION
|
3172 MAC_STATUS_LNKSTATE_CHANGED
));
3175 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
3177 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
3181 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x02);
3183 /* Some third-party PHYs need to be reset on link going
3186 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
3187 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
3188 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) &&
3189 netif_carrier_ok(tp
->dev
)) {
3190 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3191 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
3192 !(bmsr
& BMSR_LSTATUS
))
3198 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
3199 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3200 if (tg3_readphy(tp
, MII_BMSR
, &bmsr
) ||
3201 !(tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
))
3204 if (!(bmsr
& BMSR_LSTATUS
)) {
3205 err
= tg3_init_5401phy_dsp(tp
);
3209 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3210 for (i
= 0; i
< 1000; i
++) {
3212 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
3213 (bmsr
& BMSR_LSTATUS
)) {
3219 if ((tp
->phy_id
& TG3_PHY_ID_REV_MASK
) ==
3220 TG3_PHY_REV_BCM5401_B0
&&
3221 !(bmsr
& BMSR_LSTATUS
) &&
3222 tp
->link_config
.active_speed
== SPEED_1000
) {
3223 err
= tg3_phy_reset(tp
);
3225 err
= tg3_init_5401phy_dsp(tp
);
3230 } else if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
3231 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
) {
3232 /* 5701 {A0,B0} CRC bug workaround */
3233 tg3_writephy(tp
, 0x15, 0x0a75);
3234 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
3235 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
3236 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
3239 /* Clear pending interrupts... */
3240 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
3241 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
3243 if (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
)
3244 tg3_writephy(tp
, MII_TG3_IMASK
, ~MII_TG3_INT_LINKCHG
);
3245 else if (!(tp
->phy_flags
& TG3_PHYFLG_IS_FET
))
3246 tg3_writephy(tp
, MII_TG3_IMASK
, ~0);
3248 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
3249 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
3250 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_1
)
3251 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
3252 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
3254 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, 0);
3257 current_link_up
= 0;
3258 current_speed
= SPEED_INVALID
;
3259 current_duplex
= DUPLEX_INVALID
;
3261 if (tp
->phy_flags
& TG3_PHYFLG_CAPACITIVE_COUPLING
) {
3262 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4007);
3263 tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &val
);
3264 if (!(val
& (1 << 10))) {
3266 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, val
);
3272 for (i
= 0; i
< 100; i
++) {
3273 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3274 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
3275 (bmsr
& BMSR_LSTATUS
))
3280 if (bmsr
& BMSR_LSTATUS
) {
3283 tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
);
3284 for (i
= 0; i
< 2000; i
++) {
3286 if (!tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
) &&
3291 tg3_aux_stat_to_speed_duplex(tp
, aux_stat
,
3296 for (i
= 0; i
< 200; i
++) {
3297 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
3298 if (tg3_readphy(tp
, MII_BMCR
, &bmcr
))
3300 if (bmcr
&& bmcr
!= 0x7fff)
3308 tp
->link_config
.active_speed
= current_speed
;
3309 tp
->link_config
.active_duplex
= current_duplex
;
3311 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
3312 if ((bmcr
& BMCR_ANENABLE
) &&
3313 tg3_copper_is_advertising_all(tp
,
3314 tp
->link_config
.advertising
)) {
3315 if (tg3_adv_1000T_flowctrl_ok(tp
, &lcl_adv
,
3317 current_link_up
= 1;
3320 if (!(bmcr
& BMCR_ANENABLE
) &&
3321 tp
->link_config
.speed
== current_speed
&&
3322 tp
->link_config
.duplex
== current_duplex
&&
3323 tp
->link_config
.flowctrl
==
3324 tp
->link_config
.active_flowctrl
) {
3325 current_link_up
= 1;
3329 if (current_link_up
== 1 &&
3330 tp
->link_config
.active_duplex
== DUPLEX_FULL
)
3331 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
3335 if (current_link_up
== 0 || (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
3336 tg3_phy_copper_begin(tp
);
3338 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3339 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
3340 (bmsr
& BMSR_LSTATUS
))
3341 current_link_up
= 1;
3344 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
3345 if (current_link_up
== 1) {
3346 if (tp
->link_config
.active_speed
== SPEED_100
||
3347 tp
->link_config
.active_speed
== SPEED_10
)
3348 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
3350 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
3351 } else if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
3352 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
3354 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
3356 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
3357 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
3358 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
3360 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) {
3361 if (current_link_up
== 1 &&
3362 tg3_5700_link_polarity(tp
, tp
->link_config
.active_speed
))
3363 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
3365 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
3368 /* ??? Without this setting Netgear GA302T PHY does not
3369 * ??? send/receive packets...
3371 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5411
&&
3372 tp
->pci_chip_rev_id
== CHIPREV_ID_5700_ALTIMA
) {
3373 tp
->mi_mode
|= MAC_MI_MODE_AUTO_POLL
;
3374 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
3378 tw32_f(MAC_MODE
, tp
->mac_mode
);
3381 tg3_phy_eee_adjust(tp
, current_link_up
);
3383 if (tp
->tg3_flags
& TG3_FLAG_USE_LINKCHG_REG
) {
3384 /* Polled via timer. */
3385 tw32_f(MAC_EVENT
, 0);
3387 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
3391 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
&&
3392 current_link_up
== 1 &&
3393 tp
->link_config
.active_speed
== SPEED_1000
&&
3394 ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) ||
3395 (tp
->tg3_flags
& TG3_FLAG_PCI_HIGH_SPEED
))) {
3398 (MAC_STATUS_SYNC_CHANGED
|
3399 MAC_STATUS_CFG_CHANGED
));
3402 NIC_SRAM_FIRMWARE_MBOX
,
3403 NIC_SRAM_FIRMWARE_MBOX_MAGIC2
);
3406 /* Prevent send BD corruption. */
3407 if (tp
->tg3_flags3
& TG3_FLG3_CLKREQ_BUG
) {
3408 u16 oldlnkctl
, newlnkctl
;
3410 pci_read_config_word(tp
->pdev
,
3411 tp
->pcie_cap
+ PCI_EXP_LNKCTL
,
3413 if (tp
->link_config
.active_speed
== SPEED_100
||
3414 tp
->link_config
.active_speed
== SPEED_10
)
3415 newlnkctl
= oldlnkctl
& ~PCI_EXP_LNKCTL_CLKREQ_EN
;
3417 newlnkctl
= oldlnkctl
| PCI_EXP_LNKCTL_CLKREQ_EN
;
3418 if (newlnkctl
!= oldlnkctl
)
3419 pci_write_config_word(tp
->pdev
,
3420 tp
->pcie_cap
+ PCI_EXP_LNKCTL
,
3424 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
3425 if (current_link_up
)
3426 netif_carrier_on(tp
->dev
);
3428 netif_carrier_off(tp
->dev
);
3429 tg3_link_report(tp
);
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
3499 static int tg3_fiber_aneg_smachine(struct tg3
*tp
,
3500 struct tg3_fiber_aneginfo
*ap
)
3503 unsigned long delta
;
3507 if (ap
->state
== ANEG_STATE_UNKNOWN
) {
3511 ap
->ability_match_cfg
= 0;
3512 ap
->ability_match_count
= 0;
3513 ap
->ability_match
= 0;
3519 if (tr32(MAC_STATUS
) & MAC_STATUS_RCVD_CFG
) {
3520 rx_cfg_reg
= tr32(MAC_RX_AUTO_NEG
);
3522 if (rx_cfg_reg
!= ap
->ability_match_cfg
) {
3523 ap
->ability_match_cfg
= rx_cfg_reg
;
3524 ap
->ability_match
= 0;
3525 ap
->ability_match_count
= 0;
3527 if (++ap
->ability_match_count
> 1) {
3528 ap
->ability_match
= 1;
3529 ap
->ability_match_cfg
= rx_cfg_reg
;
3532 if (rx_cfg_reg
& ANEG_CFG_ACK
)
3540 ap
->ability_match_cfg
= 0;
3541 ap
->ability_match_count
= 0;
3542 ap
->ability_match
= 0;
3548 ap
->rxconfig
= rx_cfg_reg
;
3551 switch (ap
->state
) {
3552 case ANEG_STATE_UNKNOWN
:
3553 if (ap
->flags
& (MR_AN_ENABLE
| MR_RESTART_AN
))
3554 ap
->state
= ANEG_STATE_AN_ENABLE
;
3557 case ANEG_STATE_AN_ENABLE
:
3558 ap
->flags
&= ~(MR_AN_COMPLETE
| MR_PAGE_RX
);
3559 if (ap
->flags
& MR_AN_ENABLE
) {
3562 ap
->ability_match_cfg
= 0;
3563 ap
->ability_match_count
= 0;
3564 ap
->ability_match
= 0;
3568 ap
->state
= ANEG_STATE_RESTART_INIT
;
3570 ap
->state
= ANEG_STATE_DISABLE_LINK_OK
;
3574 case ANEG_STATE_RESTART_INIT
:
3575 ap
->link_time
= ap
->cur_time
;
3576 ap
->flags
&= ~(MR_NP_LOADED
);
3578 tw32(MAC_TX_AUTO_NEG
, 0);
3579 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
3580 tw32_f(MAC_MODE
, tp
->mac_mode
);
3583 ret
= ANEG_TIMER_ENAB
;
3584 ap
->state
= ANEG_STATE_RESTART
;
3587 case ANEG_STATE_RESTART
:
3588 delta
= ap
->cur_time
- ap
->link_time
;
3589 if (delta
> ANEG_STATE_SETTLE_TIME
)
3590 ap
->state
= ANEG_STATE_ABILITY_DETECT_INIT
;
3592 ret
= ANEG_TIMER_ENAB
;
3595 case ANEG_STATE_DISABLE_LINK_OK
:
3599 case ANEG_STATE_ABILITY_DETECT_INIT
:
3600 ap
->flags
&= ~(MR_TOGGLE_TX
);
3601 ap
->txconfig
= ANEG_CFG_FD
;
3602 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
3603 if (flowctrl
& ADVERTISE_1000XPAUSE
)
3604 ap
->txconfig
|= ANEG_CFG_PS1
;
3605 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
3606 ap
->txconfig
|= ANEG_CFG_PS2
;
3607 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
3608 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
3609 tw32_f(MAC_MODE
, tp
->mac_mode
);
3612 ap
->state
= ANEG_STATE_ABILITY_DETECT
;
3615 case ANEG_STATE_ABILITY_DETECT
:
3616 if (ap
->ability_match
!= 0 && ap
->rxconfig
!= 0)
3617 ap
->state
= ANEG_STATE_ACK_DETECT_INIT
;
3620 case ANEG_STATE_ACK_DETECT_INIT
:
3621 ap
->txconfig
|= ANEG_CFG_ACK
;
3622 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
3623 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
3624 tw32_f(MAC_MODE
, tp
->mac_mode
);
3627 ap
->state
= ANEG_STATE_ACK_DETECT
;
3630 case ANEG_STATE_ACK_DETECT
:
3631 if (ap
->ack_match
!= 0) {
3632 if ((ap
->rxconfig
& ~ANEG_CFG_ACK
) ==
3633 (ap
->ability_match_cfg
& ~ANEG_CFG_ACK
)) {
3634 ap
->state
= ANEG_STATE_COMPLETE_ACK_INIT
;
3636 ap
->state
= ANEG_STATE_AN_ENABLE
;
3638 } else if (ap
->ability_match
!= 0 &&
3639 ap
->rxconfig
== 0) {
3640 ap
->state
= ANEG_STATE_AN_ENABLE
;
3644 case ANEG_STATE_COMPLETE_ACK_INIT
:
3645 if (ap
->rxconfig
& ANEG_CFG_INVAL
) {
3649 ap
->flags
&= ~(MR_LP_ADV_FULL_DUPLEX
|
3650 MR_LP_ADV_HALF_DUPLEX
|
3651 MR_LP_ADV_SYM_PAUSE
|
3652 MR_LP_ADV_ASYM_PAUSE
|
3653 MR_LP_ADV_REMOTE_FAULT1
|
3654 MR_LP_ADV_REMOTE_FAULT2
|
3655 MR_LP_ADV_NEXT_PAGE
|
3658 if (ap
->rxconfig
& ANEG_CFG_FD
)
3659 ap
->flags
|= MR_LP_ADV_FULL_DUPLEX
;
3660 if (ap
->rxconfig
& ANEG_CFG_HD
)
3661 ap
->flags
|= MR_LP_ADV_HALF_DUPLEX
;
3662 if (ap
->rxconfig
& ANEG_CFG_PS1
)
3663 ap
->flags
|= MR_LP_ADV_SYM_PAUSE
;
3664 if (ap
->rxconfig
& ANEG_CFG_PS2
)
3665 ap
->flags
|= MR_LP_ADV_ASYM_PAUSE
;
3666 if (ap
->rxconfig
& ANEG_CFG_RF1
)
3667 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT1
;
3668 if (ap
->rxconfig
& ANEG_CFG_RF2
)
3669 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT2
;
3670 if (ap
->rxconfig
& ANEG_CFG_NP
)
3671 ap
->flags
|= MR_LP_ADV_NEXT_PAGE
;
3673 ap
->link_time
= ap
->cur_time
;
3675 ap
->flags
^= (MR_TOGGLE_TX
);
3676 if (ap
->rxconfig
& 0x0008)
3677 ap
->flags
|= MR_TOGGLE_RX
;
3678 if (ap
->rxconfig
& ANEG_CFG_NP
)
3679 ap
->flags
|= MR_NP_RX
;
3680 ap
->flags
|= MR_PAGE_RX
;
3682 ap
->state
= ANEG_STATE_COMPLETE_ACK
;
3683 ret
= ANEG_TIMER_ENAB
;
3686 case ANEG_STATE_COMPLETE_ACK
:
3687 if (ap
->ability_match
!= 0 &&
3688 ap
->rxconfig
== 0) {
3689 ap
->state
= ANEG_STATE_AN_ENABLE
;
3692 delta
= ap
->cur_time
- ap
->link_time
;
3693 if (delta
> ANEG_STATE_SETTLE_TIME
) {
3694 if (!(ap
->flags
& (MR_LP_ADV_NEXT_PAGE
))) {
3695 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
3697 if ((ap
->txconfig
& ANEG_CFG_NP
) == 0 &&
3698 !(ap
->flags
& MR_NP_RX
)) {
3699 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
3707 case ANEG_STATE_IDLE_DETECT_INIT
:
3708 ap
->link_time
= ap
->cur_time
;
3709 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
3710 tw32_f(MAC_MODE
, tp
->mac_mode
);
3713 ap
->state
= ANEG_STATE_IDLE_DETECT
;
3714 ret
= ANEG_TIMER_ENAB
;
3717 case ANEG_STATE_IDLE_DETECT
:
3718 if (ap
->ability_match
!= 0 &&
3719 ap
->rxconfig
== 0) {
3720 ap
->state
= ANEG_STATE_AN_ENABLE
;
3723 delta
= ap
->cur_time
- ap
->link_time
;
3724 if (delta
> ANEG_STATE_SETTLE_TIME
) {
3725 /* XXX another gem from the Broadcom driver :( */
3726 ap
->state
= ANEG_STATE_LINK_OK
;
3730 case ANEG_STATE_LINK_OK
:
3731 ap
->flags
|= (MR_AN_COMPLETE
| MR_LINK_OK
);
3735 case ANEG_STATE_NEXT_PAGE_WAIT_INIT
:
3736 /* ??? unimplemented */
3739 case ANEG_STATE_NEXT_PAGE_WAIT
:
3740 /* ??? unimplemented */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
3846 static int tg3_setup_fiber_hw_autoneg(struct tg3
*tp
, u32 mac_status
)
3849 u32 sg_dig_ctrl
, sg_dig_status
;
3850 u32 serdes_cfg
, expected_sg_dig_ctrl
;
3851 int workaround
, port_a
;
3852 int current_link_up
;
3855 expected_sg_dig_ctrl
= 0;
3858 current_link_up
= 0;
3860 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A0
&&
3861 tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A1
) {
3863 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
3866 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3867 /* preserve bits 20-23 for voltage regulator */
3868 serdes_cfg
= tr32(MAC_SERDES_CFG
) & 0x00f06fff;
3871 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
3873 if (tp
->link_config
.autoneg
!= AUTONEG_ENABLE
) {
3874 if (sg_dig_ctrl
& SG_DIG_USING_HW_AUTONEG
) {
3876 u32 val
= serdes_cfg
;
3882 tw32_f(MAC_SERDES_CFG
, val
);
3885 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
3887 if (mac_status
& MAC_STATUS_PCS_SYNCED
) {
3888 tg3_setup_flow_control(tp
, 0, 0);
3889 current_link_up
= 1;
3894 /* Want auto-negotiation. */
3895 expected_sg_dig_ctrl
= SG_DIG_USING_HW_AUTONEG
| SG_DIG_COMMON_SETUP
;
3897 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
3898 if (flowctrl
& ADVERTISE_1000XPAUSE
)
3899 expected_sg_dig_ctrl
|= SG_DIG_PAUSE_CAP
;
3900 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
3901 expected_sg_dig_ctrl
|= SG_DIG_ASYM_PAUSE
;
3903 if (sg_dig_ctrl
!= expected_sg_dig_ctrl
) {
3904 if ((tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
) &&
3905 tp
->serdes_counter
&&
3906 ((mac_status
& (MAC_STATUS_PCS_SYNCED
|
3907 MAC_STATUS_RCVD_CFG
)) ==
3908 MAC_STATUS_PCS_SYNCED
)) {
3909 tp
->serdes_counter
--;
3910 current_link_up
= 1;
3915 tw32_f(MAC_SERDES_CFG
, serdes_cfg
| 0xc011000);
3916 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
| SG_DIG_SOFT_RESET
);
3918 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
);
3920 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
3921 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
3922 } else if (mac_status
& (MAC_STATUS_PCS_SYNCED
|
3923 MAC_STATUS_SIGNAL_DET
)) {
3924 sg_dig_status
= tr32(SG_DIG_STATUS
);
3925 mac_status
= tr32(MAC_STATUS
);
3927 if ((sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
) &&
3928 (mac_status
& MAC_STATUS_PCS_SYNCED
)) {
3929 u32 local_adv
= 0, remote_adv
= 0;
3931 if (sg_dig_ctrl
& SG_DIG_PAUSE_CAP
)
3932 local_adv
|= ADVERTISE_1000XPAUSE
;
3933 if (sg_dig_ctrl
& SG_DIG_ASYM_PAUSE
)
3934 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
3936 if (sg_dig_status
& SG_DIG_PARTNER_PAUSE_CAPABLE
)
3937 remote_adv
|= LPA_1000XPAUSE
;
3938 if (sg_dig_status
& SG_DIG_PARTNER_ASYM_PAUSE
)
3939 remote_adv
|= LPA_1000XPAUSE_ASYM
;
3941 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
3942 current_link_up
= 1;
3943 tp
->serdes_counter
= 0;
3944 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
3945 } else if (!(sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
)) {
3946 if (tp
->serdes_counter
)
3947 tp
->serdes_counter
--;
3950 u32 val
= serdes_cfg
;
3957 tw32_f(MAC_SERDES_CFG
, val
);
3960 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
3963 /* Link parallel detection - link is up */
3964 /* only if we have PCS_SYNC and not */
3965 /* receiving config code words */
3966 mac_status
= tr32(MAC_STATUS
);
3967 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) &&
3968 !(mac_status
& MAC_STATUS_RCVD_CFG
)) {
3969 tg3_setup_flow_control(tp
, 0, 0);
3970 current_link_up
= 1;
3972 TG3_PHYFLG_PARALLEL_DETECT
;
3973 tp
->serdes_counter
=
3974 SERDES_PARALLEL_DET_TIMEOUT
;
3976 goto restart_autoneg
;
3980 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
3981 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
3985 return current_link_up
;
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
4050 static int tg3_setup_fiber_phy(struct tg3
*tp
, int force_reset
)
4053 u16 orig_active_speed
;
4054 u8 orig_active_duplex
;
4056 int current_link_up
;
4059 orig_pause_cfg
= tp
->link_config
.active_flowctrl
;
4060 orig_active_speed
= tp
->link_config
.active_speed
;
4061 orig_active_duplex
= tp
->link_config
.active_duplex
;
4063 if (!(tp
->tg3_flags2
& TG3_FLG2_HW_AUTONEG
) &&
4064 netif_carrier_ok(tp
->dev
) &&
4065 (tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
)) {
4066 mac_status
= tr32(MAC_STATUS
);
4067 mac_status
&= (MAC_STATUS_PCS_SYNCED
|
4068 MAC_STATUS_SIGNAL_DET
|
4069 MAC_STATUS_CFG_CHANGED
|
4070 MAC_STATUS_RCVD_CFG
);
4071 if (mac_status
== (MAC_STATUS_PCS_SYNCED
|
4072 MAC_STATUS_SIGNAL_DET
)) {
4073 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
4074 MAC_STATUS_CFG_CHANGED
));
4079 tw32_f(MAC_TX_AUTO_NEG
, 0);
4081 tp
->mac_mode
&= ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
4082 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
4083 tw32_f(MAC_MODE
, tp
->mac_mode
);
4086 if (tp
->phy_id
== TG3_PHY_ID_BCM8002
)
4087 tg3_init_bcm8002(tp
);
4089 /* Enable link change event even when serdes polling. */
4090 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
4093 current_link_up
= 0;
4094 mac_status
= tr32(MAC_STATUS
);
4096 if (tp
->tg3_flags2
& TG3_FLG2_HW_AUTONEG
)
4097 current_link_up
= tg3_setup_fiber_hw_autoneg(tp
, mac_status
);
4099 current_link_up
= tg3_setup_fiber_by_hand(tp
, mac_status
);
4101 tp
->napi
[0].hw_status
->status
=
4102 (SD_STATUS_UPDATED
|
4103 (tp
->napi
[0].hw_status
->status
& ~SD_STATUS_LINK_CHG
));
4105 for (i
= 0; i
< 100; i
++) {
4106 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
4107 MAC_STATUS_CFG_CHANGED
));
4109 if ((tr32(MAC_STATUS
) & (MAC_STATUS_SYNC_CHANGED
|
4110 MAC_STATUS_CFG_CHANGED
|
4111 MAC_STATUS_LNKSTATE_CHANGED
)) == 0)
4115 mac_status
= tr32(MAC_STATUS
);
4116 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) == 0) {
4117 current_link_up
= 0;
4118 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
4119 tp
->serdes_counter
== 0) {
4120 tw32_f(MAC_MODE
, (tp
->mac_mode
|
4121 MAC_MODE_SEND_CONFIGS
));
4123 tw32_f(MAC_MODE
, tp
->mac_mode
);
4127 if (current_link_up
== 1) {
4128 tp
->link_config
.active_speed
= SPEED_1000
;
4129 tp
->link_config
.active_duplex
= DUPLEX_FULL
;
4130 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
4131 LED_CTRL_LNKLED_OVERRIDE
|
4132 LED_CTRL_1000MBPS_ON
));
4134 tp
->link_config
.active_speed
= SPEED_INVALID
;
4135 tp
->link_config
.active_duplex
= DUPLEX_INVALID
;
4136 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
4137 LED_CTRL_LNKLED_OVERRIDE
|
4138 LED_CTRL_TRAFFIC_OVERRIDE
));
4141 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
4142 if (current_link_up
)
4143 netif_carrier_on(tp
->dev
);
4145 netif_carrier_off(tp
->dev
);
4146 tg3_link_report(tp
);
4148 u32 now_pause_cfg
= tp
->link_config
.active_flowctrl
;
4149 if (orig_pause_cfg
!= now_pause_cfg
||
4150 orig_active_speed
!= tp
->link_config
.active_speed
||
4151 orig_active_duplex
!= tp
->link_config
.active_duplex
)
4152 tg3_link_report(tp
);
4158 static int tg3_setup_fiber_mii_phy(struct tg3
*tp
, int force_reset
)
4160 int current_link_up
, err
= 0;
4164 u32 local_adv
, remote_adv
;
4166 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
4167 tw32_f(MAC_MODE
, tp
->mac_mode
);
4173 (MAC_STATUS_SYNC_CHANGED
|
4174 MAC_STATUS_CFG_CHANGED
|
4175 MAC_STATUS_MI_COMPLETION
|
4176 MAC_STATUS_LNKSTATE_CHANGED
));
4182 current_link_up
= 0;
4183 current_speed
= SPEED_INVALID
;
4184 current_duplex
= DUPLEX_INVALID
;
4186 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4187 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4188 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
4189 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
4190 bmsr
|= BMSR_LSTATUS
;
4192 bmsr
&= ~BMSR_LSTATUS
;
4195 err
|= tg3_readphy(tp
, MII_BMCR
, &bmcr
);
4197 if ((tp
->link_config
.autoneg
== AUTONEG_ENABLE
) && !force_reset
&&
4198 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
)) {
4199 /* do nothing, just check for link up at the end */
4200 } else if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
4203 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
4204 new_adv
= adv
& ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
|
4205 ADVERTISE_1000XPAUSE
|
4206 ADVERTISE_1000XPSE_ASYM
|
4209 new_adv
|= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
4211 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Half
)
4212 new_adv
|= ADVERTISE_1000XHALF
;
4213 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Full
)
4214 new_adv
|= ADVERTISE_1000XFULL
;
4216 if ((new_adv
!= adv
) || !(bmcr
& BMCR_ANENABLE
)) {
4217 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
4218 bmcr
|= BMCR_ANENABLE
| BMCR_ANRESTART
;
4219 tg3_writephy(tp
, MII_BMCR
, bmcr
);
4221 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
4222 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5714S
;
4223 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4230 bmcr
&= ~BMCR_SPEED1000
;
4231 new_bmcr
= bmcr
& ~(BMCR_ANENABLE
| BMCR_FULLDPLX
);
4233 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
4234 new_bmcr
|= BMCR_FULLDPLX
;
4236 if (new_bmcr
!= bmcr
) {
4237 /* BMCR_SPEED1000 is a reserved bit that needs
4238 * to be set on write.
4240 new_bmcr
|= BMCR_SPEED1000
;
4242 /* Force a linkdown */
4243 if (netif_carrier_ok(tp
->dev
)) {
4246 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
4247 adv
&= ~(ADVERTISE_1000XFULL
|
4248 ADVERTISE_1000XHALF
|
4250 tg3_writephy(tp
, MII_ADVERTISE
, adv
);
4251 tg3_writephy(tp
, MII_BMCR
, bmcr
|
4255 netif_carrier_off(tp
->dev
);
4257 tg3_writephy(tp
, MII_BMCR
, new_bmcr
);
4259 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4260 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4261 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
4263 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
4264 bmsr
|= BMSR_LSTATUS
;
4266 bmsr
&= ~BMSR_LSTATUS
;
4268 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4272 if (bmsr
& BMSR_LSTATUS
) {
4273 current_speed
= SPEED_1000
;
4274 current_link_up
= 1;
4275 if (bmcr
& BMCR_FULLDPLX
)
4276 current_duplex
= DUPLEX_FULL
;
4278 current_duplex
= DUPLEX_HALF
;
4283 if (bmcr
& BMCR_ANENABLE
) {
4286 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
);
4287 err
|= tg3_readphy(tp
, MII_LPA
, &remote_adv
);
4288 common
= local_adv
& remote_adv
;
4289 if (common
& (ADVERTISE_1000XHALF
|
4290 ADVERTISE_1000XFULL
)) {
4291 if (common
& ADVERTISE_1000XFULL
)
4292 current_duplex
= DUPLEX_FULL
;
4294 current_duplex
= DUPLEX_HALF
;
4295 } else if (!(tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)) {
4296 /* Link is up via parallel detect */
4298 current_link_up
= 0;
4303 if (current_link_up
== 1 && current_duplex
== DUPLEX_FULL
)
4304 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
4306 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
4307 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
4308 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
4310 tw32_f(MAC_MODE
, tp
->mac_mode
);
4313 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
4315 tp
->link_config
.active_speed
= current_speed
;
4316 tp
->link_config
.active_duplex
= current_duplex
;
4318 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
4319 if (current_link_up
)
4320 netif_carrier_on(tp
->dev
);
4322 netif_carrier_off(tp
->dev
);
4323 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4325 tg3_link_report(tp
);
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 val, scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox.  The symptom
 * is bogus tx completions.  We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
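/* The actual recovery happens later in tg3_reset_task(), which switches
 * to flushing mailbox writes, sets TG3_FLAG_MBOX_WRITE_REORDER and
 * clears TG3_FLAG_TX_RECOVERY_PENDING before resetting the chip.
 */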
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
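/* Worked example (illustrative values, not from the original source):
 * with TG3_TX_RING_SIZE = 512, tx_pending = 511, tx_prod = 5 and
 * tx_cons = 510 the producer has wrapped, so (5 - 510) & 511 = 7
 * descriptors are still in flight and 511 - 7 = 504 are available.
 */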
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
4490 static void tg3_tx(struct tg3_napi
*tnapi
)
4492 struct tg3
*tp
= tnapi
->tp
;
4493 u32 hw_idx
= tnapi
->hw_status
->idx
[0].tx_consumer
;
4494 u32 sw_idx
= tnapi
->tx_cons
;
4495 struct netdev_queue
*txq
;
4496 int index
= tnapi
- tp
->napi
;
4498 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_TSS
)
4501 txq
= netdev_get_tx_queue(tp
->dev
, index
);
4503 while (sw_idx
!= hw_idx
) {
4504 struct ring_info
*ri
= &tnapi
->tx_buffers
[sw_idx
];
4505 struct sk_buff
*skb
= ri
->skb
;
4508 if (unlikely(skb
== NULL
)) {
4513 pci_unmap_single(tp
->pdev
,
4514 dma_unmap_addr(ri
, mapping
),
4520 sw_idx
= NEXT_TX(sw_idx
);
4522 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
4523 ri
= &tnapi
->tx_buffers
[sw_idx
];
4524 if (unlikely(ri
->skb
!= NULL
|| sw_idx
== hw_idx
))
4527 pci_unmap_page(tp
->pdev
,
4528 dma_unmap_addr(ri
, mapping
),
4529 skb_shinfo(skb
)->frags
[i
].size
,
4531 sw_idx
= NEXT_TX(sw_idx
);
4536 if (unlikely(tx_bug
)) {
4542 tnapi
->tx_cons
= sw_idx
;
	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();
4551 if (unlikely(netif_tx_queue_stopped(txq
) &&
4552 (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
)))) {
4553 __netif_tx_lock(txq
, smp_processor_id());
4554 if (netif_tx_queue_stopped(txq
) &&
4555 (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
)))
4556 netif_tx_wake_queue(txq
);
4557 __netif_tx_unlock(txq
);
static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	if (!ri->skb)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(ri->skb);
	ri->skb = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
4583 static int tg3_alloc_rx_skb(struct tg3
*tp
, struct tg3_rx_prodring_set
*tpr
,
4584 u32 opaque_key
, u32 dest_idx_unmasked
)
4586 struct tg3_rx_buffer_desc
*desc
;
4587 struct ring_info
*map
;
4588 struct sk_buff
*skb
;
4590 int skb_size
, dest_idx
;
4592 switch (opaque_key
) {
4593 case RXD_OPAQUE_RING_STD
:
4594 dest_idx
= dest_idx_unmasked
& tp
->rx_std_ring_mask
;
4595 desc
= &tpr
->rx_std
[dest_idx
];
4596 map
= &tpr
->rx_std_buffers
[dest_idx
];
4597 skb_size
= tp
->rx_pkt_map_sz
;
4600 case RXD_OPAQUE_RING_JUMBO
:
4601 dest_idx
= dest_idx_unmasked
& tp
->rx_jmb_ring_mask
;
4602 desc
= &tpr
->rx_jmb
[dest_idx
].std
;
4603 map
= &tpr
->rx_jmb_buffers
[dest_idx
];
4604 skb_size
= TG3_RX_JMB_MAP_SZ
;
	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
4617 skb
= netdev_alloc_skb(tp
->dev
, skb_size
+ tp
->rx_offset
);
4621 skb_reserve(skb
, tp
->rx_offset
);
4623 mapping
= pci_map_single(tp
->pdev
, skb
->data
, skb_size
,
4624 PCI_DMA_FROMDEVICE
);
4625 if (pci_dma_mapping_error(tp
->pdev
, mapping
)) {
4631 dma_unmap_addr_set(map
, mapping
, mapping
);
4633 desc
->addr_hi
= ((u64
)mapping
>> 32);
4634 desc
->addr_lo
= ((u64
)mapping
& 0xffffffff);
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
4643 static void tg3_recycle_rx(struct tg3_napi
*tnapi
,
4644 struct tg3_rx_prodring_set
*dpr
,
4645 u32 opaque_key
, int src_idx
,
4646 u32 dest_idx_unmasked
)
4648 struct tg3
*tp
= tnapi
->tp
;
4649 struct tg3_rx_buffer_desc
*src_desc
, *dest_desc
;
4650 struct ring_info
*src_map
, *dest_map
;
4651 struct tg3_rx_prodring_set
*spr
= &tp
->napi
[0].prodring
;
4654 switch (opaque_key
) {
4655 case RXD_OPAQUE_RING_STD
:
4656 dest_idx
= dest_idx_unmasked
& tp
->rx_std_ring_mask
;
4657 dest_desc
= &dpr
->rx_std
[dest_idx
];
4658 dest_map
= &dpr
->rx_std_buffers
[dest_idx
];
4659 src_desc
= &spr
->rx_std
[src_idx
];
4660 src_map
= &spr
->rx_std_buffers
[src_idx
];
4663 case RXD_OPAQUE_RING_JUMBO
:
4664 dest_idx
= dest_idx_unmasked
& tp
->rx_jmb_ring_mask
;
4665 dest_desc
= &dpr
->rx_jmb
[dest_idx
].std
;
4666 dest_map
= &dpr
->rx_jmb_buffers
[dest_idx
];
4667 src_desc
= &spr
->rx_jmb
[src_idx
].std
;
4668 src_map
= &spr
->rx_jmb_buffers
[src_idx
];
4675 dest_map
->skb
= src_map
->skb
;
4676 dma_unmap_addr_set(dest_map
, mapping
,
4677 dma_unmap_addr(src_map
, mapping
));
4678 dest_desc
->addr_hi
= src_desc
->addr_hi
;
4679 dest_desc
->addr_lo
= src_desc
->addr_lo
;
4681 /* Ensure that the update to the skb happens after the physical
4682 * addresses have been transferred to the new BD location.
4686 src_map
->skb
= NULL
;
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
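/* In outline, tg3_rx() below:
 *   1. reads the hardware return-ring producer index from the status block,
 *   2. uses the opaque cookie in each status entry to locate the standard
 *      or jumbo producer-ring buffer that was consumed,
 *   3. either keeps the skb and posts a fresh buffer (large packets) or
 *      copies the data and recycles the original buffer (small packets),
 *   4. hands the skb to napi_gro_receive() and periodically updates the
 *      producer mailboxes to refill the rings.
 */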
4713 static int tg3_rx(struct tg3_napi
*tnapi
, int budget
)
4715 struct tg3
*tp
= tnapi
->tp
;
4716 u32 work_mask
, rx_std_posted
= 0;
4717 u32 std_prod_idx
, jmb_prod_idx
;
4718 u32 sw_idx
= tnapi
->rx_rcb_ptr
;
4721 struct tg3_rx_prodring_set
*tpr
= &tnapi
->prodring
;
4723 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
4725 * We need to order the read of hw_idx and the read of
4726 * the opaque cookie.
4731 std_prod_idx
= tpr
->rx_std_prod_idx
;
4732 jmb_prod_idx
= tpr
->rx_jmb_prod_idx
;
4733 while (sw_idx
!= hw_idx
&& budget
> 0) {
4734 struct ring_info
*ri
;
4735 struct tg3_rx_buffer_desc
*desc
= &tnapi
->rx_rcb
[sw_idx
];
4737 struct sk_buff
*skb
;
4738 dma_addr_t dma_addr
;
4739 u32 opaque_key
, desc_idx
, *post_ptr
;
4741 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
4742 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
4743 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
4744 ri
= &tp
->napi
[0].prodring
.rx_std_buffers
[desc_idx
];
4745 dma_addr
= dma_unmap_addr(ri
, mapping
);
4747 post_ptr
= &std_prod_idx
;
4749 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
4750 ri
= &tp
->napi
[0].prodring
.rx_jmb_buffers
[desc_idx
];
4751 dma_addr
= dma_unmap_addr(ri
, mapping
);
4753 post_ptr
= &jmb_prod_idx
;
4755 goto next_pkt_nopost
;
4757 work_mask
|= opaque_key
;
4759 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
4760 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
)) {
4762 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
4763 desc_idx
, *post_ptr
);
4765 /* Other statistics kept track of by card. */
4770 len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) -
4773 if (len
> TG3_RX_COPY_THRESH(tp
)) {
4776 skb_size
= tg3_alloc_rx_skb(tp
, tpr
, opaque_key
,
4781 pci_unmap_single(tp
->pdev
, dma_addr
, skb_size
,
4782 PCI_DMA_FROMDEVICE
);
4784 /* Ensure that the update to the skb happens
4785 * after the usage of the old DMA mapping.
4793 struct sk_buff
*copy_skb
;
4795 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
4796 desc_idx
, *post_ptr
);
4798 copy_skb
= netdev_alloc_skb(tp
->dev
, len
+
4800 if (copy_skb
== NULL
)
4801 goto drop_it_no_recycle
;
4803 skb_reserve(copy_skb
, TG3_RAW_IP_ALIGN
);
4804 skb_put(copy_skb
, len
);
4805 pci_dma_sync_single_for_cpu(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
4806 skb_copy_from_linear_data(skb
, copy_skb
->data
, len
);
4807 pci_dma_sync_single_for_device(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
4809 /* We'll reuse the original ring buffer. */
4813 if ((tp
->tg3_flags
& TG3_FLAG_RX_CHECKSUMS
) &&
4814 (desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
4815 (((desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
4816 >> RXD_TCPCSUM_SHIFT
) == 0xffff))
4817 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
4819 skb_checksum_none_assert(skb
);
4821 skb
->protocol
= eth_type_trans(skb
, tp
->dev
);
4823 if (len
> (tp
->dev
->mtu
+ ETH_HLEN
) &&
4824 skb
->protocol
!= htons(ETH_P_8021Q
)) {
4826 goto drop_it_no_recycle
;
4829 if (desc
->type_flags
& RXD_FLAG_VLAN
&&
4830 !(tp
->rx_mode
& RX_MODE_KEEP_VLAN_TAG
))
4831 __vlan_hwaccel_put_tag(skb
,
4832 desc
->err_vlan
& RXD_VLAN_MASK
);
4834 napi_gro_receive(&tnapi
->napi
, skb
);
4842 if (unlikely(rx_std_posted
>= tp
->rx_std_max_post
)) {
4843 tpr
->rx_std_prod_idx
= std_prod_idx
&
4844 tp
->rx_std_ring_mask
;
4845 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
4846 tpr
->rx_std_prod_idx
);
4847 work_mask
&= ~RXD_OPAQUE_RING_STD
;
4852 sw_idx
&= tp
->rx_ret_ring_mask
;
4854 /* Refresh hw_idx to see if there is new work */
4855 if (sw_idx
== hw_idx
) {
4856 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
4861 /* ACK the status ring. */
4862 tnapi
->rx_rcb_ptr
= sw_idx
;
4863 tw32_rx_mbox(tnapi
->consmbox
, sw_idx
);
4865 /* Refill RX ring(s). */
4866 if (!(tp
->tg3_flags3
& TG3_FLG3_ENABLE_RSS
)) {
4867 if (work_mask
& RXD_OPAQUE_RING_STD
) {
4868 tpr
->rx_std_prod_idx
= std_prod_idx
&
4869 tp
->rx_std_ring_mask
;
4870 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
4871 tpr
->rx_std_prod_idx
);
4873 if (work_mask
& RXD_OPAQUE_RING_JUMBO
) {
4874 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
&
4875 tp
->rx_jmb_ring_mask
;
4876 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
,
4877 tpr
->rx_jmb_prod_idx
);
4880 } else if (work_mask
) {
4881 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4882 * updated before the producer indices can be updated.
4886 tpr
->rx_std_prod_idx
= std_prod_idx
& tp
->rx_std_ring_mask
;
4887 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
& tp
->rx_jmb_ring_mask
;
4889 if (tnapi
!= &tp
->napi
[1])
4890 napi_schedule(&tp
->napi
[1].napi
);
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
4922 static int tg3_rx_prodring_xfer(struct tg3
*tp
,
4923 struct tg3_rx_prodring_set
*dpr
,
4924 struct tg3_rx_prodring_set
*spr
)
4926 u32 si
, di
, cpycnt
, src_prod_idx
;
4930 src_prod_idx
= spr
->rx_std_prod_idx
;
4932 /* Make sure updates to the rx_std_buffers[] entries and the
4933 * standard producer index are seen in the correct order.
4937 if (spr
->rx_std_cons_idx
== src_prod_idx
)
4940 if (spr
->rx_std_cons_idx
< src_prod_idx
)
4941 cpycnt
= src_prod_idx
- spr
->rx_std_cons_idx
;
4943 cpycnt
= tp
->rx_std_ring_mask
+ 1 -
4944 spr
->rx_std_cons_idx
;
4946 cpycnt
= min(cpycnt
,
4947 tp
->rx_std_ring_mask
+ 1 - dpr
->rx_std_prod_idx
);
4949 si
= spr
->rx_std_cons_idx
;
4950 di
= dpr
->rx_std_prod_idx
;
4952 for (i
= di
; i
< di
+ cpycnt
; i
++) {
4953 if (dpr
->rx_std_buffers
[i
].skb
) {
4963 /* Ensure that updates to the rx_std_buffers ring and the
4964 * shadowed hardware producer ring from tg3_recycle_skb() are
4965 * ordered correctly WRT the skb check above.
4969 memcpy(&dpr
->rx_std_buffers
[di
],
4970 &spr
->rx_std_buffers
[si
],
4971 cpycnt
* sizeof(struct ring_info
));
4973 for (i
= 0; i
< cpycnt
; i
++, di
++, si
++) {
4974 struct tg3_rx_buffer_desc
*sbd
, *dbd
;
4975 sbd
= &spr
->rx_std
[si
];
4976 dbd
= &dpr
->rx_std
[di
];
4977 dbd
->addr_hi
= sbd
->addr_hi
;
4978 dbd
->addr_lo
= sbd
->addr_lo
;
4981 spr
->rx_std_cons_idx
= (spr
->rx_std_cons_idx
+ cpycnt
) &
4982 tp
->rx_std_ring_mask
;
4983 dpr
->rx_std_prod_idx
= (dpr
->rx_std_prod_idx
+ cpycnt
) &
4984 tp
->rx_std_ring_mask
;
4988 src_prod_idx
= spr
->rx_jmb_prod_idx
;
4990 /* Make sure updates to the rx_jmb_buffers[] entries and
4991 * the jumbo producer index are seen in the correct order.
4995 if (spr
->rx_jmb_cons_idx
== src_prod_idx
)
4998 if (spr
->rx_jmb_cons_idx
< src_prod_idx
)
4999 cpycnt
= src_prod_idx
- spr
->rx_jmb_cons_idx
;
5001 cpycnt
= tp
->rx_jmb_ring_mask
+ 1 -
5002 spr
->rx_jmb_cons_idx
;
5004 cpycnt
= min(cpycnt
,
5005 tp
->rx_jmb_ring_mask
+ 1 - dpr
->rx_jmb_prod_idx
);
5007 si
= spr
->rx_jmb_cons_idx
;
5008 di
= dpr
->rx_jmb_prod_idx
;
5010 for (i
= di
; i
< di
+ cpycnt
; i
++) {
5011 if (dpr
->rx_jmb_buffers
[i
].skb
) {
5021 /* Ensure that updates to the rx_jmb_buffers ring and the
5022 * shadowed hardware producer ring from tg3_recycle_skb() are
5023 * ordered correctly WRT the skb check above.
5027 memcpy(&dpr
->rx_jmb_buffers
[di
],
5028 &spr
->rx_jmb_buffers
[si
],
5029 cpycnt
* sizeof(struct ring_info
));
5031 for (i
= 0; i
< cpycnt
; i
++, di
++, si
++) {
5032 struct tg3_rx_buffer_desc
*sbd
, *dbd
;
5033 sbd
= &spr
->rx_jmb
[si
].std
;
5034 dbd
= &dpr
->rx_jmb
[di
].std
;
5035 dbd
->addr_hi
= sbd
->addr_hi
;
5036 dbd
->addr_lo
= sbd
->addr_lo
;
5039 spr
->rx_jmb_cons_idx
= (spr
->rx_jmb_cons_idx
+ cpycnt
) &
5040 tp
->rx_jmb_ring_mask
;
5041 dpr
->rx_jmb_prod_idx
= (dpr
->rx_jmb_prod_idx
+ cpycnt
) &
5042 tp
->rx_jmb_ring_mask
;
5048 static int tg3_poll_work(struct tg3_napi
*tnapi
, int work_done
, int budget
)
5050 struct tg3
*tp
= tnapi
->tp
;
5052 /* run TX completion thread */
5053 if (tnapi
->hw_status
->idx
[0].tx_consumer
!= tnapi
->tx_cons
) {
5055 if (unlikely(tp
->tg3_flags
& TG3_FLAG_TX_RECOVERY_PENDING
))
5059 /* run RX thread, within the bounds set by NAPI.
5060 * All RX "locking" is done by ensuring outside
5061 * code synchronizes with tg3->napi.poll()
5063 if (*(tnapi
->rx_rcb_prod_idx
) != tnapi
->rx_rcb_ptr
)
5064 work_done
+= tg3_rx(tnapi
, budget
- work_done
);
5066 if ((tp
->tg3_flags3
& TG3_FLG3_ENABLE_RSS
) && tnapi
== &tp
->napi
[1]) {
5067 struct tg3_rx_prodring_set
*dpr
= &tp
->napi
[0].prodring
;
5069 u32 std_prod_idx
= dpr
->rx_std_prod_idx
;
5070 u32 jmb_prod_idx
= dpr
->rx_jmb_prod_idx
;
5072 for (i
= 1; i
< tp
->irq_cnt
; i
++)
5073 err
|= tg3_rx_prodring_xfer(tp
, dpr
,
5074 &tp
->napi
[i
].prodring
);
5078 if (std_prod_idx
!= dpr
->rx_std_prod_idx
)
5079 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
5080 dpr
->rx_std_prod_idx
);
5082 if (jmb_prod_idx
!= dpr
->rx_jmb_prod_idx
)
5083 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
,
5084 dpr
->rx_jmb_prod_idx
);
5089 tw32_f(HOSTCC_MODE
, tp
->coal_now
);
5095 static int tg3_poll_msix(struct napi_struct
*napi
, int budget
)
5097 struct tg3_napi
*tnapi
= container_of(napi
, struct tg3_napi
, napi
);
5098 struct tg3
*tp
= tnapi
->tp
;
5100 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
5103 work_done
= tg3_poll_work(tnapi
, work_done
, budget
);
5105 if (unlikely(tp
->tg3_flags
& TG3_FLAG_TX_RECOVERY_PENDING
))
5108 if (unlikely(work_done
>= budget
))
5111 /* tp->last_tag is used in tg3_int_reenable() below
5112 * to tell the hw how much work has been processed,
5113 * so we must read it before checking for more work.
5115 tnapi
->last_tag
= sblk
->status_tag
;
5116 tnapi
->last_irq_tag
= tnapi
->last_tag
;
5119 /* check for RX/TX work to do */
5120 if (likely(sblk
->idx
[0].tx_consumer
== tnapi
->tx_cons
&&
5121 *(tnapi
->rx_rcb_prod_idx
) == tnapi
->rx_rcb_ptr
)) {
5122 napi_complete(napi
);
5123 /* Reenable interrupts. */
5124 tw32_mailbox(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
5133 /* work_done is guaranteed to be less than budget. */
5134 napi_complete(napi
);
5135 schedule_work(&tp
->reset_task
);
5139 static int tg3_poll(struct napi_struct
*napi
, int budget
)
5141 struct tg3_napi
*tnapi
= container_of(napi
, struct tg3_napi
, napi
);
5142 struct tg3
*tp
= tnapi
->tp
;
5144 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
5149 work_done
= tg3_poll_work(tnapi
, work_done
, budget
);
5151 if (unlikely(tp
->tg3_flags
& TG3_FLAG_TX_RECOVERY_PENDING
))
5154 if (unlikely(work_done
>= budget
))
5157 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) {
5158 /* tp->last_tag is used in tg3_int_reenable() below
5159 * to tell the hw how much work has been processed,
5160 * so we must read it before checking for more work.
5162 tnapi
->last_tag
= sblk
->status_tag
;
5163 tnapi
->last_irq_tag
= tnapi
->last_tag
;
5166 sblk
->status
&= ~SD_STATUS_UPDATED
;
5168 if (likely(!tg3_has_work(tnapi
))) {
5169 napi_complete(napi
);
5170 tg3_int_reenable(tnapi
);
5178 /* work_done is guaranteed to be less than budget. */
5179 napi_complete(napi
);
5180 schedule_work(&tp
->reset_task
);
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
5311 static irqreturn_t
tg3_interrupt(int irq
, void *dev_id
)
5313 struct tg3_napi
*tnapi
= dev_id
;
5314 struct tg3
*tp
= tnapi
->tp
;
5315 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
5316 unsigned int handled
= 1;
5318 /* In INTx mode, it is possible for the interrupt to arrive at
5319 * the CPU before the status block posted prior to the interrupt.
5320 * Reading the PCI State register will confirm whether the
5321 * interrupt is ours and will flush the status block.
5323 if (unlikely(!(sblk
->status
& SD_STATUS_UPDATED
))) {
5324 if ((tp
->tg3_flags
& TG3_FLAG_CHIP_RESETTING
) ||
5325 (tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
5332 * Writing any value to intr-mbox-0 clears PCI INTA# and
5333 * chip-internal interrupt pending events.
5334 * Writing non-zero to intr-mbox-0 additional tells the
5335 * NIC to stop sending us irqs, engaging "in-intr-handler"
5338 * Flush the mailbox to de-assert the IRQ immediately to prevent
5339 * spurious interrupts. The flush impacts performance but
5340 * excessive spurious interrupts can be worse in some cases.
5342 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
5343 if (tg3_irq_sync(tp
))
5345 sblk
->status
&= ~SD_STATUS_UPDATED
;
5346 if (likely(tg3_has_work(tnapi
))) {
5347 prefetch(&tnapi
->rx_rcb
[tnapi
->rx_rcb_ptr
]);
5348 napi_schedule(&tnapi
->napi
);
5350 /* No work, shared interrupt perhaps? re-enable
5351 * interrupts, and flush that PCI write
5353 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
5357 return IRQ_RETVAL(handled
);
5360 static irqreturn_t
tg3_interrupt_tagged(int irq
, void *dev_id
)
5362 struct tg3_napi
*tnapi
= dev_id
;
5363 struct tg3
*tp
= tnapi
->tp
;
5364 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
5365 unsigned int handled
= 1;
5367 /* In INTx mode, it is possible for the interrupt to arrive at
5368 * the CPU before the status block posted prior to the interrupt.
5369 * Reading the PCI State register will confirm whether the
5370 * interrupt is ours and will flush the status block.
5372 if (unlikely(sblk
->status_tag
== tnapi
->last_irq_tag
)) {
5373 if ((tp
->tg3_flags
& TG3_FLAG_CHIP_RESETTING
) ||
5374 (tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
5381 * writing any value to intr-mbox-0 clears PCI INTA# and
5382 * chip-internal interrupt pending events.
5383 * writing non-zero to intr-mbox-0 additional tells the
5384 * NIC to stop sending us irqs, engaging "in-intr-handler"
5387 * Flush the mailbox to de-assert the IRQ immediately to prevent
5388 * spurious interrupts. The flush impacts performance but
5389 * excessive spurious interrupts can be worse in some cases.
5391 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
5394 * In a shared interrupt configuration, sometimes other devices'
5395 * interrupts will scream. We record the current status tag here
5396 * so that the above check can report that the screaming interrupts
5397 * are unhandled. Eventually they will be silenced.
5399 tnapi
->last_irq_tag
= sblk
->status_tag
;
5401 if (tg3_irq_sync(tp
))
5404 prefetch(&tnapi
->rx_rcb
[tnapi
->rx_rcb_ptr
]);
5406 napi_schedule(&tnapi
->napi
);
5409 return IRQ_RETVAL(handled
);
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}

static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
5454 #ifdef CONFIG_NET_POLL_CONTROLLER
5455 static void tg3_poll_controller(struct net_device
*dev
)
5458 struct tg3
*tp
= netdev_priv(dev
);
5460 for (i
= 0; i
< tp
->irq_cnt
; i
++)
5461 tg3_interrupt(tp
->napi
[i
].irq_vec
, &tp
->napi
[i
]);
5465 static void tg3_reset_task(struct work_struct
*work
)
5467 struct tg3
*tp
= container_of(work
, struct tg3
, reset_task
);
5469 unsigned int restart_timer
;
5471 tg3_full_lock(tp
, 0);
5473 if (!netif_running(tp
->dev
)) {
5474 tg3_full_unlock(tp
);
5478 tg3_full_unlock(tp
);
5484 tg3_full_lock(tp
, 1);
5486 restart_timer
= tp
->tg3_flags2
& TG3_FLG2_RESTART_TIMER
;
5487 tp
->tg3_flags2
&= ~TG3_FLG2_RESTART_TIMER
;
5489 if (tp
->tg3_flags
& TG3_FLAG_TX_RECOVERY_PENDING
) {
5490 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
5491 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
5492 tp
->tg3_flags
|= TG3_FLAG_MBOX_WRITE_REORDER
;
5493 tp
->tg3_flags
&= ~TG3_FLAG_TX_RECOVERY_PENDING
;
5496 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 0);
5497 err
= tg3_init_hw(tp
, 1);
5501 tg3_netif_start(tp
);
5504 mod_timer(&tp
->timer
, jiffies
+ 1);
5507 tg3_full_unlock(tp
);
static void tg3_dump_short_state(struct tg3 *tp)
{
	netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
		   tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
		   tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}
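/* Example (illustrative numbers, not from the original source): for
 * base = 0xfffff000 and len = 0x2000, base + len + 8 wraps the 32-bit
 * value around to 0x1008, which is smaller than base, so the buffer
 * straddles a 4GB boundary and the DMA workaround path must be taken.
 */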
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}

static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5556 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5557 static int tigon3_dma_hwbug_workaround(struct tg3_napi
*tnapi
,
5558 struct sk_buff
*skb
, u32 last_plus_one
,
5559 u32
*start
, u32 base_flags
, u32 mss
)
5561 struct tg3
*tp
= tnapi
->tp
;
5562 struct sk_buff
*new_skb
;
5563 dma_addr_t new_addr
= 0;
5567 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
)
5568 new_skb
= skb_copy(skb
, GFP_ATOMIC
);
5570 int more_headroom
= 4 - ((unsigned long)skb
->data
& 3);
5572 new_skb
= skb_copy_expand(skb
,
5573 skb_headroom(skb
) + more_headroom
,
5574 skb_tailroom(skb
), GFP_ATOMIC
);
5580 /* New SKB is guaranteed to be linear. */
5582 new_addr
= pci_map_single(tp
->pdev
, new_skb
->data
, new_skb
->len
,
5584 /* Make sure the mapping succeeded */
5585 if (pci_dma_mapping_error(tp
->pdev
, new_addr
)) {
5587 dev_kfree_skb(new_skb
);
5590 /* Make sure new skb does not cross any 4G boundaries.
5591 * Drop the packet if it does.
5593 } else if ((tp
->tg3_flags3
& TG3_FLG3_4G_DMA_BNDRY_BUG
) &&
5594 tg3_4g_overflow_test(new_addr
, new_skb
->len
)) {
5595 pci_unmap_single(tp
->pdev
, new_addr
, new_skb
->len
,
5598 dev_kfree_skb(new_skb
);
5601 tg3_set_txd(tnapi
, entry
, new_addr
, new_skb
->len
,
5602 base_flags
, 1 | (mss
<< 1));
5603 *start
= NEXT_TX(entry
);
5607 /* Now clean up the sw ring entries. */
5609 while (entry
!= last_plus_one
) {
5613 len
= skb_headlen(skb
);
5615 len
= skb_shinfo(skb
)->frags
[i
-1].size
;
5617 pci_unmap_single(tp
->pdev
,
5618 dma_unmap_addr(&tnapi
->tx_buffers
[entry
],
5620 len
, PCI_DMA_TODEVICE
);
5622 tnapi
->tx_buffers
[entry
].skb
= new_skb
;
5623 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
], mapping
,
5626 tnapi
->tx_buffers
[entry
].skb
= NULL
;
5628 entry
= NEXT_TX(entry
);
static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
			dma_addr_t mapping, int len, u32 flags,
			u32 mss_and_is_end)
{
	struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
	int is_end = (mss_and_is_end & 0x1);
	u32 mss = (mss_and_is_end >> 1);
	u32 vlan_tag = 0;

	if (is_end)
		flags |= TXD_FLAG_END;
	if (flags & TXD_FLAG_VLAN) {
		vlan_tag = flags >> 16;
		flags &= 0xffff;
	}
	vlan_tag |= (mss << TXD_MSS_SHIFT);

	txd->addr_hi = ((u64) mapping >> 32);
	txd->addr_lo = ((u64) mapping & 0xffffffff);
	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
}
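/* Callers fold the "is_end" bit and the TSO MSS into the last argument,
 * e.g. in tg3_start_xmit() below:
 *
 *	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
 *		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
 *
 * Bit 0 marks the final fragment of the packet and the remaining bits
 * carry the MSS, matching the decode at the top of tg3_set_txd().
 */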
5660 /* hard_start_xmit for devices that don't have any bugs and
5661 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5663 static netdev_tx_t
tg3_start_xmit(struct sk_buff
*skb
,
5664 struct net_device
*dev
)
5666 struct tg3
*tp
= netdev_priv(dev
);
5667 u32 len
, entry
, base_flags
, mss
;
5669 struct tg3_napi
*tnapi
;
5670 struct netdev_queue
*txq
;
5671 unsigned int i
, last
;
5673 txq
= netdev_get_tx_queue(dev
, skb_get_queue_mapping(skb
));
5674 tnapi
= &tp
->napi
[skb_get_queue_mapping(skb
)];
5675 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_TSS
)
5678 /* We are running in BH disabled context with netif_tx_lock
5679 * and TX reclaim runs via tp->napi.poll inside of a software
5680 * interrupt. Furthermore, IRQ processing runs lockless so we have
5681 * no IRQ context deadlocks to worry about either. Rejoice!
5683 if (unlikely(tg3_tx_avail(tnapi
) <= (skb_shinfo(skb
)->nr_frags
+ 1))) {
5684 if (!netif_tx_queue_stopped(txq
)) {
5685 netif_tx_stop_queue(txq
);
5687 /* This is a hard error, log it. */
5689 "BUG! Tx Ring full when queue awake!\n");
5691 return NETDEV_TX_BUSY
;
5694 entry
= tnapi
->tx_prod
;
5696 mss
= skb_shinfo(skb
)->gso_size
;
5698 int tcp_opt_len
, ip_tcp_len
;
5701 if (skb_header_cloned(skb
) &&
5702 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
)) {
5707 if (skb_is_gso_v6(skb
)) {
5708 hdrlen
= skb_headlen(skb
) - ETH_HLEN
;
5710 struct iphdr
*iph
= ip_hdr(skb
);
5712 tcp_opt_len
= tcp_optlen(skb
);
5713 ip_tcp_len
= ip_hdrlen(skb
) + sizeof(struct tcphdr
);
5716 iph
->tot_len
= htons(mss
+ ip_tcp_len
+ tcp_opt_len
);
5717 hdrlen
= ip_tcp_len
+ tcp_opt_len
;
5720 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO_3
) {
5721 mss
|= (hdrlen
& 0xc) << 12;
5723 base_flags
|= 0x00000010;
5724 base_flags
|= (hdrlen
& 0x3e0) << 5;
5728 base_flags
|= (TXD_FLAG_CPU_PRE_DMA
|
5729 TXD_FLAG_CPU_POST_DMA
);
5731 tcp_hdr(skb
)->check
= 0;
5733 } else if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
5734 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
5737 if (vlan_tx_tag_present(skb
))
5738 base_flags
|= (TXD_FLAG_VLAN
|
5739 (vlan_tx_tag_get(skb
) << 16));
5741 len
= skb_headlen(skb
);
5743 /* Queue skb data, a.k.a. the main skb fragment. */
5744 mapping
= pci_map_single(tp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
5745 if (pci_dma_mapping_error(tp
->pdev
, mapping
)) {
5750 tnapi
->tx_buffers
[entry
].skb
= skb
;
5751 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
], mapping
, mapping
);
5753 if ((tp
->tg3_flags3
& TG3_FLG3_USE_JUMBO_BDFLAG
) &&
5754 !mss
&& skb
->len
> VLAN_ETH_FRAME_LEN
)
5755 base_flags
|= TXD_FLAG_JMB_PKT
;
5757 tg3_set_txd(tnapi
, entry
, mapping
, len
, base_flags
,
5758 (skb_shinfo(skb
)->nr_frags
== 0) | (mss
<< 1));
5760 entry
= NEXT_TX(entry
);
5762 /* Now loop through additional data fragments, and queue them. */
5763 if (skb_shinfo(skb
)->nr_frags
> 0) {
5764 last
= skb_shinfo(skb
)->nr_frags
- 1;
5765 for (i
= 0; i
<= last
; i
++) {
5766 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
5769 mapping
= pci_map_page(tp
->pdev
,
5772 len
, PCI_DMA_TODEVICE
);
5773 if (pci_dma_mapping_error(tp
->pdev
, mapping
))
5776 tnapi
->tx_buffers
[entry
].skb
= NULL
;
5777 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
], mapping
,
5780 tg3_set_txd(tnapi
, entry
, mapping
, len
,
5781 base_flags
, (i
== last
) | (mss
<< 1));
5783 entry
= NEXT_TX(entry
);
5787 /* Packets are ready, update Tx producer idx local and on card. */
5788 tw32_tx_mbox(tnapi
->prodmbox
, entry
);
5790 tnapi
->tx_prod
= entry
;
5791 if (unlikely(tg3_tx_avail(tnapi
) <= (MAX_SKB_FRAGS
+ 1))) {
5792 netif_tx_stop_queue(txq
);
5794 /* netif_tx_stop_queue() must be done before checking
5795 * checking tx index in tg3_tx_avail() below, because in
5796 * tg3_tx(), we update tx index before checking for
5797 * netif_tx_queue_stopped().
5800 if (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
))
5801 netif_tx_wake_queue(txq
);
5807 return NETDEV_TX_OK
;
5811 entry
= tnapi
->tx_prod
;
5812 tnapi
->tx_buffers
[entry
].skb
= NULL
;
5813 pci_unmap_single(tp
->pdev
,
5814 dma_unmap_addr(&tnapi
->tx_buffers
[entry
], mapping
),
5817 for (i
= 0; i
<= last
; i
++) {
5818 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
5819 entry
= NEXT_TX(entry
);
5821 pci_unmap_page(tp
->pdev
,
5822 dma_unmap_addr(&tnapi
->tx_buffers
[entry
],
5824 frag
->size
, PCI_DMA_TODEVICE
);
5828 return NETDEV_TX_OK
;
5831 static netdev_tx_t
tg3_start_xmit_dma_bug(struct sk_buff
*,
5832 struct net_device
*);
5834 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5835 * TSO header is greater than 80 bytes.
5837 static int tg3_tso_bug(struct tg3
*tp
, struct sk_buff
*skb
)
5839 struct sk_buff
*segs
, *nskb
;
5840 u32 frag_cnt_est
= skb_shinfo(skb
)->gso_segs
* 3;
5842 /* Estimate the number of fragments in the worst case */
5843 if (unlikely(tg3_tx_avail(&tp
->napi
[0]) <= frag_cnt_est
)) {
5844 netif_stop_queue(tp
->dev
);
5846 /* netif_tx_stop_queue() must be done before checking
5847 * checking tx index in tg3_tx_avail() below, because in
5848 * tg3_tx(), we update tx index before checking for
5849 * netif_tx_queue_stopped().
5852 if (tg3_tx_avail(&tp
->napi
[0]) <= frag_cnt_est
)
5853 return NETDEV_TX_BUSY
;
5855 netif_wake_queue(tp
->dev
);
5858 segs
= skb_gso_segment(skb
, tp
->dev
->features
& ~NETIF_F_TSO
);
5860 goto tg3_tso_bug_end
;
5866 tg3_start_xmit_dma_bug(nskb
, tp
->dev
);
5872 return NETDEV_TX_OK
;
5875 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5876 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5878 static netdev_tx_t
tg3_start_xmit_dma_bug(struct sk_buff
*skb
,
5879 struct net_device
*dev
)
5881 struct tg3
*tp
= netdev_priv(dev
);
5882 u32 len
, entry
, base_flags
, mss
;
5883 int would_hit_hwbug
;
5885 struct tg3_napi
*tnapi
;
5886 struct netdev_queue
*txq
;
5887 unsigned int i
, last
;
5889 txq
= netdev_get_tx_queue(dev
, skb_get_queue_mapping(skb
));
5890 tnapi
= &tp
->napi
[skb_get_queue_mapping(skb
)];
5891 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_TSS
)
5894 /* We are running in BH disabled context with netif_tx_lock
5895 * and TX reclaim runs via tp->napi.poll inside of a software
5896 * interrupt. Furthermore, IRQ processing runs lockless so we have
5897 * no IRQ context deadlocks to worry about either. Rejoice!
5899 if (unlikely(tg3_tx_avail(tnapi
) <= (skb_shinfo(skb
)->nr_frags
+ 1))) {
5900 if (!netif_tx_queue_stopped(txq
)) {
5901 netif_tx_stop_queue(txq
);
5903 /* This is a hard error, log it. */
5905 "BUG! Tx Ring full when queue awake!\n");
5907 return NETDEV_TX_BUSY
;
5910 entry
= tnapi
->tx_prod
;
5912 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
5913 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
5915 mss
= skb_shinfo(skb
)->gso_size
;
5918 u32 tcp_opt_len
, hdr_len
;
5920 if (skb_header_cloned(skb
) &&
5921 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
)) {
5927 tcp_opt_len
= tcp_optlen(skb
);
5929 if (skb_is_gso_v6(skb
)) {
5930 hdr_len
= skb_headlen(skb
) - ETH_HLEN
;
5934 ip_tcp_len
= ip_hdrlen(skb
) + sizeof(struct tcphdr
);
5935 hdr_len
= ip_tcp_len
+ tcp_opt_len
;
5938 iph
->tot_len
= htons(mss
+ hdr_len
);
5941 if (unlikely((ETH_HLEN
+ hdr_len
) > 80) &&
5942 (tp
->tg3_flags2
& TG3_FLG2_TSO_BUG
))
5943 return tg3_tso_bug(tp
, skb
);
5945 base_flags
|= (TXD_FLAG_CPU_PRE_DMA
|
5946 TXD_FLAG_CPU_POST_DMA
);
5948 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) {
5949 tcp_hdr(skb
)->check
= 0;
5950 base_flags
&= ~TXD_FLAG_TCPUDP_CSUM
;
5952 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
,
5957 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO_3
) {
5958 mss
|= (hdr_len
& 0xc) << 12;
5960 base_flags
|= 0x00000010;
5961 base_flags
|= (hdr_len
& 0x3e0) << 5;
5962 } else if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO_2
)
5963 mss
|= hdr_len
<< 9;
5964 else if ((tp
->tg3_flags2
& TG3_FLG2_HW_TSO_1
) ||
5965 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
5966 if (tcp_opt_len
|| iph
->ihl
> 5) {
5969 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
5970 mss
|= (tsflags
<< 11);
5973 if (tcp_opt_len
|| iph
->ihl
> 5) {
5976 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
5977 base_flags
|= tsflags
<< 12;
5982 if (vlan_tx_tag_present(skb
))
5983 base_flags
|= (TXD_FLAG_VLAN
|
5984 (vlan_tx_tag_get(skb
) << 16));
5986 if ((tp
->tg3_flags3
& TG3_FLG3_USE_JUMBO_BDFLAG
) &&
5987 !mss
&& skb
->len
> VLAN_ETH_FRAME_LEN
)
5988 base_flags
|= TXD_FLAG_JMB_PKT
;
5990 len
= skb_headlen(skb
);
5992 mapping
= pci_map_single(tp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
5993 if (pci_dma_mapping_error(tp
->pdev
, mapping
)) {
5998 tnapi
->tx_buffers
[entry
].skb
= skb
;
5999 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
], mapping
, mapping
);
6001 would_hit_hwbug
= 0;
6003 if ((tp
->tg3_flags3
& TG3_FLG3_SHORT_DMA_BUG
) && len
<= 8)
6004 would_hit_hwbug
= 1;
6006 if ((tp
->tg3_flags3
& TG3_FLG3_4G_DMA_BNDRY_BUG
) &&
6007 tg3_4g_overflow_test(mapping
, len
))
6008 would_hit_hwbug
= 1;
6010 if ((tp
->tg3_flags3
& TG3_FLG3_40BIT_DMA_LIMIT_BUG
) &&
6011 tg3_40bit_overflow_test(tp
, mapping
, len
))
6012 would_hit_hwbug
= 1;
6014 if (tp
->tg3_flags3
& TG3_FLG3_5701_DMA_BUG
)
6015 would_hit_hwbug
= 1;
6017 tg3_set_txd(tnapi
, entry
, mapping
, len
, base_flags
,
6018 (skb_shinfo(skb
)->nr_frags
== 0) | (mss
<< 1));
6020 entry
= NEXT_TX(entry
);
6022 /* Now loop through additional data fragments, and queue them. */
6023 if (skb_shinfo(skb
)->nr_frags
> 0) {
6024 last
= skb_shinfo(skb
)->nr_frags
- 1;
6025 for (i
= 0; i
<= last
; i
++) {
6026 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
6029 mapping
= pci_map_page(tp
->pdev
,
6032 len
, PCI_DMA_TODEVICE
);
6034 tnapi
->tx_buffers
[entry
].skb
= NULL
;
6035 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
], mapping
,
6037 if (pci_dma_mapping_error(tp
->pdev
, mapping
))
6040 if ((tp
->tg3_flags3
& TG3_FLG3_SHORT_DMA_BUG
) &&
6042 would_hit_hwbug
= 1;
6044 if ((tp
->tg3_flags3
& TG3_FLG3_4G_DMA_BNDRY_BUG
) &&
6045 tg3_4g_overflow_test(mapping
, len
))
6046 would_hit_hwbug
= 1;
6048 if ((tp
->tg3_flags3
& TG3_FLG3_40BIT_DMA_LIMIT_BUG
) &&
6049 tg3_40bit_overflow_test(tp
, mapping
, len
))
6050 would_hit_hwbug
= 1;
6052 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
6053 tg3_set_txd(tnapi
, entry
, mapping
, len
,
6054 base_flags
, (i
== last
)|(mss
<< 1));
6056 tg3_set_txd(tnapi
, entry
, mapping
, len
,
6057 base_flags
, (i
== last
));
6059 entry
= NEXT_TX(entry
);
6063 if (would_hit_hwbug
) {
6064 u32 last_plus_one
= entry
;
6067 start
= entry
- 1 - skb_shinfo(skb
)->nr_frags
;
6068 start
&= (TG3_TX_RING_SIZE
- 1);
6070 /* If the workaround fails due to memory/mapping
6071 * failure, silently drop this packet.
6073 if (tigon3_dma_hwbug_workaround(tnapi
, skb
, last_plus_one
,
6074 &start
, base_flags
, mss
))
6080 /* Packets are ready, update Tx producer idx local and on card. */
6081 tw32_tx_mbox(tnapi
->prodmbox
, entry
);
6083 tnapi
->tx_prod
= entry
;
6084 if (unlikely(tg3_tx_avail(tnapi
) <= (MAX_SKB_FRAGS
+ 1))) {
6085 netif_tx_stop_queue(txq
);
6087 /* netif_tx_stop_queue() must be done before checking
6088 * checking tx index in tg3_tx_avail() below, because in
6089 * tg3_tx(), we update tx index before checking for
6090 * netif_tx_queue_stopped().
6093 if (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
))
6094 netif_tx_wake_queue(txq
);
6100 return NETDEV_TX_OK
;
6104 entry
= tnapi
->tx_prod
;
6105 tnapi
->tx_buffers
[entry
].skb
= NULL
;
6106 pci_unmap_single(tp
->pdev
,
6107 dma_unmap_addr(&tnapi
->tx_buffers
[entry
], mapping
),
6110 for (i
= 0; i
<= last
; i
++) {
6111 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
6112 entry
= NEXT_TX(entry
);
6114 pci_unmap_page(tp
->pdev
,
6115 dma_unmap_addr(&tnapi
->tx_buffers
[entry
],
6117 frag
->size
, PCI_DMA_TODEVICE
);
6121 return NETDEV_TX_OK
;
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
			ethtool_op_set_tso(dev, 0);
		} else {
			tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
		}
	} else {
		if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
		tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
	}
}
6143 static int tg3_change_mtu(struct net_device
*dev
, int new_mtu
)
6145 struct tg3
*tp
= netdev_priv(dev
);
6148 if (new_mtu
< TG3_MIN_MTU
|| new_mtu
> TG3_MAX_MTU(tp
))
6151 if (!netif_running(dev
)) {
6152 /* We'll just catch it later when the
6155 tg3_set_mtu(dev
, tp
, new_mtu
);
6163 tg3_full_lock(tp
, 1);
6165 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
6167 tg3_set_mtu(dev
, tp
, new_mtu
);
6169 err
= tg3_restart_hw(tp
, 0);
6172 tg3_netif_start(tp
);
6174 tg3_full_unlock(tp
);
6182 static void tg3_rx_prodring_free(struct tg3
*tp
,
6183 struct tg3_rx_prodring_set
*tpr
)
6187 if (tpr
!= &tp
->napi
[0].prodring
) {
6188 for (i
= tpr
->rx_std_cons_idx
; i
!= tpr
->rx_std_prod_idx
;
6189 i
= (i
+ 1) & tp
->rx_std_ring_mask
)
6190 tg3_rx_skb_free(tp
, &tpr
->rx_std_buffers
[i
],
6193 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_CAPABLE
) {
6194 for (i
= tpr
->rx_jmb_cons_idx
;
6195 i
!= tpr
->rx_jmb_prod_idx
;
6196 i
= (i
+ 1) & tp
->rx_jmb_ring_mask
) {
6197 tg3_rx_skb_free(tp
, &tpr
->rx_jmb_buffers
[i
],
6205 for (i
= 0; i
<= tp
->rx_std_ring_mask
; i
++)
6206 tg3_rx_skb_free(tp
, &tpr
->rx_std_buffers
[i
],
6209 if ((tp
->tg3_flags
& TG3_FLAG_JUMBO_CAPABLE
) &&
6210 !(tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)) {
6211 for (i
= 0; i
<= tp
->rx_jmb_ring_mask
; i
++)
6212 tg3_rx_skb_free(tp
, &tpr
->rx_jmb_buffers
[i
],
6217 /* Initialize rx rings for packet processing.
6219 * The chip has been shut down and the driver detached from
6220 * the networking, so no interrupts or new tx packets will
6221 * end up in the driver. tp->{tx,}lock are held and thus
6224 static int tg3_rx_prodring_alloc(struct tg3
*tp
,
6225 struct tg3_rx_prodring_set
*tpr
)
6227 u32 i
, rx_pkt_dma_sz
;
6229 tpr
->rx_std_cons_idx
= 0;
6230 tpr
->rx_std_prod_idx
= 0;
6231 tpr
->rx_jmb_cons_idx
= 0;
6232 tpr
->rx_jmb_prod_idx
= 0;
6234 if (tpr
!= &tp
->napi
[0].prodring
) {
6235 memset(&tpr
->rx_std_buffers
[0], 0,
6236 TG3_RX_STD_BUFF_RING_SIZE(tp
));
6237 if (tpr
->rx_jmb_buffers
)
6238 memset(&tpr
->rx_jmb_buffers
[0], 0,
6239 TG3_RX_JMB_BUFF_RING_SIZE(tp
));
6243 /* Zero out all descriptors. */
6244 memset(tpr
->rx_std
, 0, TG3_RX_STD_RING_BYTES(tp
));
6246 rx_pkt_dma_sz
= TG3_RX_STD_DMA_SZ
;
6247 if ((tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) &&
6248 tp
->dev
->mtu
> ETH_DATA_LEN
)
6249 rx_pkt_dma_sz
= TG3_RX_JMB_DMA_SZ
;
6250 tp
->rx_pkt_map_sz
= TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz
);
6252 /* Initialize invariants of the rings, we only set this
6253 * stuff once. This works because the card does not
6254 * write into the rx buffer posting rings.
6256 for (i
= 0; i
<= tp
->rx_std_ring_mask
; i
++) {
6257 struct tg3_rx_buffer_desc
*rxd
;
6259 rxd
= &tpr
->rx_std
[i
];
6260 rxd
->idx_len
= rx_pkt_dma_sz
<< RXD_LEN_SHIFT
;
6261 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
);
6262 rxd
->opaque
= (RXD_OPAQUE_RING_STD
|
6263 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
6266 /* Now allocate fresh SKBs for each rx ring. */
6267 for (i
= 0; i
< tp
->rx_pending
; i
++) {
6268 if (tg3_alloc_rx_skb(tp
, tpr
, RXD_OPAQUE_RING_STD
, i
) < 0) {
6269 netdev_warn(tp
->dev
,
6270 "Using a smaller RX standard ring. Only "
6271 "%d out of %d buffers were allocated "
6272 "successfully\n", i
, tp
->rx_pending
);
6280 if (!(tp
->tg3_flags
& TG3_FLAG_JUMBO_CAPABLE
) ||
6281 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
6284 memset(tpr
->rx_jmb
, 0, TG3_RX_JMB_RING_BYTES(tp
));
6286 if (!(tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
))
6289 for (i
= 0; i
<= tp
->rx_jmb_ring_mask
; i
++) {
6290 struct tg3_rx_buffer_desc
*rxd
;
6292 rxd
= &tpr
->rx_jmb
[i
].std
;
6293 rxd
->idx_len
= TG3_RX_JMB_DMA_SZ
<< RXD_LEN_SHIFT
;
6294 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
) |
6296 rxd
->opaque
= (RXD_OPAQUE_RING_JUMBO
|
6297 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
6300 for (i
= 0; i
< tp
->rx_jumbo_pending
; i
++) {
6301 if (tg3_alloc_rx_skb(tp
, tpr
, RXD_OPAQUE_RING_JUMBO
, i
) < 0) {
6302 netdev_warn(tp
->dev
,
6303 "Using a smaller RX jumbo ring. Only %d "
6304 "out of %d buffers were allocated "
6305 "successfully\n", i
, tp
->rx_jumbo_pending
);
6308 tp
->rx_jumbo_pending
= i
;
6317 tg3_rx_prodring_free(tp
, tpr
);
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
6377 /* Free up pending packets in all rx/tx rings.
6379 * The chip has been shut down and the driver detached from
6380 * the networking, so no interrupts or new tx packets will
6381 * end up in the driver. tp->{tx,}lock is not held and we are not
6382 * in an interrupt context and thus may sleep.
6384 static void tg3_free_rings(struct tg3
*tp
)
6388 for (j
= 0; j
< tp
->irq_cnt
; j
++) {
6389 struct tg3_napi
*tnapi
= &tp
->napi
[j
];
6391 tg3_rx_prodring_free(tp
, &tnapi
->prodring
);
6393 if (!tnapi
->tx_buffers
)
6396 for (i
= 0; i
< TG3_TX_RING_SIZE
; ) {
6397 struct ring_info
*txp
;
6398 struct sk_buff
*skb
;
6401 txp
= &tnapi
->tx_buffers
[i
];
6409 pci_unmap_single(tp
->pdev
,
6410 dma_unmap_addr(txp
, mapping
),
6417 for (k
= 0; k
< skb_shinfo(skb
)->nr_frags
; k
++) {
6418 txp
= &tnapi
->tx_buffers
[i
& (TG3_TX_RING_SIZE
- 1)];
6419 pci_unmap_page(tp
->pdev
,
6420 dma_unmap_addr(txp
, mapping
),
6421 skb_shinfo(skb
)->frags
[k
].size
,
6426 dev_kfree_skb_any(skb
);
6431 /* Initialize tx/rx rings for packet processing.
6433 * The chip has been shut down and the driver detached from
6434 * the networking, so no interrupts or new tx packets will
6435 * end up in the driver. tp->{tx,}lock are held and thus
6438 static int tg3_init_rings(struct tg3
*tp
)
6442 /* Free up all the SKBs. */
6445 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
6446 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
6448 tnapi
->last_tag
= 0;
6449 tnapi
->last_irq_tag
= 0;
6450 tnapi
->hw_status
->status
= 0;
6451 tnapi
->hw_status
->status_tag
= 0;
6452 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
6457 memset(tnapi
->tx_ring
, 0, TG3_TX_RING_BYTES
);
6459 tnapi
->rx_rcb_ptr
= 0;
6461 memset(tnapi
->rx_rcb
, 0, TG3_RX_RCB_RING_BYTES(tp
));
6463 if (tg3_rx_prodring_alloc(tp
, &tnapi
->prodring
)) {
6473 * Must not be invoked with interrupt sources disabled and
6474 * the hardware shutdown down.
6476 static void tg3_free_consistent(struct tg3
*tp
)
6480 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
6481 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
6483 if (tnapi
->tx_ring
) {
6484 dma_free_coherent(&tp
->pdev
->dev
, TG3_TX_RING_BYTES
,
6485 tnapi
->tx_ring
, tnapi
->tx_desc_mapping
);
6486 tnapi
->tx_ring
= NULL
;
6489 kfree(tnapi
->tx_buffers
);
6490 tnapi
->tx_buffers
= NULL
;
6492 if (tnapi
->rx_rcb
) {
6493 dma_free_coherent(&tp
->pdev
->dev
,
6494 TG3_RX_RCB_RING_BYTES(tp
),
6496 tnapi
->rx_rcb_mapping
);
6497 tnapi
->rx_rcb
= NULL
;
6500 tg3_rx_prodring_fini(tp
, &tnapi
->prodring
);
6502 if (tnapi
->hw_status
) {
6503 dma_free_coherent(&tp
->pdev
->dev
, TG3_HW_STATUS_SIZE
,
6505 tnapi
->status_mapping
);
6506 tnapi
->hw_status
= NULL
;
6511 dma_free_coherent(&tp
->pdev
->dev
, sizeof(struct tg3_hw_stats
),
6512 tp
->hw_stats
, tp
->stats_mapping
);
6513 tp
->hw_stats
= NULL
;
6518 * Must not be invoked with interrupt sources disabled and
6519 * the hardware shutdown down. Can sleep.
6521 static int tg3_alloc_consistent(struct tg3
*tp
)
6525 tp
->hw_stats
= dma_alloc_coherent(&tp
->pdev
->dev
,
6526 sizeof(struct tg3_hw_stats
),
6532 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
6534 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
6535 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
6536 struct tg3_hw_status
*sblk
;
6538 tnapi
->hw_status
= dma_alloc_coherent(&tp
->pdev
->dev
,
6540 &tnapi
->status_mapping
,
6542 if (!tnapi
->hw_status
)
6545 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
6546 sblk
= tnapi
->hw_status
;
6548 if (tg3_rx_prodring_init(tp
, &tnapi
->prodring
))
6551 /* If multivector TSS is enabled, vector 0 does not handle
6552 * tx interrupts. Don't allocate any resources for it.
6554 if ((!i
&& !(tp
->tg3_flags3
& TG3_FLG3_ENABLE_TSS
)) ||
6555 (i
&& (tp
->tg3_flags3
& TG3_FLG3_ENABLE_TSS
))) {
6556 tnapi
->tx_buffers
= kzalloc(sizeof(struct ring_info
) *
6559 if (!tnapi
->tx_buffers
)
6562 tnapi
->tx_ring
= dma_alloc_coherent(&tp
->pdev
->dev
,
6564 &tnapi
->tx_desc_mapping
,
6566 if (!tnapi
->tx_ring
)
6571 * When RSS is enabled, the status block format changes
6572 * slightly. The "rx_jumbo_consumer", "reserved",
6573 * and "rx_mini_consumer" members get mapped to the
6574 * other three rx return ring producer indexes.
6578 tnapi
->rx_rcb_prod_idx
= &sblk
->idx
[0].rx_producer
;
6581 tnapi
->rx_rcb_prod_idx
= &sblk
->rx_jumbo_consumer
;
6584 tnapi
->rx_rcb_prod_idx
= &sblk
->reserved
;
6587 tnapi
->rx_rcb_prod_idx
= &sblk
->rx_mini_consumer
;
6592 * If multivector RSS is enabled, vector 0 does not handle
6593 * rx or tx interrupts. Don't allocate any resources for it.
6595 if (!i
&& (tp
->tg3_flags3
& TG3_FLG3_ENABLE_RSS
))
6598 tnapi
->rx_rcb
= dma_alloc_coherent(&tp
->pdev
->dev
,
6599 TG3_RX_RCB_RING_BYTES(tp
),
6600 &tnapi
->rx_rcb_mapping
,
6605 memset(tnapi
->rx_rcb
, 0, TG3_RX_RCB_RING_BYTES(tp
));
6611 tg3_free_consistent(tp
);
6615 #define MAX_WAIT_CNT 1000
6617 /* To stop a block, clear the enable bit and poll till it
6618 * clears. tp->lock is held.
6620 static int tg3_stop_block(struct tg3
*tp
, unsigned long ofs
, u32 enable_bit
, int silent
)
6625 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
6632 /* We can't enable/disable these bits of the
6633 * 5705/5750, just say success.
6646 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
6649 if ((val
& enable_bit
) == 0)
6653 if (i
== MAX_WAIT_CNT
&& !silent
) {
6654 dev_err(&tp
->pdev
->dev
,
6655 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
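/* Editorial sketch (not part of the original source): tg3_poll_fw() relies
 * on a mailbox handshake -- the driver writes MAGIC1 into the firmware
 * mailbox before reset (see tg3_write_sig_pre_reset above) and the bootcode
 * is expected to write back the one's complement once it has finished
 * initializing.  A minimal restatement of that check, with the SRAM reader
 * passed in as a hypothetical callback; illustration only, compiled out.
 */
#if 0
#include <stdint.h>

static int fw_mbox_ready_sketch(uint32_t (*read_mem)(uint32_t off),
				uint32_t mbox_off, uint32_t magic,
				unsigned int max_polls)
{
	unsigned int i;

	for (i = 0; i < max_polls; i++) {
		if (read_mem(mbox_off) == ~magic)	/* bootcode done */
			return 1;
	}
	return 0;	/* no (or very slow) firmware */
}
#endif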
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
		else {
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
static void tg3_stop_fw(struct tg3 *);
7027 /* tp->lock is held. */
7028 static int tg3_chip_reset(struct tg3
*tp
)
7031 void (*write_op
)(struct tg3
*, u32
, u32
);
7036 tg3_ape_lock(tp
, TG3_APE_LOCK_GRC
);
7038 /* No matching tg3_nvram_unlock() after this because
7039 * chip reset below will undo the nvram lock.
7041 tp
->nvram_lock_cnt
= 0;
7043 /* GRC_MISC_CFG core clock reset will clear the memory
7044 * enable bit in PCI register 4 and the MSI enable bit
7045 * on some chips, so we save relevant registers here.
7047 tg3_save_pci_state(tp
);
7049 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
7050 (tp
->tg3_flags3
& TG3_FLG3_5755_PLUS
))
7051 tw32(GRC_FASTBOOT_PC
, 0);
7054 * We must avoid the readl() that normally takes place.
7055 * It locks machines, causes machine checks, and other
7056 * fun things. So, temporarily disable the 5701
7057 * hardware workaround, while we do the reset.
7059 write_op
= tp
->write32
;
7060 if (write_op
== tg3_write_flush_reg32
)
7061 tp
->write32
= tg3_write32
;
7063 /* Prevent the irq handler from reading or writing PCI registers
7064 * during chip reset when the memory enable bit in the PCI command
7065 * register may be cleared. The chip does not generate interrupt
7066 * at this time, but the irq handler may still be called due to irq
7067 * sharing or irqpoll.
7069 tp
->tg3_flags
|= TG3_FLAG_CHIP_RESETTING
;
7070 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
7071 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
7072 if (tnapi
->hw_status
) {
7073 tnapi
->hw_status
->status
= 0;
7074 tnapi
->hw_status
->status_tag
= 0;
7076 tnapi
->last_tag
= 0;
7077 tnapi
->last_irq_tag
= 0;
7081 for (i
= 0; i
< tp
->irq_cnt
; i
++)
7082 synchronize_irq(tp
->napi
[i
].irq_vec
);
7084 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
) {
7085 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
7086 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
7090 val
= GRC_MISC_CFG_CORECLK_RESET
;
7092 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
7093 /* Force PCIe 1.0a mode */
7094 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
&&
7095 !(tp
->tg3_flags3
& TG3_FLG3_57765_PLUS
) &&
7096 tr32(TG3_PCIE_PHY_TSTCTL
) ==
7097 (TG3_PCIE_PHY_TSTCTL_PCIE10
| TG3_PCIE_PHY_TSTCTL_PSCRAM
))
7098 tw32(TG3_PCIE_PHY_TSTCTL
, TG3_PCIE_PHY_TSTCTL_PSCRAM
);
7100 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
) {
7101 tw32(GRC_MISC_CFG
, (1 << 29));
7106 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
7107 tw32(VCPU_STATUS
, tr32(VCPU_STATUS
) | VCPU_STATUS_DRV_RESET
);
7108 tw32(GRC_VCPU_EXT_CTRL
,
7109 tr32(GRC_VCPU_EXT_CTRL
) & ~GRC_VCPU_EXT_CTRL_HALT_CPU
);
7112 /* Manage gphy power for all CPMU absent PCIe devices. */
7113 if ((tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) &&
7114 !(tp
->tg3_flags
& TG3_FLAG_CPMU_PRESENT
))
7115 val
|= GRC_MISC_CFG_KEEP_GPHY_POWER
;
7117 tw32(GRC_MISC_CFG
, val
);
7119 /* restore 5701 hardware bug workaround write method */
7120 tp
->write32
= write_op
;
7122 /* Unfortunately, we have to delay before the PCI read back.
7123 * Some 575X chips even will not respond to a PCI cfg access
7124 * when the reset command is given to the chip.
7126 * How do these hardware designers expect things to work
7127 * properly if the PCI write is posted for a long period
7128 * of time? It is always necessary to have some method by
7129 * which a register read back can occur to push the write
7130 * out which does the reset.
7132 * For most tg3 variants the trick below was working.
7137 /* Flush PCI posted writes. The normal MMIO registers
7138 * are inaccessible at this time so this is the only
7139 * way to make this reliably (actually, this is no longer
7140 * the case, see above). I tried to use indirect
7141 * register read/write but this upset some 5701 variants.
7143 pci_read_config_dword(tp
->pdev
, PCI_COMMAND
, &val
);
7147 if ((tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) && tp
->pcie_cap
) {
7150 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A0
) {
7154 /* Wait for link training to complete. */
7155 for (i
= 0; i
< 5000; i
++)
7158 pci_read_config_dword(tp
->pdev
, 0xc4, &cfg_val
);
7159 pci_write_config_dword(tp
->pdev
, 0xc4,
7160 cfg_val
| (1 << 15));
7163 /* Clear the "no snoop" and "relaxed ordering" bits. */
7164 pci_read_config_word(tp
->pdev
,
7165 tp
->pcie_cap
+ PCI_EXP_DEVCTL
,
7167 val16
&= ~(PCI_EXP_DEVCTL_RELAX_EN
|
7168 PCI_EXP_DEVCTL_NOSNOOP_EN
);
7170 * Older PCIe devices only support the 128 byte
7171 * MPS setting. Enforce the restriction.
7173 if (!(tp
->tg3_flags
& TG3_FLAG_CPMU_PRESENT
))
7174 val16
&= ~PCI_EXP_DEVCTL_PAYLOAD
;
7175 pci_write_config_word(tp
->pdev
,
7176 tp
->pcie_cap
+ PCI_EXP_DEVCTL
,
7179 pcie_set_readrq(tp
->pdev
, tp
->pcie_readrq
);
7181 /* Clear error status */
7182 pci_write_config_word(tp
->pdev
,
7183 tp
->pcie_cap
+ PCI_EXP_DEVSTA
,
7184 PCI_EXP_DEVSTA_CED
|
7185 PCI_EXP_DEVSTA_NFED
|
7186 PCI_EXP_DEVSTA_FED
|
7187 PCI_EXP_DEVSTA_URD
);
7190 tg3_restore_pci_state(tp
);
7192 tp
->tg3_flags
&= ~TG3_FLAG_CHIP_RESETTING
;
7195 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
7196 val
= tr32(MEMARB_MODE
);
7197 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
7199 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A3
) {
7201 tw32(0x5000, 0x400);
7204 tw32(GRC_MODE
, tp
->grc_mode
);
7206 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
) {
7209 tw32(0xc4, val
| (1 << 15));
7212 if ((tp
->nic_sram_data_cfg
& NIC_SRAM_DATA_CFG_MINI_PCI
) != 0 &&
7213 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
7214 tp
->pci_clock_ctrl
|= CLOCK_CTRL_CLKRUN_OENABLE
;
7215 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
)
7216 tp
->pci_clock_ctrl
|= CLOCK_CTRL_FORCE_CLKRUN
;
7217 tw32(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
7220 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
)
7221 tp
->mac_mode
= MAC_MODE_APE_TX_EN
|
7222 MAC_MODE_APE_RX_EN
|
7223 MAC_MODE_TDE_ENABLE
;
7225 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
7226 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
7228 } else if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
7229 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
7234 tw32_f(MAC_MODE
, val
);
7237 tg3_ape_unlock(tp
, TG3_APE_LOCK_GRC
);
7239 err
= tg3_poll_fw(tp
);
7245 if ((tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) &&
7246 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
&&
7247 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
&&
7248 !(tp
->tg3_flags3
& TG3_FLG3_57765_PLUS
)) {
7251 tw32(0x7c00, val
| (1 << 25));
7254 /* Reprobe ASF enable state. */
7255 tp
->tg3_flags
&= ~TG3_FLAG_ENABLE_ASF
;
7256 tp
->tg3_flags2
&= ~TG3_FLG2_ASF_NEW_HANDSHAKE
;
7257 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
7258 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
7261 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
7262 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
7263 tp
->tg3_flags
|= TG3_FLAG_ENABLE_ASF
;
7264 tp
->last_event_jiffies
= jiffies
;
7265 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
7266 tp
->tg3_flags2
|= TG3_FLG2_ASF_NEW_HANDSHAKE
;
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (err)
		return err;

	return 0;
}
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE &&
	       (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
struct fw_info {
	unsigned int fw_base;
	unsigned int fw_len;
	const __be32 *fw_data;
};

/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
				 int cpu_scratch_size, struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			 be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
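/* Editorial sketch (not part of the original source): the comment in
 * tg3_load_5701_a0_firmware_fix() above describes the firmware blob layout --
 * big-endian header words (version, start address, length) followed by the
 * image, with the usable length taken as the file size minus the 12-byte
 * header.  A standalone restatement of that parsing (struct and function
 * names are hypothetical); illustration only, compiled out.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

struct fw_hdr_sketch {
	uint32_t base;		/* load (start) address */
	size_t len;		/* bytes to load, header excluded */
	const uint8_t *data;	/* first payload byte */
};

/* Read one big-endian 32-bit header word. */
static uint32_t be32_sketch(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  | (uint32_t)p[3];
}

static void parse_fw_blob_sketch(const uint8_t *blob, size_t blob_size,
				 struct fw_hdr_sketch *out)
{
	out->base = be32_sketch(blob + 4);	/* word 1: start address */
	out->len  = blob_size - 12;		/* strip 3 header words */
	out->data = blob + 12;			/* payload follows header */
}
#endif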
7471 /* 5705 needs a special version of the TSO firmware. */
7473 /* tp->lock is held. */
7474 static int tg3_load_tso_firmware(struct tg3
*tp
)
7476 struct fw_info info
;
7477 const __be32
*fw_data
;
7478 unsigned long cpu_base
, cpu_scratch_base
, cpu_scratch_size
;
7481 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
7484 fw_data
= (void *)tp
->fw
->data
;
7486 /* Firmware blob starts with version numbers, followed by
7487 start address and length. We are setting complete length.
7488 length = end_address_of_bss - start_address_of_text.
7489 Remainder is the blob to be loaded contiguously
7490 from start address. */
7492 info
.fw_base
= be32_to_cpu(fw_data
[1]);
7493 cpu_scratch_size
= tp
->fw_len
;
7494 info
.fw_len
= tp
->fw
->size
- 12;
7495 info
.fw_data
= &fw_data
[3];
7497 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
7498 cpu_base
= RX_CPU_BASE
;
7499 cpu_scratch_base
= NIC_SRAM_MBUF_POOL_BASE5705
;
7501 cpu_base
= TX_CPU_BASE
;
7502 cpu_scratch_base
= TX_CPU_SCRATCH_BASE
;
7503 cpu_scratch_size
= TX_CPU_SCRATCH_SIZE
;
7506 err
= tg3_load_firmware_cpu(tp
, cpu_base
,
7507 cpu_scratch_base
, cpu_scratch_size
,
7512 /* Now startup the cpu. */
7513 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
7514 tw32_f(cpu_base
+ CPU_PC
, info
.fw_base
);
7516 for (i
= 0; i
< 5; i
++) {
7517 if (tr32(cpu_base
+ CPU_PC
) == info
.fw_base
)
7519 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
7520 tw32(cpu_base
+ CPU_MODE
, CPU_MODE_HALT
);
7521 tw32_f(cpu_base
+ CPU_PC
, info
.fw_base
);
7526 "%s fails to set CPU PC, is %08x should be %08x\n",
7527 __func__
, tr32(cpu_base
+ CPU_PC
), info
.fw_base
);
7530 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
7531 tw32_f(cpu_base
+ CPU_MODE
, 0x00000000);
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}

static void __tg3_set_rx_mode(struct net_device *);
7592 static void __tg3_set_coalesce(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
7596 if (!(tp
->tg3_flags3
& TG3_FLG3_ENABLE_TSS
)) {
7597 tw32(HOSTCC_TXCOL_TICKS
, ec
->tx_coalesce_usecs
);
7598 tw32(HOSTCC_TXMAX_FRAMES
, ec
->tx_max_coalesced_frames
);
7599 tw32(HOSTCC_TXCOAL_MAXF_INT
, ec
->tx_max_coalesced_frames_irq
);
7601 tw32(HOSTCC_TXCOL_TICKS
, 0);
7602 tw32(HOSTCC_TXMAX_FRAMES
, 0);
7603 tw32(HOSTCC_TXCOAL_MAXF_INT
, 0);
7606 if (!(tp
->tg3_flags3
& TG3_FLG3_ENABLE_RSS
)) {
7607 tw32(HOSTCC_RXCOL_TICKS
, ec
->rx_coalesce_usecs
);
7608 tw32(HOSTCC_RXMAX_FRAMES
, ec
->rx_max_coalesced_frames
);
7609 tw32(HOSTCC_RXCOAL_MAXF_INT
, ec
->rx_max_coalesced_frames_irq
);
7611 tw32(HOSTCC_RXCOL_TICKS
, 0);
7612 tw32(HOSTCC_RXMAX_FRAMES
, 0);
7613 tw32(HOSTCC_RXCOAL_MAXF_INT
, 0);
7616 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
7617 u32 val
= ec
->stats_block_coalesce_usecs
;
7619 tw32(HOSTCC_RXCOAL_TICK_INT
, ec
->rx_coalesce_usecs_irq
);
7620 tw32(HOSTCC_TXCOAL_TICK_INT
, ec
->tx_coalesce_usecs_irq
);
7622 if (!netif_carrier_ok(tp
->dev
))
7625 tw32(HOSTCC_STAT_COAL_TICKS
, val
);
7628 for (i
= 0; i
< tp
->irq_cnt
- 1; i
++) {
7631 reg
= HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18;
7632 tw32(reg
, ec
->rx_coalesce_usecs
);
7633 reg
= HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18;
7634 tw32(reg
, ec
->rx_max_coalesced_frames
);
7635 reg
= HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
7636 tw32(reg
, ec
->rx_max_coalesced_frames_irq
);
7638 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_TSS
) {
7639 reg
= HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18;
7640 tw32(reg
, ec
->tx_coalesce_usecs
);
7641 reg
= HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18;
7642 tw32(reg
, ec
->tx_max_coalesced_frames
);
7643 reg
= HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
7644 tw32(reg
, ec
->tx_max_coalesced_frames_irq
);
7648 for (; i
< tp
->irq_max
- 1; i
++) {
7649 tw32(HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18, 0);
7650 tw32(HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
7651 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
7653 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_TSS
) {
7654 tw32(HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18, 0);
7655 tw32(HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
7656 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
7661 /* tp->lock is held. */
7662 static void tg3_rings_reset(struct tg3
*tp
)
7665 u32 stblk
, txrcb
, rxrcb
, limit
;
7666 struct tg3_napi
*tnapi
= &tp
->napi
[0];
7668 /* Disable all transmit rings but the first. */
7669 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
7670 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 16;
7671 else if (tp
->tg3_flags3
& TG3_FLG3_5717_PLUS
)
7672 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 4;
7673 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
7674 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 2;
7676 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
;
7678 for (txrcb
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
;
7679 txrcb
< limit
; txrcb
+= TG3_BDINFO_SIZE
)
7680 tg3_write_mem(tp
, txrcb
+ TG3_BDINFO_MAXLEN_FLAGS
,
7681 BDINFO_FLAGS_DISABLED
);
7684 /* Disable all receive return rings but the first. */
7685 if (tp
->tg3_flags3
& TG3_FLG3_5717_PLUS
)
7686 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 17;
7687 else if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
7688 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 16;
7689 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
7690 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
7691 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 4;
7693 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
;
7695 for (rxrcb
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
;
7696 rxrcb
< limit
; rxrcb
+= TG3_BDINFO_SIZE
)
7697 tg3_write_mem(tp
, rxrcb
+ TG3_BDINFO_MAXLEN_FLAGS
,
7698 BDINFO_FLAGS_DISABLED
);
7700 /* Disable interrupts */
7701 tw32_mailbox_f(tp
->napi
[0].int_mbox
, 1);
7703 /* Zero mailbox registers. */
7704 if (tp
->tg3_flags
& TG3_FLAG_SUPPORT_MSIX
) {
7705 for (i
= 1; i
< tp
->irq_max
; i
++) {
7706 tp
->napi
[i
].tx_prod
= 0;
7707 tp
->napi
[i
].tx_cons
= 0;
7708 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_TSS
)
7709 tw32_mailbox(tp
->napi
[i
].prodmbox
, 0);
7710 tw32_rx_mbox(tp
->napi
[i
].consmbox
, 0);
7711 tw32_mailbox_f(tp
->napi
[i
].int_mbox
, 1);
7713 if (!(tp
->tg3_flags3
& TG3_FLG3_ENABLE_TSS
))
7714 tw32_mailbox(tp
->napi
[0].prodmbox
, 0);
7716 tp
->napi
[0].tx_prod
= 0;
7717 tp
->napi
[0].tx_cons
= 0;
7718 tw32_mailbox(tp
->napi
[0].prodmbox
, 0);
7719 tw32_rx_mbox(tp
->napi
[0].consmbox
, 0);
7722 /* Make sure the NIC-based send BD rings are disabled. */
7723 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
7724 u32 mbox
= MAILBOX_SNDNIC_PROD_IDX_0
+ TG3_64BIT_REG_LOW
;
7725 for (i
= 0; i
< 16; i
++)
7726 tw32_tx_mbox(mbox
+ i
* 8, 0);
7729 txrcb
= NIC_SRAM_SEND_RCB
;
7730 rxrcb
= NIC_SRAM_RCV_RET_RCB
;
7732 /* Clear status block in ram. */
7733 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
7735 /* Set status block DMA address */
7736 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
7737 ((u64
) tnapi
->status_mapping
>> 32));
7738 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
7739 ((u64
) tnapi
->status_mapping
& 0xffffffff));
7741 if (tnapi
->tx_ring
) {
7742 tg3_set_bdinfo(tp
, txrcb
, tnapi
->tx_desc_mapping
,
7743 (TG3_TX_RING_SIZE
<<
7744 BDINFO_FLAGS_MAXLEN_SHIFT
),
7745 NIC_SRAM_TX_BUFFER_DESC
);
7746 txrcb
+= TG3_BDINFO_SIZE
;
7749 if (tnapi
->rx_rcb
) {
7750 tg3_set_bdinfo(tp
, rxrcb
, tnapi
->rx_rcb_mapping
,
7751 (tp
->rx_ret_ring_mask
+ 1) <<
7752 BDINFO_FLAGS_MAXLEN_SHIFT
, 0);
7753 rxrcb
+= TG3_BDINFO_SIZE
;
7756 stblk
= HOSTCC_STATBLCK_RING1
;
7758 for (i
= 1, tnapi
++; i
< tp
->irq_cnt
; i
++, tnapi
++) {
7759 u64 mapping
= (u64
)tnapi
->status_mapping
;
7760 tw32(stblk
+ TG3_64BIT_REG_HIGH
, mapping
>> 32);
7761 tw32(stblk
+ TG3_64BIT_REG_LOW
, mapping
& 0xffffffff);
7763 /* Clear status block in ram. */
7764 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
7766 if (tnapi
->tx_ring
) {
7767 tg3_set_bdinfo(tp
, txrcb
, tnapi
->tx_desc_mapping
,
7768 (TG3_TX_RING_SIZE
<<
7769 BDINFO_FLAGS_MAXLEN_SHIFT
),
7770 NIC_SRAM_TX_BUFFER_DESC
);
7771 txrcb
+= TG3_BDINFO_SIZE
;
7774 tg3_set_bdinfo(tp
, rxrcb
, tnapi
->rx_rcb_mapping
,
7775 ((tp
->rx_ret_ring_mask
+ 1) <<
7776 BDINFO_FLAGS_MAXLEN_SHIFT
), 0);
7779 rxrcb
+= TG3_BDINFO_SIZE
;
7783 /* tp->lock is held. */
7784 static int tg3_reset_hw(struct tg3
*tp
, int reset_phy
)
7786 u32 val
, rdmac_mode
;
7788 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
7790 tg3_disable_ints(tp
);
7794 tg3_write_sig_pre_reset(tp
, RESET_KIND_INIT
);
7796 if (tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
)
7797 tg3_abort_hw(tp
, 1);
7799 /* Enable MAC control of LPI */
7800 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
) {
7801 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL
,
7802 TG3_CPMU_EEE_LNKIDL_PCIE_NL0
|
7803 TG3_CPMU_EEE_LNKIDL_UART_IDL
);
7805 tw32_f(TG3_CPMU_EEE_CTRL
,
7806 TG3_CPMU_EEE_CTRL_EXIT_20_1_US
);
7808 val
= TG3_CPMU_EEEMD_ERLY_L1_XIT_DET
|
7809 TG3_CPMU_EEEMD_LPI_IN_TX
|
7810 TG3_CPMU_EEEMD_LPI_IN_RX
|
7811 TG3_CPMU_EEEMD_EEE_ENABLE
;
7813 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5717
)
7814 val
|= TG3_CPMU_EEEMD_SND_IDX_DET_EN
;
7816 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
)
7817 val
|= TG3_CPMU_EEEMD_APE_TX_DET_EN
;
7819 tw32_f(TG3_CPMU_EEE_MODE
, val
);
7821 tw32_f(TG3_CPMU_EEE_DBTMR1
,
7822 TG3_CPMU_DBTMR1_PCIEXIT_2047US
|
7823 TG3_CPMU_DBTMR1_LNKIDLE_2047US
);
7825 tw32_f(TG3_CPMU_EEE_DBTMR2
,
7826 TG3_CPMU_DBTMR2_APE_TX_2047US
|
7827 TG3_CPMU_DBTMR2_TXIDXEQ_2047US
);
7833 err
= tg3_chip_reset(tp
);
7837 tg3_write_sig_legacy(tp
, RESET_KIND_INIT
);
7839 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
) {
7840 val
= tr32(TG3_CPMU_CTRL
);
7841 val
&= ~(CPMU_CTRL_LINK_AWARE_MODE
| CPMU_CTRL_LINK_IDLE_MODE
);
7842 tw32(TG3_CPMU_CTRL
, val
);
7844 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
7845 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
7846 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
7847 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
7849 val
= tr32(TG3_CPMU_LNK_AWARE_PWRMD
);
7850 val
&= ~CPMU_LNK_AWARE_MACCLK_MASK
;
7851 val
|= CPMU_LNK_AWARE_MACCLK_6_25
;
7852 tw32(TG3_CPMU_LNK_AWARE_PWRMD
, val
);
7854 val
= tr32(TG3_CPMU_HST_ACC
);
7855 val
&= ~CPMU_HST_ACC_MACCLK_MASK
;
7856 val
|= CPMU_HST_ACC_MACCLK_6_25
;
7857 tw32(TG3_CPMU_HST_ACC
, val
);
7860 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
) {
7861 val
= tr32(PCIE_PWR_MGMT_THRESH
) & ~PCIE_PWR_MGMT_L1_THRESH_MSK
;
7862 val
|= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN
|
7863 PCIE_PWR_MGMT_L1_THRESH_4MS
;
7864 tw32(PCIE_PWR_MGMT_THRESH
, val
);
7866 val
= tr32(TG3_PCIE_EIDLE_DELAY
) & ~TG3_PCIE_EIDLE_DELAY_MASK
;
7867 tw32(TG3_PCIE_EIDLE_DELAY
, val
| TG3_PCIE_EIDLE_DELAY_13_CLKS
);
7869 tw32(TG3_CORR_ERR_STAT
, TG3_CORR_ERR_STAT_CLEAR
);
7871 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
7872 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
7875 if (tp
->tg3_flags3
& TG3_FLG3_L1PLLPD_EN
) {
7876 u32 grc_mode
= tr32(GRC_MODE
);
7878 /* Access the lower 1K of PL PCIE block registers. */
7879 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
7880 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_PL_SEL
);
7882 val
= tr32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
);
7883 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
,
7884 val
| TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN
);
7886 tw32(GRC_MODE
, grc_mode
);
7889 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
) {
7890 if (tp
->pci_chip_rev_id
== CHIPREV_ID_57765_A0
) {
7891 u32 grc_mode
= tr32(GRC_MODE
);
7893 /* Access the lower 1K of PL PCIE block registers. */
7894 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
7895 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_PL_SEL
);
7897 val
= tr32(TG3_PCIE_TLDLPL_PORT
+
7898 TG3_PCIE_PL_LO_PHYCTL5
);
7899 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL5
,
7900 val
| TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ
);
7902 tw32(GRC_MODE
, grc_mode
);
7905 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
7906 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
7907 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
7908 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
7911 /* This works around an issue with Athlon chipsets on
7912 * B3 tigon3 silicon. This bit has no effect on any
7913 * other revision. But do not set this on PCI Express
7914 * chips and don't even touch the clocks if the CPMU is present.
7916 if (!(tp
->tg3_flags
& TG3_FLAG_CPMU_PRESENT
)) {
7917 if (!(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
))
7918 tp
->pci_clock_ctrl
|= CLOCK_CTRL_DELAY_PCI_GRANT
;
7919 tw32_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
7922 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
7923 (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
)) {
7924 val
= tr32(TG3PCI_PCISTATE
);
7925 val
|= PCISTATE_RETRY_SAME_DMA
;
7926 tw32(TG3PCI_PCISTATE
, val
);
7929 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
) {
7930 /* Allow reads and writes to the
7931 * APE register and memory space.
7933 val
= tr32(TG3PCI_PCISTATE
);
7934 val
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
7935 PCISTATE_ALLOW_APE_SHMEM_WR
|
7936 PCISTATE_ALLOW_APE_PSPACE_WR
;
7937 tw32(TG3PCI_PCISTATE
, val
);
7940 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_BX
) {
7941 /* Enable some hw fixes. */
7942 val
= tr32(TG3PCI_MSI_DATA
);
7943 val
|= (1 << 26) | (1 << 28) | (1 << 29);
7944 tw32(TG3PCI_MSI_DATA
, val
);
7947 /* Descriptor ring init may make accesses to the
7948 * NIC SRAM area to setup the TX descriptors, so we
7949 * can only do this after the hardware has been
7950 * successfully reset.
7952 err
= tg3_init_rings(tp
);
7956 if (tp
->tg3_flags3
& TG3_FLG3_57765_PLUS
) {
7957 val
= tr32(TG3PCI_DMA_RW_CTRL
) &
7958 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT
;
7959 if (tp
->pci_chip_rev_id
== CHIPREV_ID_57765_A0
)
7960 val
&= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK
;
7961 tw32(TG3PCI_DMA_RW_CTRL
, val
| tp
->dma_rwctrl
);
7962 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5784
&&
7963 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5761
) {
7964 /* This value is determined during the probe time DMA
7965 * engine test, tg3_test_dma.
7967 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
7970 tp
->grc_mode
&= ~(GRC_MODE_HOST_SENDBDS
|
7971 GRC_MODE_4X_NIC_SEND_RINGS
|
7972 GRC_MODE_NO_TX_PHDR_CSUM
|
7973 GRC_MODE_NO_RX_PHDR_CSUM
);
7974 tp
->grc_mode
|= GRC_MODE_HOST_SENDBDS
;
7976 /* Pseudo-header checksum is done by hardware logic and not
7977 * the offload processers, so make the chip do the pseudo-
7978 * header checksums on receive. For transmit it is more
7979 * convenient to do the pseudo-header checksum in software
7980 * as Linux does that on transmit for us in all cases.
7982 tp
->grc_mode
|= GRC_MODE_NO_TX_PHDR_CSUM
;
7986 (GRC_MODE_IRQ_ON_MAC_ATTN
| GRC_MODE_HOST_STACKUP
));
7988 /* Setup the timer prescalar register. Clock is always 66Mhz. */
7989 val
= tr32(GRC_MISC_CFG
);
7991 val
|= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT
);
7992 tw32(GRC_MISC_CFG
, val
);
7994 /* Initialize MBUF/DESC pool. */
7995 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) {
7997 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5705
) {
7998 tw32(BUFMGR_MB_POOL_ADDR
, NIC_SRAM_MBUF_POOL_BASE
);
7999 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
8000 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE64
);
8002 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE96
);
8003 tw32(BUFMGR_DMA_DESC_POOL_ADDR
, NIC_SRAM_DMA_DESC_POOL_BASE
);
8004 tw32(BUFMGR_DMA_DESC_POOL_SIZE
, NIC_SRAM_DMA_DESC_POOL_SIZE
);
8005 } else if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) {
8008 fw_len
= tp
->fw_len
;
8009 fw_len
= (fw_len
+ (0x80 - 1)) & ~(0x80 - 1);
8010 tw32(BUFMGR_MB_POOL_ADDR
,
8011 NIC_SRAM_MBUF_POOL_BASE5705
+ fw_len
);
8012 tw32(BUFMGR_MB_POOL_SIZE
,
8013 NIC_SRAM_MBUF_POOL_SIZE5705
- fw_len
- 0xa00);
8016 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
8017 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
8018 tp
->bufmgr_config
.mbuf_read_dma_low_water
);
8019 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
8020 tp
->bufmgr_config
.mbuf_mac_rx_low_water
);
8021 tw32(BUFMGR_MB_HIGH_WATER
,
8022 tp
->bufmgr_config
.mbuf_high_water
);
8024 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
8025 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
);
8026 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
8027 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
);
8028 tw32(BUFMGR_MB_HIGH_WATER
,
8029 tp
->bufmgr_config
.mbuf_high_water_jumbo
);
8031 tw32(BUFMGR_DMA_LOW_WATER
,
8032 tp
->bufmgr_config
.dma_low_water
);
8033 tw32(BUFMGR_DMA_HIGH_WATER
,
8034 tp
->bufmgr_config
.dma_high_water
);
8036 val
= BUFMGR_MODE_ENABLE
| BUFMGR_MODE_ATTN_ENABLE
;
8037 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
)
8038 val
|= BUFMGR_MODE_NO_TX_UNDERRUN
;
8039 tw32(BUFMGR_MODE
, val
);
8040 for (i
= 0; i
< 2000; i
++) {
8041 if (tr32(BUFMGR_MODE
) & BUFMGR_MODE_ENABLE
)
8046 netdev_err(tp
->dev
, "%s cannot enable BUFMGR\n", __func__
);
8050 /* Setup replenish threshold. */
8051 val
= tp
->rx_pending
/ 8;
8054 else if (val
> tp
->rx_std_max_post
)
8055 val
= tp
->rx_std_max_post
;
8056 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
8057 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5906_A1
)
8058 tw32(ISO_PKT_TX
, (tr32(ISO_PKT_TX
) & ~0x3) | 0x2);
8060 if (val
> (TG3_RX_INTERNAL_RING_SZ_5906
/ 2))
8061 val
= TG3_RX_INTERNAL_RING_SZ_5906
/ 2;
8064 tw32(RCVBDI_STD_THRESH
, val
);
8066 /* Initialize TG3_BDINFO's at:
8067 * RCVDBDI_STD_BD: standard eth size rx ring
8068 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8069 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8072 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8073 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8074 * ring attribute flags
8075 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8077 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8078 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8080 * The size of each ring is fixed in the firmware, but the location is
8083 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
8084 ((u64
) tpr
->rx_std_mapping
>> 32));
8085 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
8086 ((u64
) tpr
->rx_std_mapping
& 0xffffffff));
8087 if (!(tp
->tg3_flags3
& TG3_FLG3_5717_PLUS
))
8088 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_NIC_ADDR
,
8089 NIC_SRAM_RX_BUFFER_DESC
);
8091 /* Disable the mini ring */
8092 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
8093 tw32(RCVDBDI_MINI_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
8094 BDINFO_FLAGS_DISABLED
);
8096 /* Program the jumbo buffer descriptor ring control
8097 * blocks on those devices that have them.
8099 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
8100 ((tp
->tg3_flags
& TG3_FLAG_JUMBO_CAPABLE
) &&
8101 !(tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))) {
8102 /* Setup replenish threshold. */
8103 tw32(RCVBDI_JUMBO_THRESH
, tp
->rx_jumbo_pending
/ 8);
8105 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) {
8106 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
8107 ((u64
) tpr
->rx_jmb_mapping
>> 32));
8108 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
8109 ((u64
) tpr
->rx_jmb_mapping
& 0xffffffff));
8110 val
= TG3_RX_JMB_RING_SIZE(tp
) <<
8111 BDINFO_FLAGS_MAXLEN_SHIFT
;
8112 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
8113 val
| BDINFO_FLAGS_USE_EXT_RECV
);
8114 if (!(tp
->tg3_flags3
& TG3_FLG3_USE_JUMBO_BDFLAG
) ||
8115 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
8116 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_NIC_ADDR
,
8117 NIC_SRAM_RX_JUMBO_BUFFER_DESC
);
8119 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
8120 BDINFO_FLAGS_DISABLED
);
8123 if (tp
->tg3_flags3
& TG3_FLG3_57765_PLUS
) {
8124 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
8125 val
= TG3_RX_STD_MAX_SIZE_5700
;
8127 val
= TG3_RX_STD_MAX_SIZE_5717
;
8128 val
<<= BDINFO_FLAGS_MAXLEN_SHIFT
;
8129 val
|= (TG3_RX_STD_DMA_SZ
<< 2);
8131 val
= TG3_RX_STD_DMA_SZ
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
8133 val
= TG3_RX_STD_MAX_SIZE_5700
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
8135 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
, val
);
8137 tpr
->rx_std_prod_idx
= tp
->rx_pending
;
8138 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
, tpr
->rx_std_prod_idx
);
8140 tpr
->rx_jmb_prod_idx
= (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) ?
8141 tp
->rx_jumbo_pending
: 0;
8142 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
, tpr
->rx_jmb_prod_idx
);
8144 if (tp
->tg3_flags3
& TG3_FLG3_57765_PLUS
) {
8145 tw32(STD_REPLENISH_LWM
, 32);
8146 tw32(JMB_REPLENISH_LWM
, 16);
8149 tg3_rings_reset(tp
);
8151 /* Initialize MAC address and backoff seed. */
8152 __tg3_set_mac_addr(tp
, 0);
8154 /* MTU + ethernet header + FCS + optional VLAN tag */
8155 tw32(MAC_RX_MTU_SIZE
,
8156 tp
->dev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
+ VLAN_HLEN
);
8158 /* The slot time is changed by tg3_setup_phy if we
8159 * run at gigabit with half duplex.
8161 tw32(MAC_TX_LENGTHS
,
8162 (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
8163 (6 << TX_LENGTHS_IPG_SHIFT
) |
8164 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
));
8166 /* Receive rules. */
8167 tw32(MAC_RCV_RULE_CFG
, RCV_RULE_CFG_DEFAULT_CLASS
);
8168 tw32(RCVLPC_CONFIG
, 0x0181);
8170 /* Calculate RDMAC_MODE setting early, we need it to determine
8171 * the RCVLPC_STATE_ENABLE mask.
8173 rdmac_mode
= (RDMAC_MODE_ENABLE
| RDMAC_MODE_TGTABORT_ENAB
|
8174 RDMAC_MODE_MSTABORT_ENAB
| RDMAC_MODE_PARITYERR_ENAB
|
8175 RDMAC_MODE_ADDROFLOW_ENAB
| RDMAC_MODE_FIFOOFLOW_ENAB
|
8176 RDMAC_MODE_FIFOURUN_ENAB
| RDMAC_MODE_FIFOOREAD_ENAB
|
8177 RDMAC_MODE_LNGREAD_ENAB
);
8179 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
)
8180 rdmac_mode
|= RDMAC_MODE_MULT_DMA_RD_DIS
;
8182 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
8183 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
8184 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
8185 rdmac_mode
|= RDMAC_MODE_BD_SBD_CRPT_ENAB
|
8186 RDMAC_MODE_MBUF_RBD_CRPT_ENAB
|
8187 RDMAC_MODE_MBUF_SBD_CRPT_ENAB
;
8189 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
8190 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) {
8191 if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
&&
8192 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
8193 rdmac_mode
|= RDMAC_MODE_FIFO_SIZE_128
;
8194 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
8195 !(tp
->tg3_flags2
& TG3_FLG2_IS_5788
)) {
8196 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
8200 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
)
8201 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
8203 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
8204 rdmac_mode
|= RDMAC_MODE_IPV4_LSO_EN
;
8206 if ((tp
->tg3_flags2
& TG3_FLG2_HW_TSO_3
) ||
8207 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
8208 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
8209 rdmac_mode
|= RDMAC_MODE_IPV6_LSO_EN
;
8211 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
8212 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
8213 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
8214 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
||
8215 (tp
->tg3_flags3
& TG3_FLG3_57765_PLUS
)) {
8216 val
= tr32(TG3_RDMA_RSRVCTRL_REG
);
8217 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
) {
8218 val
&= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK
|
8219 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK
|
8220 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK
);
8221 val
|= TG3_RDMA_RSRVCTRL_TXMRGN_320B
|
8222 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K
|
8223 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K
;
8225 tw32(TG3_RDMA_RSRVCTRL_REG
,
8226 val
| TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX
);
8229 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
) {
8230 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
8231 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
|
8232 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K
|
8233 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K
);
8236 /* Receive/send statistics. */
8237 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) {
8238 val
= tr32(RCVLPC_STATS_ENABLE
);
8239 val
&= ~RCVLPC_STATSENAB_DACK_FIX
;
8240 tw32(RCVLPC_STATS_ENABLE
, val
);
8241 } else if ((rdmac_mode
& RDMAC_MODE_FIFO_SIZE_128
) &&
8242 (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
)) {
8243 val
= tr32(RCVLPC_STATS_ENABLE
);
8244 val
&= ~RCVLPC_STATSENAB_LNGBRST_RFIX
;
8245 tw32(RCVLPC_STATS_ENABLE
, val
);
8247 tw32(RCVLPC_STATS_ENABLE
, 0xffffff);
8249 tw32(RCVLPC_STATSCTRL
, RCVLPC_STATSCTRL_ENABLE
);
8250 tw32(SNDDATAI_STATSENAB
, 0xffffff);
8251 tw32(SNDDATAI_STATSCTRL
,
8252 (SNDDATAI_SCTRL_ENABLE
|
8253 SNDDATAI_SCTRL_FASTUPD
));
8255 /* Setup host coalescing engine. */
8256 tw32(HOSTCC_MODE
, 0);
8257 for (i
= 0; i
< 2000; i
++) {
8258 if (!(tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
))
8263 __tg3_set_coalesce(tp
, &tp
->coal
);
8265 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
8266 /* Status/statistics block address. See tg3_timer,
8267 * the tg3_periodic_fetch_stats call there, and
8268 * tg3_get_stats to see how this works for 5705/5750 chips.
8270 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
8271 ((u64
) tp
->stats_mapping
>> 32));
8272 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
8273 ((u64
) tp
->stats_mapping
& 0xffffffff));
8274 tw32(HOSTCC_STATS_BLK_NIC_ADDR
, NIC_SRAM_STATS_BLK
);
8276 tw32(HOSTCC_STATUS_BLK_NIC_ADDR
, NIC_SRAM_STATUS_BLK
);
8278 /* Clear statistics and status block memory areas */
8279 for (i
= NIC_SRAM_STATS_BLK
;
8280 i
< NIC_SRAM_STATUS_BLK
+ TG3_HW_STATUS_SIZE
;
8282 tg3_write_mem(tp
, i
, 0);
8287 tw32(HOSTCC_MODE
, HOSTCC_MODE_ENABLE
| tp
->coalesce_mode
);
8289 tw32(RCVCC_MODE
, RCVCC_MODE_ENABLE
| RCVCC_MODE_ATTN_ENABLE
);
8290 tw32(RCVLPC_MODE
, RCVLPC_MODE_ENABLE
);
8291 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
8292 tw32(RCVLSC_MODE
, RCVLSC_MODE_ENABLE
| RCVLSC_MODE_ATTN_ENABLE
);
8294 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
8295 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
8296 /* reset to prevent losing 1st rx packet intermittently */
8297 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
8301 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
)
8302 tp
->mac_mode
= MAC_MODE_APE_TX_EN
| MAC_MODE_APE_RX_EN
;
8305 tp
->mac_mode
|= MAC_MODE_TXSTAT_ENABLE
| MAC_MODE_RXSTAT_ENABLE
|
8306 MAC_MODE_TDE_ENABLE
| MAC_MODE_RDE_ENABLE
| MAC_MODE_FHDE_ENABLE
;
8307 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) &&
8308 !(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
8309 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
)
8310 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
8311 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_RXSTAT_CLEAR
| MAC_MODE_TXSTAT_CLEAR
);
8314 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8315 * If TG3_FLG2_IS_NIC is zero, we should read the
8316 * register to preserve the GPIO settings for LOMs. The GPIOs,
8317 * whether used as inputs or outputs, are set by boot code after
8320 if (!(tp
->tg3_flags2
& TG3_FLG2_IS_NIC
)) {
8323 gpio_mask
= GRC_LCLCTRL_GPIO_OE0
| GRC_LCLCTRL_GPIO_OE1
|
8324 GRC_LCLCTRL_GPIO_OE2
| GRC_LCLCTRL_GPIO_OUTPUT0
|
8325 GRC_LCLCTRL_GPIO_OUTPUT1
| GRC_LCLCTRL_GPIO_OUTPUT2
;
8327 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
8328 gpio_mask
|= GRC_LCLCTRL_GPIO_OE3
|
8329 GRC_LCLCTRL_GPIO_OUTPUT3
;
8331 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
8332 gpio_mask
|= GRC_LCLCTRL_GPIO_UART_SEL
;
8334 tp
->grc_local_ctrl
&= ~gpio_mask
;
8335 tp
->grc_local_ctrl
|= tr32(GRC_LOCAL_CTRL
) & gpio_mask
;
8337 /* GPIO1 must be driven high for eeprom write protect */
8338 if (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
)
8339 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
8340 GRC_LCLCTRL_GPIO_OUTPUT1
);
8342 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
8345 if ((tp
->tg3_flags2
& TG3_FLG2_USING_MSIX
) &&
8347 val
= tr32(MSGINT_MODE
);
8348 val
|= MSGINT_MODE_MULTIVEC_EN
| MSGINT_MODE_ENABLE
;
8349 tw32(MSGINT_MODE
, val
);
8352 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
8353 tw32_f(DMAC_MODE
, DMAC_MODE_ENABLE
);
8357 val
= (WDMAC_MODE_ENABLE
| WDMAC_MODE_TGTABORT_ENAB
|
8358 WDMAC_MODE_MSTABORT_ENAB
| WDMAC_MODE_PARITYERR_ENAB
|
8359 WDMAC_MODE_ADDROFLOW_ENAB
| WDMAC_MODE_FIFOOFLOW_ENAB
|
8360 WDMAC_MODE_FIFOURUN_ENAB
| WDMAC_MODE_FIFOOREAD_ENAB
|
8361 WDMAC_MODE_LNGREAD_ENAB
);
8363 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
8364 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) {
8365 if ((tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) &&
8366 (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
||
8367 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A2
)) {
8369 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
8370 !(tp
->tg3_flags2
& TG3_FLG2_IS_5788
)) {
8371 val
|= WDMAC_MODE_RX_ACCEL
;
8375 /* Enable host coalescing bug fix */
8376 if (tp
->tg3_flags3
& TG3_FLG3_5755_PLUS
)
8377 val
|= WDMAC_MODE_STATUS_TAG_FIX
;
8379 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
8380 val
|= WDMAC_MODE_BURST_ALL_DATA
;
8382 tw32_f(WDMAC_MODE
, val
);
8385 if (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) {
8388 pci_read_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
8390 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
) {
8391 pcix_cmd
&= ~PCI_X_CMD_MAX_READ
;
8392 pcix_cmd
|= PCI_X_CMD_READ_2K
;
8393 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
8394 pcix_cmd
&= ~(PCI_X_CMD_MAX_SPLIT
| PCI_X_CMD_MAX_READ
);
8395 pcix_cmd
|= PCI_X_CMD_READ_2K
;
8397 pci_write_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
8401 tw32_f(RDMAC_MODE
, rdmac_mode
);
8404 tw32(RCVDCC_MODE
, RCVDCC_MODE_ENABLE
| RCVDCC_MODE_ATTN_ENABLE
);
8405 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
8406 tw32(MBFREE_MODE
, MBFREE_MODE_ENABLE
);
8408 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
8410 SNDDATAC_MODE_ENABLE
| SNDDATAC_MODE_CDELAY
);
8412 tw32(SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
);
8414 tw32(SNDBDC_MODE
, SNDBDC_MODE_ENABLE
| SNDBDC_MODE_ATTN_ENABLE
);
8415 tw32(RCVBDI_MODE
, RCVBDI_MODE_ENABLE
| RCVBDI_MODE_RCB_ATTN_ENAB
);
8416 val
= RCVDBDI_MODE_ENABLE
| RCVDBDI_MODE_INV_RING_SZ
;
8417 if (tp
->tg3_flags3
& TG3_FLG3_LRG_PROD_RING_CAP
)
8418 val
|= RCVDBDI_MODE_LRG_RING_SZ
;
8419 tw32(RCVDBDI_MODE
, val
);
8420 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
);
8421 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
8422 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
| 0x8);
8423 val
= SNDBDI_MODE_ENABLE
| SNDBDI_MODE_ATTN_ENABLE
;
8424 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_TSS
)
8425 val
|= SNDBDI_MODE_MULTI_TXQ_EN
;
8426 tw32(SNDBDI_MODE
, val
);
8427 tw32(SNDBDS_MODE
, SNDBDS_MODE_ENABLE
| SNDBDS_MODE_ATTN_ENABLE
);
8429 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
) {
8430 err
= tg3_load_5701_a0_firmware_fix(tp
);
8435 if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) {
8436 err
= tg3_load_tso_firmware(tp
);
8441 tp
->tx_mode
= TX_MODE_ENABLE
;
8442 if ((tp
->tg3_flags3
& TG3_FLG3_5755_PLUS
) ||
8443 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
8444 tp
->tx_mode
|= TX_MODE_MBUF_LOCKUP_FIX
;
8445 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
8448 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_RSS
) {
8449 u32 reg
= MAC_RSS_INDIR_TBL_0
;
8450 u8
*ent
= (u8
*)&val
;
8452 /* Setup the indirection table */
8453 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++) {
8454 int idx
= i
% sizeof(val
);
8456 ent
[idx
] = i
% (tp
->irq_cnt
- 1);
8457 if (idx
== sizeof(val
) - 1) {
8463 /* Setup the "secret" hash key. */
8464 tw32(MAC_RSS_HASH_KEY_0
, 0x5f865437);
8465 tw32(MAC_RSS_HASH_KEY_1
, 0xe4ac62cc);
8466 tw32(MAC_RSS_HASH_KEY_2
, 0x50103a45);
8467 tw32(MAC_RSS_HASH_KEY_3
, 0x36621985);
8468 tw32(MAC_RSS_HASH_KEY_4
, 0xbf14c0e8);
8469 tw32(MAC_RSS_HASH_KEY_5
, 0x1bc27a1e);
8470 tw32(MAC_RSS_HASH_KEY_6
, 0x84f4b556);
8471 tw32(MAC_RSS_HASH_KEY_7
, 0x094ea6fe);
8472 tw32(MAC_RSS_HASH_KEY_8
, 0x7dda01e7);
8473 tw32(MAC_RSS_HASH_KEY_9
, 0xc04d7481);
8476 tp
->rx_mode
= RX_MODE_ENABLE
;
8477 if (tp
->tg3_flags3
& TG3_FLG3_5755_PLUS
)
8478 tp
->rx_mode
|= RX_MODE_IPV6_CSUM_ENABLE
;
8480 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_RSS
)
8481 tp
->rx_mode
|= RX_MODE_RSS_ENABLE
|
8482 RX_MODE_RSS_ITBL_HASH_BITS_7
|
8483 RX_MODE_RSS_IPV6_HASH_EN
|
8484 RX_MODE_RSS_TCP_IPV6_HASH_EN
|
8485 RX_MODE_RSS_IPV4_HASH_EN
|
8486 RX_MODE_RSS_TCP_IPV4_HASH_EN
;
8488 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8491 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
8493 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
8494 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
8495 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
8498 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8501 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
8502 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) &&
8503 !(tp
->phy_flags
& TG3_PHYFLG_SERDES_PREEMPHASIS
)) {
8504 /* Set drive transmission level to 1.2V */
8505 /* only if the signal pre-emphasis bit is not set */
8506 val
= tr32(MAC_SERDES_CFG
);
8509 tw32(MAC_SERDES_CFG
, val
);
8511 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
)
8512 tw32(MAC_SERDES_CFG
, 0x616000);
8515 /* Prevent chip from dropping frames when flow control
8518 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
8522 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME
, val
);
8524 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
8525 (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
8526 /* Use hardware link auto-negotiation */
8527 tp
->tg3_flags2
|= TG3_FLG2_HW_AUTONEG
;
8530 if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
8531 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
)) {
8534 tmp
= tr32(SERDES_RX_CTRL
);
8535 tw32(SERDES_RX_CTRL
, tmp
| SERDES_RX_SIG_DETECT
);
8536 tp
->grc_local_ctrl
&= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT
;
8537 tp
->grc_local_ctrl
|= GRC_LCLCTRL_USE_SIG_DETECT
;
8538 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
8541 if (!(tp
->tg3_flags3
& TG3_FLG3_USE_PHYLIB
)) {
8542 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
8543 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
8544 tp
->link_config
.speed
= tp
->link_config
.orig_speed
;
8545 tp
->link_config
.duplex
= tp
->link_config
.orig_duplex
;
8546 tp
->link_config
.autoneg
= tp
->link_config
.orig_autoneg
;
8549 err
= tg3_setup_phy(tp
, 0);
8553 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
8554 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
8557 /* Clear CRC stats. */
8558 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &tmp
)) {
8559 tg3_writephy(tp
, MII_TG3_TEST1
,
8560 tmp
| MII_TG3_TEST1_CRC_EN
);
8561 tg3_readphy(tp
, MII_TG3_RXR_COUNTERS
, &tmp
);
8566 __tg3_set_rx_mode(tp
->dev
);
8568 /* Initialize receive rules. */
8569 tw32(MAC_RCV_RULE_0
, 0xc2000000 & RCV_RULE_DISABLE_MASK
);
8570 tw32(MAC_RCV_VALUE_0
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
8571 tw32(MAC_RCV_RULE_1
, 0x86000004 & RCV_RULE_DISABLE_MASK
);
8572 tw32(MAC_RCV_VALUE_1
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
8574 if ((tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) &&
8575 !(tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
8579 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)
8583 tw32(MAC_RCV_RULE_15
, 0); tw32(MAC_RCV_VALUE_15
, 0);
8585 tw32(MAC_RCV_RULE_14
, 0); tw32(MAC_RCV_VALUE_14
, 0);
8587 tw32(MAC_RCV_RULE_13
, 0); tw32(MAC_RCV_VALUE_13
, 0);
8589 tw32(MAC_RCV_RULE_12
, 0); tw32(MAC_RCV_VALUE_12
, 0);
8591 tw32(MAC_RCV_RULE_11
, 0); tw32(MAC_RCV_VALUE_11
, 0);
8593 tw32(MAC_RCV_RULE_10
, 0); tw32(MAC_RCV_VALUE_10
, 0);
8595 tw32(MAC_RCV_RULE_9
, 0); tw32(MAC_RCV_VALUE_9
, 0);
8597 tw32(MAC_RCV_RULE_8
, 0); tw32(MAC_RCV_VALUE_8
, 0);
8599 tw32(MAC_RCV_RULE_7
, 0); tw32(MAC_RCV_VALUE_7
, 0);
8601 tw32(MAC_RCV_RULE_6
, 0); tw32(MAC_RCV_VALUE_6
, 0);
8603 tw32(MAC_RCV_RULE_5
, 0); tw32(MAC_RCV_VALUE_5
, 0);
8605 tw32(MAC_RCV_RULE_4
, 0); tw32(MAC_RCV_VALUE_4
, 0);
8607 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8609 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8617 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
)
8618 /* Write our heartbeat update interval to APE. */
8619 tg3_ape_write32(tp
, TG3_APE_HOST_HEARTBEAT_INT_MS
,
8620 APE_HOST_HEARTBEAT_INT_DISABLE
);
8622 tg3_write_sig_post_reset(tp
, RESET_KIND_INIT
);
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
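/* Editorial sketch (not part of the original source): TG3_STAT_ADD32 folds a
 * 32-bit hardware counter reading into a 64-bit software counter kept as
 * separate high/low words.  Because the low word is only ever increased by a
 * 32-bit amount, a wrap is detected when the new low value is smaller than
 * the amount just added, and the high word is carried by one.  Standalone
 * restatement below; illustration only, compiled out.
 */
#if 0
#include <stdint.h>

struct stat64_sketch {
	uint32_t low;
	uint32_t high;
};

static void stat_add32_sketch(struct stat64_sketch *st, uint32_t delta)
{
	st->low += delta;
	if (st->low < delta)	/* 32-bit wrap-around: propagate the carry */
		st->high += 1;
}
#endif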
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt) {
			u32 val = tr32(TG3_CPMU_EEE_MODE);
			tw32(TG3_CPMU_EEE_MODE,
			     val | TG3_CPMU_EEEMD_LPI_ENABLE);
		}

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			mac_stat = tr32(MAC_STATUS);

			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					/* ... */;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				/* ... */;

			tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				/* ... */
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				if (!tp->serdes_counter) {
					     ~MAC_MODE_PORT_MODE_MASK));
					tw32_f(MAC_MODE, tp->mac_mode);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
		    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	unsigned long flags;

	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			/* ... */;
	} else {
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		/* ... */;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			/* ... */
		}
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	/* Reenable MSI one shot mode. */
	if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
		val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		/* ... */;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* other failures */

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */
	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	struct msix_entry msix_ent[tp->irq_max];

	/* Just fallback to the simpler MSI mode. */

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			/* ... */;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
	}

	if (tp->irq_cnt > 1) {
		tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
	    !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
	}

	if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
		tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
	else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
		 pci_enable_msi(tp->pdev) == 0)
		tp->tg3_flags2 |= TG3_FLG2_USING_MSI;

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
		pci_disable_msix(tp->pdev);
	else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
		pci_disable_msi(tp->pdev);
	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
	tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
		} else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);

		for (i--; i >= 0; i--)
			free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);
	tp->timer_counter = tp->timer_multiplier =
		(HZ / tp->timer_offset);
	tp->asf_counter = tp->asf_multiplier =
		((HZ / tp->timer_offset) * 2);

	init_timer(&tp->timer);
	tp->timer.expires = jiffies + tp->timer_offset;
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;

	tg3_full_unlock(tp);

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		err = tg3_test_msi(tp);

		tg3_full_lock(tp, 0);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

		tg3_full_unlock(tp);
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
		u32 val = tr32(PCIE_TRANSACTION_CFG);

		tw32(PCIE_TRANSACTION_CFG,
		     val | PCIE_TRANS_CFG_1SHOT_MSI);
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_napi_disable(tp);

	tg3_free_consistent(tp);
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
						 struct rtnl_link_stats64 *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	cancel_work_sync(&tp->reset_task);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_get_stats64(tp->dev, &tp->net_stats_prev);

	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	netif_carrier_off(tp->dev);
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}

static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		}
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)

static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
static inline u32 calc_crc(unsigned char *buf, int len)
{
	for (j = 0; j < len; j++) {
		for (k = 0; k < 8; k++) {
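	/*
	 * Most of the calc_crc() body is elided above.  As an
	 * illustrative sketch only (not a claim about the exact elided
	 * code), the conventional bit-wise, reflected CRC-32 used for
	 * Ethernet multicast hashing looks like this:
	 *
	 *	u32 crc32_le_sketch(const unsigned char *buf, int len)
	 *	{
	 *		u32 reg = 0xffffffff;
	 *		int j, k;
	 *
	 *		for (j = 0; j < len; j++) {
	 *			reg ^= buf[j];
	 *			for (k = 0; k < 8; k++)
	 *				reg = (reg >> 1) ^
	 *				      ((reg & 1) ? 0xedb88320 : 0);
	 *		}
	 *		return ~reg;
	 *	}
	 */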
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			regidx = (bit & 0x60) >> 5;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}
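	/*
	 * The lines deriving "bit" from "crc" are elided above.  The
	 * register math that did survive ("regidx = (bit & 0x60) >> 5"
	 * and "mc_filter[regidx] |= (1 << bit)") implies a 128-entry
	 * hash table: a 7-bit index selects one of the four 32-bit
	 * MAC_HASH_REG_* registers (bits 6:5) and a bit within it
	 * (bits 4:0).  Purely as an illustration of that layout, and
	 * assuming the index is taken from the low 7 bits of the
	 * inverted CRC (an assumption, since that line is elided):
	 *
	 *	u32 bit = ~crc & 0x7f;
	 *	u32 regidx = (bit & 0x60) >> 5;
	 *	mc_filter[regidx] |= 1u << (bit & 0x1f);
	 */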
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
	}
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
9583 #define TG3_REGDUMP_LEN (32 * 1024)
9585 static int tg3_get_regs_len(struct net_device
*dev
)
9587 return TG3_REGDUMP_LEN
;
9590 static void tg3_get_regs(struct net_device
*dev
,
9591 struct ethtool_regs
*regs
, void *_p
)
9594 struct tg3
*tp
= netdev_priv(dev
);
9600 memset(p
, 0, TG3_REGDUMP_LEN
);
9602 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
9605 tg3_full_lock(tp
, 0);
9607 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
9608 #define GET_REG32_LOOP(base, len) \
9609 do { p = (u32 *)(orig_p + (base)); \
9610 for (i = 0; i < len; i += 4) \
9611 __GET_REG32((base) + i); \
9613 #define GET_REG32_1(reg) \
9614 do { p = (u32 *)(orig_p + (reg)); \
9615 __GET_REG32((reg)); \
9618 GET_REG32_LOOP(TG3PCI_VENDOR
, 0xb0);
9619 GET_REG32_LOOP(MAILBOX_INTERRUPT_0
, 0x200);
9620 GET_REG32_LOOP(MAC_MODE
, 0x4f0);
9621 GET_REG32_LOOP(SNDDATAI_MODE
, 0xe0);
9622 GET_REG32_1(SNDDATAC_MODE
);
9623 GET_REG32_LOOP(SNDBDS_MODE
, 0x80);
9624 GET_REG32_LOOP(SNDBDI_MODE
, 0x48);
9625 GET_REG32_1(SNDBDC_MODE
);
9626 GET_REG32_LOOP(RCVLPC_MODE
, 0x20);
9627 GET_REG32_LOOP(RCVLPC_SELLST_BASE
, 0x15c);
9628 GET_REG32_LOOP(RCVDBDI_MODE
, 0x0c);
9629 GET_REG32_LOOP(RCVDBDI_JUMBO_BD
, 0x3c);
9630 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0
, 0x44);
9631 GET_REG32_1(RCVDCC_MODE
);
9632 GET_REG32_LOOP(RCVBDI_MODE
, 0x20);
9633 GET_REG32_LOOP(RCVCC_MODE
, 0x14);
9634 GET_REG32_LOOP(RCVLSC_MODE
, 0x08);
9635 GET_REG32_1(MBFREE_MODE
);
9636 GET_REG32_LOOP(HOSTCC_MODE
, 0x100);
9637 GET_REG32_LOOP(MEMARB_MODE
, 0x10);
9638 GET_REG32_LOOP(BUFMGR_MODE
, 0x58);
9639 GET_REG32_LOOP(RDMAC_MODE
, 0x08);
9640 GET_REG32_LOOP(WDMAC_MODE
, 0x08);
9641 GET_REG32_1(RX_CPU_MODE
);
9642 GET_REG32_1(RX_CPU_STATE
);
9643 GET_REG32_1(RX_CPU_PGMCTR
);
9644 GET_REG32_1(RX_CPU_HWBKPT
);
9645 GET_REG32_1(TX_CPU_MODE
);
9646 GET_REG32_1(TX_CPU_STATE
);
9647 GET_REG32_1(TX_CPU_PGMCTR
);
9648 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0
, 0x110);
9649 GET_REG32_LOOP(FTQ_RESET
, 0x120);
9650 GET_REG32_LOOP(MSGINT_MODE
, 0x0c);
9651 GET_REG32_1(DMAC_MODE
);
9652 GET_REG32_LOOP(GRC_MODE
, 0x4c);
9653 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
)
9654 GET_REG32_LOOP(NVRAM_CMD
, 0x24);
9657 #undef GET_REG32_LOOP
9660 tg3_full_unlock(tp
);
9663 static int tg3_get_eeprom_len(struct net_device
*dev
)
9665 struct tg3
*tp
= netdev_priv(dev
);
9667 return tp
->nvram_size
;
9670 static int tg3_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
9672 struct tg3
*tp
= netdev_priv(dev
);
9675 u32 i
, offset
, len
, b_offset
, b_count
;
9678 if (tp
->tg3_flags3
& TG3_FLG3_NO_NVRAM
)
9681 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
9684 offset
= eeprom
->offset
;
9688 eeprom
->magic
= TG3_EEPROM_MAGIC
;
9691 /* adjustments to start on required 4 byte boundary */
9692 b_offset
= offset
& 3;
9693 b_count
= 4 - b_offset
;
9694 if (b_count
> len
) {
9695 /* i.e. offset=1 len=2 */
9698 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &val
);
9701 memcpy(data
, ((char *)&val
) + b_offset
, b_count
);
9704 eeprom
->len
+= b_count
;
9707 /* read bytes upto the last 4 byte boundary */
9708 pd
= &data
[eeprom
->len
];
9709 for (i
= 0; i
< (len
- (len
& 3)); i
+= 4) {
9710 ret
= tg3_nvram_read_be32(tp
, offset
+ i
, &val
);
9715 memcpy(pd
+ i
, &val
, 4);
9720 /* read last bytes not ending on 4 byte boundary */
9721 pd
= &data
[eeprom
->len
];
9723 b_offset
= offset
+ len
- b_count
;
9724 ret
= tg3_nvram_read_be32(tp
, b_offset
, &val
);
9727 memcpy(pd
, &val
, b_count
);
9728 eeprom
->len
+= b_count
;
9733 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
);
9735 static int tg3_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
9737 struct tg3
*tp
= netdev_priv(dev
);
9739 u32 offset
, len
, b_offset
, odd_len
;
9743 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
9746 if ((tp
->tg3_flags3
& TG3_FLG3_NO_NVRAM
) ||
9747 eeprom
->magic
!= TG3_EEPROM_MAGIC
)
9750 offset
= eeprom
->offset
;
9753 if ((b_offset
= (offset
& 3))) {
9754 /* adjustments to start on required 4 byte boundary */
9755 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &start
);
9766 /* adjustments to end on required 4 byte boundary */
9768 len
= (len
+ 3) & ~3;
9769 ret
= tg3_nvram_read_be32(tp
, offset
+len
-4, &end
);
9775 if (b_offset
|| odd_len
) {
9776 buf
= kmalloc(len
, GFP_KERNEL
);
9780 memcpy(buf
, &start
, 4);
9782 memcpy(buf
+len
-4, &end
, 4);
9783 memcpy(buf
+ b_offset
, data
, eeprom
->len
);
9786 ret
= tg3_nvram_write_block(tp
, offset
, len
, buf
);
9794 static int tg3_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
9796 struct tg3
*tp
= netdev_priv(dev
);
9798 if (tp
->tg3_flags3
& TG3_FLG3_USE_PHYLIB
) {
9799 struct phy_device
*phydev
;
9800 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
9802 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
9803 return phy_ethtool_gset(phydev
, cmd
);
9806 cmd
->supported
= (SUPPORTED_Autoneg
);
9808 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
9809 cmd
->supported
|= (SUPPORTED_1000baseT_Half
|
9810 SUPPORTED_1000baseT_Full
);
9812 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
9813 cmd
->supported
|= (SUPPORTED_100baseT_Half
|
9814 SUPPORTED_100baseT_Full
|
9815 SUPPORTED_10baseT_Half
|
9816 SUPPORTED_10baseT_Full
|
9818 cmd
->port
= PORT_TP
;
9820 cmd
->supported
|= SUPPORTED_FIBRE
;
9821 cmd
->port
= PORT_FIBRE
;
9824 cmd
->advertising
= tp
->link_config
.advertising
;
9825 if (netif_running(dev
)) {
9826 cmd
->speed
= tp
->link_config
.active_speed
;
9827 cmd
->duplex
= tp
->link_config
.active_duplex
;
9829 cmd
->speed
= SPEED_INVALID
;
9830 cmd
->duplex
= DUPLEX_INVALID
;
9832 cmd
->phy_address
= tp
->phy_addr
;
9833 cmd
->transceiver
= XCVR_INTERNAL
;
9834 cmd
->autoneg
= tp
->link_config
.autoneg
;
9840 static int tg3_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
9842 struct tg3
*tp
= netdev_priv(dev
);
9844 if (tp
->tg3_flags3
& TG3_FLG3_USE_PHYLIB
) {
9845 struct phy_device
*phydev
;
9846 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
9848 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
9849 return phy_ethtool_sset(phydev
, cmd
);
9852 if (cmd
->autoneg
!= AUTONEG_ENABLE
&&
9853 cmd
->autoneg
!= AUTONEG_DISABLE
)
9856 if (cmd
->autoneg
== AUTONEG_DISABLE
&&
9857 cmd
->duplex
!= DUPLEX_FULL
&&
9858 cmd
->duplex
!= DUPLEX_HALF
)
9861 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
9862 u32 mask
= ADVERTISED_Autoneg
|
9864 ADVERTISED_Asym_Pause
;
9866 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
9867 mask
|= ADVERTISED_1000baseT_Half
|
9868 ADVERTISED_1000baseT_Full
;
9870 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
9871 mask
|= ADVERTISED_100baseT_Half
|
9872 ADVERTISED_100baseT_Full
|
9873 ADVERTISED_10baseT_Half
|
9874 ADVERTISED_10baseT_Full
|
9877 mask
|= ADVERTISED_FIBRE
;
9879 if (cmd
->advertising
& ~mask
)
9882 mask
&= (ADVERTISED_1000baseT_Half
|
9883 ADVERTISED_1000baseT_Full
|
9884 ADVERTISED_100baseT_Half
|
9885 ADVERTISED_100baseT_Full
|
9886 ADVERTISED_10baseT_Half
|
9887 ADVERTISED_10baseT_Full
);
9889 cmd
->advertising
&= mask
;
9891 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) {
9892 if (cmd
->speed
!= SPEED_1000
)
9895 if (cmd
->duplex
!= DUPLEX_FULL
)
9898 if (cmd
->speed
!= SPEED_100
&&
9899 cmd
->speed
!= SPEED_10
)
9904 tg3_full_lock(tp
, 0);
9906 tp
->link_config
.autoneg
= cmd
->autoneg
;
9907 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
9908 tp
->link_config
.advertising
= (cmd
->advertising
|
9909 ADVERTISED_Autoneg
);
9910 tp
->link_config
.speed
= SPEED_INVALID
;
9911 tp
->link_config
.duplex
= DUPLEX_INVALID
;
9913 tp
->link_config
.advertising
= 0;
9914 tp
->link_config
.speed
= cmd
->speed
;
9915 tp
->link_config
.duplex
= cmd
->duplex
;
9918 tp
->link_config
.orig_speed
= tp
->link_config
.speed
;
9919 tp
->link_config
.orig_duplex
= tp
->link_config
.duplex
;
9920 tp
->link_config
.orig_autoneg
= tp
->link_config
.autoneg
;
9922 if (netif_running(dev
))
9923 tg3_setup_phy(tp
, 1);
9925 tg3_full_unlock(tp
);
9930 static void tg3_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
9932 struct tg3
*tp
= netdev_priv(dev
);
9934 strcpy(info
->driver
, DRV_MODULE_NAME
);
9935 strcpy(info
->version
, DRV_MODULE_VERSION
);
9936 strcpy(info
->fw_version
, tp
->fw_ver
);
9937 strcpy(info
->bus_info
, pci_name(tp
->pdev
));
9940 static void tg3_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
9942 struct tg3
*tp
= netdev_priv(dev
);
9944 if ((tp
->tg3_flags
& TG3_FLAG_WOL_CAP
) &&
9945 device_can_wakeup(&tp
->pdev
->dev
))
9946 wol
->supported
= WAKE_MAGIC
;
9950 if ((tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) &&
9951 device_can_wakeup(&tp
->pdev
->dev
))
9952 wol
->wolopts
= WAKE_MAGIC
;
9953 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
9956 static int tg3_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
9958 struct tg3
*tp
= netdev_priv(dev
);
9959 struct device
*dp
= &tp
->pdev
->dev
;
9961 if (wol
->wolopts
& ~WAKE_MAGIC
)
9963 if ((wol
->wolopts
& WAKE_MAGIC
) &&
9964 !((tp
->tg3_flags
& TG3_FLAG_WOL_CAP
) && device_can_wakeup(dp
)))
9967 device_set_wakeup_enable(dp
, wol
->wolopts
& WAKE_MAGIC
);
9969 spin_lock_bh(&tp
->lock
);
9970 if (device_may_wakeup(dp
))
9971 tp
->tg3_flags
|= TG3_FLAG_WOL_ENABLE
;
9973 tp
->tg3_flags
&= ~TG3_FLAG_WOL_ENABLE
;
9974 spin_unlock_bh(&tp
->lock
);
9980 static u32
tg3_get_msglevel(struct net_device
*dev
)
9982 struct tg3
*tp
= netdev_priv(dev
);
9983 return tp
->msg_enable
;
9986 static void tg3_set_msglevel(struct net_device
*dev
, u32 value
)
9988 struct tg3
*tp
= netdev_priv(dev
);
9989 tp
->msg_enable
= value
;
9992 static int tg3_set_tso(struct net_device
*dev
, u32 value
)
9994 struct tg3
*tp
= netdev_priv(dev
);
9996 if (!(tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
)) {
10001 if ((dev
->features
& NETIF_F_IPV6_CSUM
) &&
10002 ((tp
->tg3_flags2
& TG3_FLG2_HW_TSO_2
) ||
10003 (tp
->tg3_flags2
& TG3_FLG2_HW_TSO_3
))) {
10005 dev
->features
|= NETIF_F_TSO6
;
10006 if ((tp
->tg3_flags2
& TG3_FLG2_HW_TSO_3
) ||
10007 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
10008 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
&&
10009 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5784_AX
) ||
10010 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
10011 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
10012 dev
->features
|= NETIF_F_TSO_ECN
;
10014 dev
->features
&= ~(NETIF_F_TSO6
| NETIF_F_TSO_ECN
);
10016 return ethtool_op_set_tso(dev
, value
);
10019 static int tg3_nway_reset(struct net_device
*dev
)
10021 struct tg3
*tp
= netdev_priv(dev
);
10024 if (!netif_running(dev
))
10027 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
10030 if (tp
->tg3_flags3
& TG3_FLG3_USE_PHYLIB
) {
10031 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
10033 r
= phy_start_aneg(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
10037 spin_lock_bh(&tp
->lock
);
10039 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
10040 if (!tg3_readphy(tp
, MII_BMCR
, &bmcr
) &&
10041 ((bmcr
& BMCR_ANENABLE
) ||
10042 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
))) {
10043 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANRESTART
|
10047 spin_unlock_bh(&tp
->lock
);
10053 static void tg3_get_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
10055 struct tg3
*tp
= netdev_priv(dev
);
10057 ering
->rx_max_pending
= tp
->rx_std_ring_mask
;
10058 ering
->rx_mini_max_pending
= 0;
10059 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
)
10060 ering
->rx_jumbo_max_pending
= tp
->rx_jmb_ring_mask
;
10062 ering
->rx_jumbo_max_pending
= 0;
10064 ering
->tx_max_pending
= TG3_TX_RING_SIZE
- 1;
10066 ering
->rx_pending
= tp
->rx_pending
;
10067 ering
->rx_mini_pending
= 0;
10068 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
)
10069 ering
->rx_jumbo_pending
= tp
->rx_jumbo_pending
;
10071 ering
->rx_jumbo_pending
= 0;
10073 ering
->tx_pending
= tp
->napi
[0].tx_pending
;
10076 static int tg3_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
10078 struct tg3
*tp
= netdev_priv(dev
);
10079 int i
, irq_sync
= 0, err
= 0;
10081 if ((ering
->rx_pending
> tp
->rx_std_ring_mask
) ||
10082 (ering
->rx_jumbo_pending
> tp
->rx_jmb_ring_mask
) ||
10083 (ering
->tx_pending
> TG3_TX_RING_SIZE
- 1) ||
10084 (ering
->tx_pending
<= MAX_SKB_FRAGS
) ||
10085 ((tp
->tg3_flags2
& TG3_FLG2_TSO_BUG
) &&
10086 (ering
->tx_pending
<= (MAX_SKB_FRAGS
* 3))))
10089 if (netif_running(dev
)) {
10091 tg3_netif_stop(tp
);
10095 tg3_full_lock(tp
, irq_sync
);
10097 tp
->rx_pending
= ering
->rx_pending
;
10099 if ((tp
->tg3_flags2
& TG3_FLG2_MAX_RXPEND_64
) &&
10100 tp
->rx_pending
> 63)
10101 tp
->rx_pending
= 63;
10102 tp
->rx_jumbo_pending
= ering
->rx_jumbo_pending
;
10104 for (i
= 0; i
< tp
->irq_max
; i
++)
10105 tp
->napi
[i
].tx_pending
= ering
->tx_pending
;
10107 if (netif_running(dev
)) {
10108 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10109 err
= tg3_restart_hw(tp
, 1);
10111 tg3_netif_start(tp
);
10114 tg3_full_unlock(tp
);
10116 if (irq_sync
&& !err
)
10122 static void tg3_get_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
10124 struct tg3
*tp
= netdev_priv(dev
);
10126 epause
->autoneg
= (tp
->tg3_flags
& TG3_FLAG_PAUSE_AUTONEG
) != 0;
10128 if (tp
->link_config
.active_flowctrl
& FLOW_CTRL_RX
)
10129 epause
->rx_pause
= 1;
10131 epause
->rx_pause
= 0;
10133 if (tp
->link_config
.active_flowctrl
& FLOW_CTRL_TX
)
10134 epause
->tx_pause
= 1;
10136 epause
->tx_pause
= 0;
10139 static int tg3_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
10141 struct tg3
*tp
= netdev_priv(dev
);
10144 if (tp
->tg3_flags3
& TG3_FLG3_USE_PHYLIB
) {
10146 struct phy_device
*phydev
;
10148 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
10150 if (!(phydev
->supported
& SUPPORTED_Pause
) ||
10151 (!(phydev
->supported
& SUPPORTED_Asym_Pause
) &&
10152 (epause
->rx_pause
!= epause
->tx_pause
)))
10155 tp
->link_config
.flowctrl
= 0;
10156 if (epause
->rx_pause
) {
10157 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
10159 if (epause
->tx_pause
) {
10160 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
10161 newadv
= ADVERTISED_Pause
;
10163 newadv
= ADVERTISED_Pause
|
10164 ADVERTISED_Asym_Pause
;
10165 } else if (epause
->tx_pause
) {
10166 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
10167 newadv
= ADVERTISED_Asym_Pause
;
10171 if (epause
->autoneg
)
10172 tp
->tg3_flags
|= TG3_FLAG_PAUSE_AUTONEG
;
10174 tp
->tg3_flags
&= ~TG3_FLAG_PAUSE_AUTONEG
;
10176 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
10177 u32 oldadv
= phydev
->advertising
&
10178 (ADVERTISED_Pause
| ADVERTISED_Asym_Pause
);
10179 if (oldadv
!= newadv
) {
10180 phydev
->advertising
&=
10181 ~(ADVERTISED_Pause
|
10182 ADVERTISED_Asym_Pause
);
10183 phydev
->advertising
|= newadv
;
10184 if (phydev
->autoneg
) {
10186 * Always renegotiate the link to
10187 * inform our link partner of our
10188 * flow control settings, even if the
10189 * flow control is forced. Let
10190 * tg3_adjust_link() do the final
10191 * flow control setup.
10193 return phy_start_aneg(phydev
);
10197 if (!epause
->autoneg
)
10198 tg3_setup_flow_control(tp
, 0, 0);
10200 tp
->link_config
.orig_advertising
&=
10201 ~(ADVERTISED_Pause
|
10202 ADVERTISED_Asym_Pause
);
10203 tp
->link_config
.orig_advertising
|= newadv
;
10208 if (netif_running(dev
)) {
10209 tg3_netif_stop(tp
);
10213 tg3_full_lock(tp
, irq_sync
);
10215 if (epause
->autoneg
)
10216 tp
->tg3_flags
|= TG3_FLAG_PAUSE_AUTONEG
;
10218 tp
->tg3_flags
&= ~TG3_FLAG_PAUSE_AUTONEG
;
10219 if (epause
->rx_pause
)
10220 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
10222 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_RX
;
10223 if (epause
->tx_pause
)
10224 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
10226 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_TX
;
10228 if (netif_running(dev
)) {
10229 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10230 err
= tg3_restart_hw(tp
, 1);
10232 tg3_netif_start(tp
);
10235 tg3_full_unlock(tp
);
10241 static u32
tg3_get_rx_csum(struct net_device
*dev
)
10243 struct tg3
*tp
= netdev_priv(dev
);
10244 return (tp
->tg3_flags
& TG3_FLAG_RX_CHECKSUMS
) != 0;
10247 static int tg3_set_rx_csum(struct net_device
*dev
, u32 data
)
10249 struct tg3
*tp
= netdev_priv(dev
);
10251 if (tp
->tg3_flags
& TG3_FLAG_BROKEN_CHECKSUMS
) {
10257 spin_lock_bh(&tp
->lock
);
10259 tp
->tg3_flags
|= TG3_FLAG_RX_CHECKSUMS
;
10261 tp
->tg3_flags
&= ~TG3_FLAG_RX_CHECKSUMS
;
10262 spin_unlock_bh(&tp
->lock
);
10267 static int tg3_set_tx_csum(struct net_device
*dev
, u32 data
)
10269 struct tg3
*tp
= netdev_priv(dev
);
10271 if (tp
->tg3_flags
& TG3_FLAG_BROKEN_CHECKSUMS
) {
10277 if (tp
->tg3_flags3
& TG3_FLG3_5755_PLUS
)
10278 ethtool_op_set_tx_ipv6_csum(dev
, data
);
10280 ethtool_op_set_tx_csum(dev
, data
);
10285 static int tg3_get_sset_count(struct net_device
*dev
, int sset
)
10289 return TG3_NUM_TEST
;
10291 return TG3_NUM_STATS
;
10293 return -EOPNOTSUPP
;
10297 static void tg3_get_strings(struct net_device
*dev
, u32 stringset
, u8
*buf
)
10299 switch (stringset
) {
10301 memcpy(buf
, ðtool_stats_keys
, sizeof(ethtool_stats_keys
));
10304 memcpy(buf
, ðtool_test_keys
, sizeof(ethtool_test_keys
));
10307 WARN_ON(1); /* we need a WARN() */
10312 static int tg3_phys_id(struct net_device
*dev
, u32 data
)
10314 struct tg3
*tp
= netdev_priv(dev
);
10317 if (!netif_running(tp
->dev
))
10321 data
= UINT_MAX
/ 2;
10323 for (i
= 0; i
< (data
* 2); i
++) {
10325 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
10326 LED_CTRL_1000MBPS_ON
|
10327 LED_CTRL_100MBPS_ON
|
10328 LED_CTRL_10MBPS_ON
|
10329 LED_CTRL_TRAFFIC_OVERRIDE
|
10330 LED_CTRL_TRAFFIC_BLINK
|
10331 LED_CTRL_TRAFFIC_LED
);
10334 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
10335 LED_CTRL_TRAFFIC_OVERRIDE
);
10337 if (msleep_interruptible(500))
10340 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
10344 static void tg3_get_ethtool_stats(struct net_device
*dev
,
10345 struct ethtool_stats
*estats
, u64
*tmp_stats
)
10347 struct tg3
*tp
= netdev_priv(dev
);
10348 memcpy(tmp_stats
, tg3_get_estats(tp
), sizeof(tp
->estats
));
10351 #define NVRAM_TEST_SIZE 0x100
10352 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10353 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10354 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10355 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10356 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10358 static int tg3_test_nvram(struct tg3
*tp
)
10362 int i
, j
, k
, err
= 0, size
;
10364 if (tp
->tg3_flags3
& TG3_FLG3_NO_NVRAM
)
10367 if (tg3_nvram_read(tp
, 0, &magic
) != 0)
10370 if (magic
== TG3_EEPROM_MAGIC
)
10371 size
= NVRAM_TEST_SIZE
;
10372 else if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) == TG3_EEPROM_MAGIC_FW
) {
10373 if ((magic
& TG3_EEPROM_SB_FORMAT_MASK
) ==
10374 TG3_EEPROM_SB_FORMAT_1
) {
10375 switch (magic
& TG3_EEPROM_SB_REVISION_MASK
) {
10376 case TG3_EEPROM_SB_REVISION_0
:
10377 size
= NVRAM_SELFBOOT_FORMAT1_0_SIZE
;
10379 case TG3_EEPROM_SB_REVISION_2
:
10380 size
= NVRAM_SELFBOOT_FORMAT1_2_SIZE
;
10382 case TG3_EEPROM_SB_REVISION_3
:
10383 size
= NVRAM_SELFBOOT_FORMAT1_3_SIZE
;
10390 } else if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) == TG3_EEPROM_MAGIC_HW
)
10391 size
= NVRAM_SELFBOOT_HW_SIZE
;
10395 buf
= kmalloc(size
, GFP_KERNEL
);
10400 for (i
= 0, j
= 0; i
< size
; i
+= 4, j
++) {
10401 err
= tg3_nvram_read_be32(tp
, i
, &buf
[j
]);
10408 /* Selfboot format */
10409 magic
= be32_to_cpu(buf
[0]);
10410 if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) ==
10411 TG3_EEPROM_MAGIC_FW
) {
10412 u8
*buf8
= (u8
*) buf
, csum8
= 0;
10414 if ((magic
& TG3_EEPROM_SB_REVISION_MASK
) ==
10415 TG3_EEPROM_SB_REVISION_2
) {
10416 /* For rev 2, the csum doesn't include the MBA. */
10417 for (i
= 0; i
< TG3_EEPROM_SB_F1R2_MBA_OFF
; i
++)
10419 for (i
= TG3_EEPROM_SB_F1R2_MBA_OFF
+ 4; i
< size
; i
++)
10422 for (i
= 0; i
< size
; i
++)
10435 if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) ==
10436 TG3_EEPROM_MAGIC_HW
) {
10437 u8 data
[NVRAM_SELFBOOT_DATA_SIZE
];
10438 u8 parity
[NVRAM_SELFBOOT_DATA_SIZE
];
10439 u8
*buf8
= (u8
*) buf
;
10441 /* Separate the parity bits and the data bytes. */
10442 for (i
= 0, j
= 0, k
= 0; i
< NVRAM_SELFBOOT_HW_SIZE
; i
++) {
10443 if ((i
== 0) || (i
== 8)) {
10447 for (l
= 0, msk
= 0x80; l
< 7; l
++, msk
>>= 1)
10448 parity
[k
++] = buf8
[i
] & msk
;
10450 } else if (i
== 16) {
10454 for (l
= 0, msk
= 0x20; l
< 6; l
++, msk
>>= 1)
10455 parity
[k
++] = buf8
[i
] & msk
;
10458 for (l
= 0, msk
= 0x80; l
< 8; l
++, msk
>>= 1)
10459 parity
[k
++] = buf8
[i
] & msk
;
10462 data
[j
++] = buf8
[i
];
10466 for (i
= 0; i
< NVRAM_SELFBOOT_DATA_SIZE
; i
++) {
10467 u8 hw8
= hweight8(data
[i
]);
10469 if ((hw8
& 0x1) && parity
[i
])
10471 else if (!(hw8
& 0x1) && !parity
[i
])
10480 /* Bootstrap checksum at offset 0x10 */
10481 csum
= calc_crc((unsigned char *) buf
, 0x10);
10482 if (csum
!= le32_to_cpu(buf
[0x10/4]))
10485 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10486 csum
= calc_crc((unsigned char *) &buf
[0x74/4], 0x88);
10487 if (csum
!= le32_to_cpu(buf
[0xfc/4]))
10490 for (i
= 0; i
< TG3_NVM_VPD_LEN
; i
+= 4) {
10491 /* The data is in little-endian format in NVRAM.
10492 * Use the big-endian read routines to preserve
10493 * the byte order as it exists in NVRAM.
10495 if (tg3_nvram_read_be32(tp
, TG3_NVM_VPD_OFF
+ i
, &buf
[i
/4]))
10499 i
= pci_vpd_find_tag((u8
*)buf
, 0, TG3_NVM_VPD_LEN
,
10500 PCI_VPD_LRDT_RO_DATA
);
10502 j
= pci_vpd_lrdt_size(&((u8
*)buf
)[i
]);
10506 if (i
+ PCI_VPD_LRDT_TAG_SIZE
+ j
> TG3_NVM_VPD_LEN
)
10509 i
+= PCI_VPD_LRDT_TAG_SIZE
;
10510 j
= pci_vpd_find_info_keyword((u8
*)buf
, i
, j
,
10511 PCI_VPD_RO_KEYWORD_CHKSUM
);
10515 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
10517 for (i
= 0; i
<= j
; i
++)
10518 csum8
+= ((u8
*)buf
)[i
];
10532 #define TG3_SERDES_TIMEOUT_SEC 2
10533 #define TG3_COPPER_TIMEOUT_SEC 6
10535 static int tg3_test_link(struct tg3
*tp
)
10539 if (!netif_running(tp
->dev
))
10542 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
10543 max
= TG3_SERDES_TIMEOUT_SEC
;
10545 max
= TG3_COPPER_TIMEOUT_SEC
;
10547 for (i
= 0; i
< max
; i
++) {
10548 if (netif_carrier_ok(tp
->dev
))
10551 if (msleep_interruptible(1000))
10558 /* Only test the commonly used registers */
10559 static int tg3_test_registers(struct tg3
*tp
)
10561 int i
, is_5705
, is_5750
;
10562 u32 offset
, read_mask
, write_mask
, val
, save_val
, read_val
;
10566 #define TG3_FL_5705 0x1
10567 #define TG3_FL_NOT_5705 0x2
10568 #define TG3_FL_NOT_5788 0x4
10569 #define TG3_FL_NOT_5750 0x8
10573 /* MAC Control Registers */
10574 { MAC_MODE
, TG3_FL_NOT_5705
,
10575 0x00000000, 0x00ef6f8c },
10576 { MAC_MODE
, TG3_FL_5705
,
10577 0x00000000, 0x01ef6b8c },
10578 { MAC_STATUS
, TG3_FL_NOT_5705
,
10579 0x03800107, 0x00000000 },
10580 { MAC_STATUS
, TG3_FL_5705
,
10581 0x03800100, 0x00000000 },
10582 { MAC_ADDR_0_HIGH
, 0x0000,
10583 0x00000000, 0x0000ffff },
10584 { MAC_ADDR_0_LOW
, 0x0000,
10585 0x00000000, 0xffffffff },
10586 { MAC_RX_MTU_SIZE
, 0x0000,
10587 0x00000000, 0x0000ffff },
10588 { MAC_TX_MODE
, 0x0000,
10589 0x00000000, 0x00000070 },
10590 { MAC_TX_LENGTHS
, 0x0000,
10591 0x00000000, 0x00003fff },
10592 { MAC_RX_MODE
, TG3_FL_NOT_5705
,
10593 0x00000000, 0x000007fc },
10594 { MAC_RX_MODE
, TG3_FL_5705
,
10595 0x00000000, 0x000007dc },
10596 { MAC_HASH_REG_0
, 0x0000,
10597 0x00000000, 0xffffffff },
10598 { MAC_HASH_REG_1
, 0x0000,
10599 0x00000000, 0xffffffff },
10600 { MAC_HASH_REG_2
, 0x0000,
10601 0x00000000, 0xffffffff },
10602 { MAC_HASH_REG_3
, 0x0000,
10603 0x00000000, 0xffffffff },
10605 /* Receive Data and Receive BD Initiator Control Registers. */
10606 { RCVDBDI_JUMBO_BD
+0, TG3_FL_NOT_5705
,
10607 0x00000000, 0xffffffff },
10608 { RCVDBDI_JUMBO_BD
+4, TG3_FL_NOT_5705
,
10609 0x00000000, 0xffffffff },
10610 { RCVDBDI_JUMBO_BD
+8, TG3_FL_NOT_5705
,
10611 0x00000000, 0x00000003 },
10612 { RCVDBDI_JUMBO_BD
+0xc, TG3_FL_NOT_5705
,
10613 0x00000000, 0xffffffff },
10614 { RCVDBDI_STD_BD
+0, 0x0000,
10615 0x00000000, 0xffffffff },
10616 { RCVDBDI_STD_BD
+4, 0x0000,
10617 0x00000000, 0xffffffff },
10618 { RCVDBDI_STD_BD
+8, 0x0000,
10619 0x00000000, 0xffff0002 },
10620 { RCVDBDI_STD_BD
+0xc, 0x0000,
10621 0x00000000, 0xffffffff },
10623 /* Receive BD Initiator Control Registers. */
10624 { RCVBDI_STD_THRESH
, TG3_FL_NOT_5705
,
10625 0x00000000, 0xffffffff },
10626 { RCVBDI_STD_THRESH
, TG3_FL_5705
,
10627 0x00000000, 0x000003ff },
10628 { RCVBDI_JUMBO_THRESH
, TG3_FL_NOT_5705
,
10629 0x00000000, 0xffffffff },
10631 /* Host Coalescing Control Registers. */
10632 { HOSTCC_MODE
, TG3_FL_NOT_5705
,
10633 0x00000000, 0x00000004 },
10634 { HOSTCC_MODE
, TG3_FL_5705
,
10635 0x00000000, 0x000000f6 },
10636 { HOSTCC_RXCOL_TICKS
, TG3_FL_NOT_5705
,
10637 0x00000000, 0xffffffff },
10638 { HOSTCC_RXCOL_TICKS
, TG3_FL_5705
,
10639 0x00000000, 0x000003ff },
10640 { HOSTCC_TXCOL_TICKS
, TG3_FL_NOT_5705
,
10641 0x00000000, 0xffffffff },
10642 { HOSTCC_TXCOL_TICKS
, TG3_FL_5705
,
10643 0x00000000, 0x000003ff },
10644 { HOSTCC_RXMAX_FRAMES
, TG3_FL_NOT_5705
,
10645 0x00000000, 0xffffffff },
10646 { HOSTCC_RXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
10647 0x00000000, 0x000000ff },
10648 { HOSTCC_TXMAX_FRAMES
, TG3_FL_NOT_5705
,
10649 0x00000000, 0xffffffff },
10650 { HOSTCC_TXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
10651 0x00000000, 0x000000ff },
10652 { HOSTCC_RXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
10653 0x00000000, 0xffffffff },
10654 { HOSTCC_TXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
10655 0x00000000, 0xffffffff },
10656 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
10657 0x00000000, 0xffffffff },
10658 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
10659 0x00000000, 0x000000ff },
10660 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
10661 0x00000000, 0xffffffff },
10662 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
10663 0x00000000, 0x000000ff },
10664 { HOSTCC_STAT_COAL_TICKS
, TG3_FL_NOT_5705
,
10665 0x00000000, 0xffffffff },
10666 { HOSTCC_STATS_BLK_HOST_ADDR
, TG3_FL_NOT_5705
,
10667 0x00000000, 0xffffffff },
10668 { HOSTCC_STATS_BLK_HOST_ADDR
+4, TG3_FL_NOT_5705
,
10669 0x00000000, 0xffffffff },
10670 { HOSTCC_STATUS_BLK_HOST_ADDR
, 0x0000,
10671 0x00000000, 0xffffffff },
10672 { HOSTCC_STATUS_BLK_HOST_ADDR
+4, 0x0000,
10673 0x00000000, 0xffffffff },
10674 { HOSTCC_STATS_BLK_NIC_ADDR
, 0x0000,
10675 0xffffffff, 0x00000000 },
10676 { HOSTCC_STATUS_BLK_NIC_ADDR
, 0x0000,
10677 0xffffffff, 0x00000000 },
10679 /* Buffer Manager Control Registers. */
10680 { BUFMGR_MB_POOL_ADDR
, TG3_FL_NOT_5750
,
10681 0x00000000, 0x007fff80 },
10682 { BUFMGR_MB_POOL_SIZE
, TG3_FL_NOT_5750
,
10683 0x00000000, 0x007fffff },
10684 { BUFMGR_MB_RDMA_LOW_WATER
, 0x0000,
10685 0x00000000, 0x0000003f },
10686 { BUFMGR_MB_MACRX_LOW_WATER
, 0x0000,
10687 0x00000000, 0x000001ff },
10688 { BUFMGR_MB_HIGH_WATER
, 0x0000,
10689 0x00000000, 0x000001ff },
10690 { BUFMGR_DMA_DESC_POOL_ADDR
, TG3_FL_NOT_5705
,
10691 0xffffffff, 0x00000000 },
10692 { BUFMGR_DMA_DESC_POOL_SIZE
, TG3_FL_NOT_5705
,
10693 0xffffffff, 0x00000000 },
10695 /* Mailbox Registers */
10696 { GRCMBOX_RCVSTD_PROD_IDX
+4, 0x0000,
10697 0x00000000, 0x000001ff },
10698 { GRCMBOX_RCVJUMBO_PROD_IDX
+4, TG3_FL_NOT_5705
,
10699 0x00000000, 0x000001ff },
10700 { GRCMBOX_RCVRET_CON_IDX_0
+4, 0x0000,
10701 0x00000000, 0x000007ff },
10702 { GRCMBOX_SNDHOST_PROD_IDX_0
+4, 0x0000,
10703 0x00000000, 0x000001ff },
10705 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10708 is_5705
= is_5750
= 0;
10709 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
10711 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
10715 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
10716 if (is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5705
))
10719 if (!is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_5705
))
10722 if ((tp
->tg3_flags2
& TG3_FLG2_IS_5788
) &&
10723 (reg_tbl
[i
].flags
& TG3_FL_NOT_5788
))
10726 if (is_5750
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5750
))
10729 offset
= (u32
) reg_tbl
[i
].offset
;
10730 read_mask
= reg_tbl
[i
].read_mask
;
10731 write_mask
= reg_tbl
[i
].write_mask
;
10733 /* Save the original register content */
10734 save_val
= tr32(offset
);
10736 /* Determine the read-only value. */
10737 read_val
= save_val
& read_mask
;
10739 /* Write zero to the register, then make sure the read-only bits
10740 * are not changed and the read/write bits are all zeros.
10744 val
= tr32(offset
);
10746 /* Test the read-only and read/write bits. */
10747 if (((val
& read_mask
) != read_val
) || (val
& write_mask
))
10750 /* Write ones to all the bits defined by RdMask and WrMask, then
10751 * make sure the read-only bits are not changed and the
10752 * read/write bits are all ones.
10754 tw32(offset
, read_mask
| write_mask
);
10756 val
= tr32(offset
);
10758 /* Test the read-only bits. */
10759 if ((val
& read_mask
) != read_val
)
10762 /* Test the read/write bits. */
10763 if ((val
& write_mask
) != write_mask
)
10766 tw32(offset
, save_val
);
10772 if (netif_msg_hw(tp
))
10773 netdev_err(tp
->dev
,
10774 "Register test failed at offset %x\n", offset
);
10775 tw32(offset
, save_val
);
10779 static int tg3_do_mem_test(struct tg3
*tp
, u32 offset
, u32 len
)
10781 static const u32 test_pattern
[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10785 for (i
= 0; i
< ARRAY_SIZE(test_pattern
); i
++) {
10786 for (j
= 0; j
< len
; j
+= 4) {
10789 tg3_write_mem(tp
, offset
+ j
, test_pattern
[i
]);
10790 tg3_read_mem(tp
, offset
+ j
, &val
);
10791 if (val
!= test_pattern
[i
])
10798 static int tg3_test_memory(struct tg3
*tp
)
10800 static struct mem_entry
{
10803 } mem_tbl_570x
[] = {
10804 { 0x00000000, 0x00b50},
10805 { 0x00002000, 0x1c000},
10806 { 0xffffffff, 0x00000}
10807 }, mem_tbl_5705
[] = {
10808 { 0x00000100, 0x0000c},
10809 { 0x00000200, 0x00008},
10810 { 0x00004000, 0x00800},
10811 { 0x00006000, 0x01000},
10812 { 0x00008000, 0x02000},
10813 { 0x00010000, 0x0e000},
10814 { 0xffffffff, 0x00000}
10815 }, mem_tbl_5755
[] = {
10816 { 0x00000200, 0x00008},
10817 { 0x00004000, 0x00800},
10818 { 0x00006000, 0x00800},
10819 { 0x00008000, 0x02000},
10820 { 0x00010000, 0x0c000},
10821 { 0xffffffff, 0x00000}
10822 }, mem_tbl_5906
[] = {
10823 { 0x00000200, 0x00008},
10824 { 0x00004000, 0x00400},
10825 { 0x00006000, 0x00400},
10826 { 0x00008000, 0x01000},
10827 { 0x00010000, 0x01000},
10828 { 0xffffffff, 0x00000}
10829 }, mem_tbl_5717
[] = {
10830 { 0x00000200, 0x00008},
10831 { 0x00010000, 0x0a000},
10832 { 0x00020000, 0x13c00},
10833 { 0xffffffff, 0x00000}
10834 }, mem_tbl_57765
[] = {
10835 { 0x00000200, 0x00008},
10836 { 0x00004000, 0x00800},
10837 { 0x00006000, 0x09800},
10838 { 0x00010000, 0x0a000},
10839 { 0xffffffff, 0x00000}
10841 struct mem_entry
*mem_tbl
;
10845 if (tp
->tg3_flags3
& TG3_FLG3_5717_PLUS
)
10846 mem_tbl
= mem_tbl_5717
;
10847 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
10848 mem_tbl
= mem_tbl_57765
;
10849 else if (tp
->tg3_flags3
& TG3_FLG3_5755_PLUS
)
10850 mem_tbl
= mem_tbl_5755
;
10851 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
10852 mem_tbl
= mem_tbl_5906
;
10853 else if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
10854 mem_tbl
= mem_tbl_5705
;
10856 mem_tbl
= mem_tbl_570x
;
10858 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
10859 err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
, mem_tbl
[i
].len
);
10867 #define TG3_MAC_LOOPBACK 0
10868 #define TG3_PHY_LOOPBACK 1
10870 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10872   u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10873   u32 desc_idx, coal_now;
10874   struct sk_buff *skb, *rx_skb;
10877   int num_pkts, tx_len, rx_len, i, err;
10878   struct tg3_rx_buffer_desc *desc;
10879   struct tg3_napi *tnapi, *rnapi;
10880   struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
10882   tnapi = &tp->napi[0];
10883   rnapi = &tp->napi[0];
10884   if (tp->irq_cnt > 1) {
10885     if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
10886       rnapi = &tp->napi[1];
10887     if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
10888       tnapi = &tp->napi[1];
10890   coal_now = tnapi->coal_now | rnapi->coal_now;
10892   if (loopback_mode == TG3_MAC_LOOPBACK) {
10893     /* HW errata - mac loopback fails in some cases on 5780.
10894      * Normal traffic and PHY loopback are not affected by
10895      * errata.  Also, the MAC loopback test is deprecated for
10896      * all newer ASIC revisions.
10898     if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10899         (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
10902     mac_mode = tp->mac_mode &
10903                ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
10904     mac_mode |= MAC_MODE_PORT_INT_LPBACK;
10905     if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10906       mac_mode |= MAC_MODE_LINK_POLARITY;
10907     if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
10908       mac_mode |= MAC_MODE_PORT_MODE_MII;
10910       mac_mode |= MAC_MODE_PORT_MODE_GMII;
10911     tw32(MAC_MODE, mac_mode);
10912   } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10915     if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10916       tg3_phy_fet_toggle_apd(tp, false);
10917       val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10919       val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10921     tg3_phy_toggle_automdix(tp, 0);
10923     tg3_writephy(tp, MII_BMCR, val);
10926     mac_mode = tp->mac_mode &
10927                ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
10928     if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10929       tg3_writephy(tp, MII_TG3_FET_PTEST,
10930                    MII_TG3_FET_PTEST_FRC_TX_LINK |
10931                    MII_TG3_FET_PTEST_FRC_TX_LOCK);
10932       /* The write needs to be flushed for the AC131 */
10933       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10934         tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
10935       mac_mode |= MAC_MODE_PORT_MODE_MII;
10937       mac_mode |= MAC_MODE_PORT_MODE_GMII;
10939     /* reset to prevent losing 1st rx packet intermittently */
10940     if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10941       tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10943       tw32_f(MAC_RX_MODE, tp->rx_mode);
10945     if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10946       u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
10947       if (masked_phy_id == TG3_PHY_ID_BCM5401)
10948         mac_mode &= ~MAC_MODE_LINK_POLARITY;
10949       else if (masked_phy_id == TG3_PHY_ID_BCM5411)
10950         mac_mode |= MAC_MODE_LINK_POLARITY;
10951       tg3_writephy(tp, MII_TG3_EXT_CTRL,
10952                    MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10954     tw32(MAC_MODE, mac_mode);
10956     /* Wait for link */
10957     for (i = 0; i < 100; i++) {
10958       if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
10969   skb = netdev_alloc_skb(tp->dev, tx_len);
10973   tx_data = skb_put(skb, tx_len);
10974   memcpy(tx_data, tp->dev->dev_addr, 6);
10975   memset(tx_data + 6, 0x0, 8);
10977   tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10979   for (i = 14; i < tx_len; i++)
10980     tx_data[i] = (u8) (i & 0xff);
10982   map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10983   if (pci_dma_mapping_error(tp->pdev, map)) {
10984     dev_kfree_skb(skb);
10988   tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10993   rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10997   tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
11002   tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11003   tr32_mailbox(tnapi->prodmbox);
11007   /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11008   for (i = 0; i < 35; i++) {
11009     tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11014     tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11015     rx_idx = rnapi->hw_status->idx[0].rx_producer;
11016     if ((tx_idx == tnapi->tx_prod) &&
11017         (rx_idx == (rx_start_idx + num_pkts)))
11021   pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11022   dev_kfree_skb(skb);
11024   if (tx_idx != tnapi->tx_prod)
11027   if (rx_idx != rx_start_idx + num_pkts)
11030   desc = &rnapi->rx_rcb[rx_start_idx];
11031   desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11032   opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11033   if (opaque_key != RXD_OPAQUE_RING_STD)
11036   if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11037       (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11040   rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
11041   if (rx_len != tx_len)
11044   rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11046   map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
11047   pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
11049   for (i = 14; i < tx_len; i++) {
11050     if (*(rx_skb->data + i) != (u8) (i & 0xff))
11055   /* tg3_free_rings will unmap and free the rx_skb */
11060 #define TG3_MAC_LOOPBACK_FAILED 1
11061 #define TG3_PHY_LOOPBACK_FAILED 2
11062 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
11063 TG3_PHY_LOOPBACK_FAILED)
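/* tg3_test_loopback() runs the MAC loopback pass and, when the PHY is not
 * a SERDES device managed through phylib, a second PHY loopback pass; each
 * failure sets one of the TG3_*_LOOPBACK_FAILED bits defined above.
 */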
11065 static int tg3_test_loopback(struct tg3 *tp)
11068   u32 eee_cap, cpmuctrl = 0;
11070   if (!netif_running(tp->dev))
11071     return TG3_LOOPBACK_FAILED;
11073   eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11074   tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11076   err = tg3_reset_hw(tp, 1);
11078     err = TG3_LOOPBACK_FAILED;
11082   /* Turn off gphy autopowerdown. */
11083   if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11084     tg3_phy_toggle_apd(tp, false);
11086   if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
11090     tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11092     /* Wait for up to 40 microseconds to acquire lock. */
11093     for (i = 0; i < 4; i++) {
11094       status = tr32(TG3_CPMU_MUTEX_GNT);
11095       if (status == CPMU_MUTEX_GNT_DRIVER)
11100     if (status != CPMU_MUTEX_GNT_DRIVER) {
11101       err = TG3_LOOPBACK_FAILED;
11105     /* Turn off link-based power management. */
11106     cpmuctrl = tr32(TG3_CPMU_CTRL);
11107     tw32(TG3_CPMU_CTRL,
11108          cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11109                       CPMU_CTRL_LINK_AWARE_MODE));
11112   if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
11113     err |= TG3_MAC_LOOPBACK_FAILED;
11115   if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
11116     tw32(TG3_CPMU_CTRL, cpmuctrl);
11118     /* Release the mutex */
11119     tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11122   if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11123       !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
11124     if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
11125       err |= TG3_PHY_LOOPBACK_FAILED;
11128   /* Re-enable gphy autopowerdown. */
11129   if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11130     tg3_phy_toggle_apd(tp, true);
11133   tp->phy_flags |= eee_cap;
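/* ethtool self-test entry point.  The online tests (NVRAM, link) run
 * first; ETH_TEST_FL_OFFLINE additionally halts the chip and runs the
 * register, memory, loopback and interrupt tests before restarting it.
 */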
11138 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11141   struct tg3 *tp = netdev_priv(dev);
11143   if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11146   memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11148   if (tg3_test_nvram(tp) != 0) {
11149     etest->flags |= ETH_TEST_FL_FAILED;
11152   if (tg3_test_link(tp) != 0) {
11153     etest->flags |= ETH_TEST_FL_FAILED;
11156   if (etest->flags & ETH_TEST_FL_OFFLINE) {
11157     int err, err2 = 0, irq_sync = 0;
11159     if (netif_running(dev)) {
11161       tg3_netif_stop(tp);
11165     tg3_full_lock(tp, irq_sync);
11167     tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11168     err = tg3_nvram_lock(tp);
11169     tg3_halt_cpu(tp, RX_CPU_BASE);
11170     if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11171       tg3_halt_cpu(tp, TX_CPU_BASE);
11173       tg3_nvram_unlock(tp);
11175     if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11178     if (tg3_test_registers(tp) != 0) {
11179       etest->flags |= ETH_TEST_FL_FAILED;
11182     if (tg3_test_memory(tp) != 0) {
11183       etest->flags |= ETH_TEST_FL_FAILED;
11186     if ((data[4] = tg3_test_loopback(tp)) != 0)
11187       etest->flags |= ETH_TEST_FL_FAILED;
11189     tg3_full_unlock(tp);
11191     if (tg3_test_interrupt(tp) != 0) {
11192       etest->flags |= ETH_TEST_FL_FAILED;
11196     tg3_full_lock(tp, 0);
11198     tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11199     if (netif_running(dev)) {
11200       tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11201       err2 = tg3_restart_hw(tp, 1);
11203         tg3_netif_start(tp);
11206     tg3_full_unlock(tp);
11208     if (irq_sync && !err2)
11211   if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11212     tg3_power_down(tp);
11216 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11218   struct mii_ioctl_data *data = if_mii(ifr);
11219   struct tg3 *tp = netdev_priv(dev);
11222   if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
11223     struct phy_device *phydev;
11224     if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11226     phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11227     return phy_mii_ioctl(phydev, ifr, cmd);
11232     data->phy_id = tp->phy_addr;
11235   case SIOCGMIIREG: {
11238     if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11239       break;  /* We have no PHY */
11241     if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
11242         ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
11243          !netif_running(dev)))
11246     spin_lock_bh(&tp->lock);
11247     err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11248     spin_unlock_bh(&tp->lock);
11250     data->val_out = mii_regval;
11256     if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11257       break;  /* We have no PHY */
11259     if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
11260         ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
11261          !netif_running(dev)))
11264     spin_lock_bh(&tp->lock);
11265     err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11266     spin_unlock_bh(&tp->lock);
11274   return -EOPNOTSUPP;
11277 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11279   struct tg3 *tp = netdev_priv(dev);
11281   memcpy(ec, &tp->coal, sizeof(*ec));
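/* Interrupt coalescing setter.  Note that the "during interrupt" and
 * statistics-block limits are only raised above zero for pre-5705 parts;
 * on 5705-and-newer chips a non-zero request for those parameters is
 * rejected by the range checks below.
 */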
11285 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11287   struct tg3 *tp = netdev_priv(dev);
11288   u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11289   u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11291   if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
11292     max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11293     max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11294     max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11295     min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11298   if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11299       (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11300       (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11301       (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11302       (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11303       (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11304       (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11305       (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11306       (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11307       (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11310   /* No rx interrupts will be generated if both are zero */
11311   if ((ec->rx_coalesce_usecs == 0) &&
11312       (ec->rx_max_coalesced_frames == 0))
11315   /* No tx interrupts will be generated if both are zero */
11316   if ((ec->tx_coalesce_usecs == 0) &&
11317       (ec->tx_max_coalesced_frames == 0))
11320   /* Only copy relevant parameters, ignore all others. */
11321   tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11322   tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11323   tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11324   tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11325   tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11326   tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11327   tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11328   tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11329   tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11331   if (netif_running(dev)) {
11332     tg3_full_lock(tp, 0);
11333     __tg3_set_coalesce(tp, &tp->coal);
11334     tg3_full_unlock(tp);
11339 static const struct ethtool_ops tg3_ethtool_ops = {
11340   .get_settings = tg3_get_settings,
11341   .set_settings = tg3_set_settings,
11342   .get_drvinfo = tg3_get_drvinfo,
11343   .get_regs_len = tg3_get_regs_len,
11344   .get_regs = tg3_get_regs,
11345   .get_wol = tg3_get_wol,
11346   .set_wol = tg3_set_wol,
11347   .get_msglevel = tg3_get_msglevel,
11348   .set_msglevel = tg3_set_msglevel,
11349   .nway_reset = tg3_nway_reset,
11350   .get_link = ethtool_op_get_link,
11351   .get_eeprom_len = tg3_get_eeprom_len,
11352   .get_eeprom = tg3_get_eeprom,
11353   .set_eeprom = tg3_set_eeprom,
11354   .get_ringparam = tg3_get_ringparam,
11355   .set_ringparam = tg3_set_ringparam,
11356   .get_pauseparam = tg3_get_pauseparam,
11357   .set_pauseparam = tg3_set_pauseparam,
11358   .get_rx_csum = tg3_get_rx_csum,
11359   .set_rx_csum = tg3_set_rx_csum,
11360   .set_tx_csum = tg3_set_tx_csum,
11361   .set_sg = ethtool_op_set_sg,
11362   .set_tso = tg3_set_tso,
11363   .self_test = tg3_self_test,
11364   .get_strings = tg3_get_strings,
11365   .phys_id = tg3_phys_id,
11366   .get_ethtool_stats = tg3_get_ethtool_stats,
11367   .get_coalesce = tg3_get_coalesce,
11368   .set_coalesce = tg3_set_coalesce,
11369   .get_sset_count = tg3_get_sset_count,
11372 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11374   u32 cursize, val, magic;
11376   tp->nvram_size = EEPROM_CHIP_SIZE;
11378   if (tg3_nvram_read(tp, 0, &magic) != 0)
11381   if ((magic != TG3_EEPROM_MAGIC) &&
11382       ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11383       ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11387    * Size the chip by reading offsets at increasing powers of two.
11388    * When we encounter our validation signature, we know the addressing
11389    * has wrapped around, and thus have our chip size.
11393   while (cursize < tp->nvram_size) {
11394     if (tg3_nvram_read(tp, cursize, &val) != 0)
11403   tp->nvram_size = cursize;
11406 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11410   if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11411       tg3_nvram_read(tp, 0, &val) != 0)
11414   /* Selfboot format */
11415   if (val != TG3_EEPROM_MAGIC) {
11416     tg3_get_eeprom_size(tp);
11420   if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11422       /* This is confusing.  We want to operate on the
11423        * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11424        * call will read from NVRAM and byteswap the data
11425        * according to the byteswapping settings for all
11426        * other register accesses.  This ensures the data we
11427        * want will always reside in the lower 16-bits.
11428        * However, the data in NVRAM is in LE format, which
11429        * means the data from the NVRAM read will always be
11430        * opposite the endianness of the CPU.  The 16-bit
11431        * byteswap then brings the data to CPU endianness.
11433       tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11437   tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11440 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11444   nvcfg1 = tr32(NVRAM_CFG1);
11445   if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11446     tp->tg3_flags2 |= TG3_FLG2_FLASH;
11448     nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11449     tw32(NVRAM_CFG1, nvcfg1);
11452   if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11453       (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11454     switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11455     case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11456       tp->nvram_jedecnum = JEDEC_ATMEL;
11457       tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11458       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11460     case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11461       tp->nvram_jedecnum = JEDEC_ATMEL;
11462       tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11464     case FLASH_VENDOR_ATMEL_EEPROM:
11465       tp->nvram_jedecnum = JEDEC_ATMEL;
11466       tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11467       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11469     case FLASH_VENDOR_ST:
11470       tp->nvram_jedecnum = JEDEC_ST;
11471       tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11472       tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11474     case FLASH_VENDOR_SAIFUN:
11475       tp->nvram_jedecnum = JEDEC_SAIFUN;
11476       tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11478     case FLASH_VENDOR_SST_SMALL:
11479     case FLASH_VENDOR_SST_LARGE:
11480       tp->nvram_jedecnum = JEDEC_SST;
11481       tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11485     tp->nvram_jedecnum = JEDEC_ATMEL;
11486     tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11487     tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
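/* The per-ASIC helpers below decode NVRAM_CFG1: the vendor/device field
 * selects the JEDEC vendor, the page size and the flash-vs-EEPROM flags
 * for each chip family, which in turn steer the NVRAM read/write paths.
 */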
11491 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11493   switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11494   case FLASH_5752PAGE_SIZE_256:
11495     tp->nvram_pagesize = 256;
11497   case FLASH_5752PAGE_SIZE_512:
11498     tp->nvram_pagesize = 512;
11500   case FLASH_5752PAGE_SIZE_1K:
11501     tp->nvram_pagesize = 1024;
11503   case FLASH_5752PAGE_SIZE_2K:
11504     tp->nvram_pagesize = 2048;
11506   case FLASH_5752PAGE_SIZE_4K:
11507     tp->nvram_pagesize = 4096;
11509   case FLASH_5752PAGE_SIZE_264:
11510     tp->nvram_pagesize = 264;
11512   case FLASH_5752PAGE_SIZE_528:
11513     tp->nvram_pagesize = 528;
11518 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11522   nvcfg1 = tr32(NVRAM_CFG1);
11524   /* NVRAM protection for TPM */
11525   if (nvcfg1 & (1 << 27))
11526     tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11528   switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11529   case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11530   case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11531     tp->nvram_jedecnum = JEDEC_ATMEL;
11532     tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11534   case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11535     tp->nvram_jedecnum = JEDEC_ATMEL;
11536     tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11537     tp->tg3_flags2 |= TG3_FLG2_FLASH;
11539   case FLASH_5752VENDOR_ST_M45PE10:
11540   case FLASH_5752VENDOR_ST_M45PE20:
11541   case FLASH_5752VENDOR_ST_M45PE40:
11542     tp->nvram_jedecnum = JEDEC_ST;
11543     tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11544     tp->tg3_flags2 |= TG3_FLG2_FLASH;
11548   if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
11549     tg3_nvram_get_pagesize(tp, nvcfg1);
11551     /* For eeprom, set pagesize to maximum eeprom size */
11552     tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11554     nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11555     tw32(NVRAM_CFG1, nvcfg1);
11559 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11561   u32 nvcfg1, protect = 0;
11563   nvcfg1 = tr32(NVRAM_CFG1);
11565   /* NVRAM protection for TPM */
11566   if (nvcfg1 & (1 << 27)) {
11567     tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11571   nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11573   case FLASH_5755VENDOR_ATMEL_FLASH_1:
11574   case FLASH_5755VENDOR_ATMEL_FLASH_2:
11575   case FLASH_5755VENDOR_ATMEL_FLASH_3:
11576   case FLASH_5755VENDOR_ATMEL_FLASH_5:
11577     tp->nvram_jedecnum = JEDEC_ATMEL;
11578     tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11579     tp->tg3_flags2 |= TG3_FLG2_FLASH;
11580     tp->nvram_pagesize = 264;
11581     if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11582         nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11583       tp->nvram_size = (protect ? 0x3e200 : TG3_NVRAM_SIZE_512KB);
11585     else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11586       tp->nvram_size = (protect ? 0x1f200 : TG3_NVRAM_SIZE_256KB);
11589       tp->nvram_size = (protect ? 0x1f200 : TG3_NVRAM_SIZE_128KB);
11592   case FLASH_5752VENDOR_ST_M45PE10:
11593   case FLASH_5752VENDOR_ST_M45PE20:
11594   case FLASH_5752VENDOR_ST_M45PE40:
11595     tp->nvram_jedecnum = JEDEC_ST;
11596     tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11597     tp->tg3_flags2 |= TG3_FLG2_FLASH;
11598     tp->nvram_pagesize = 256;
11599     if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11600       tp->nvram_size = (protect ? TG3_NVRAM_SIZE_64KB : TG3_NVRAM_SIZE_128KB);
11603     else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11604       tp->nvram_size = (protect ? TG3_NVRAM_SIZE_64KB : TG3_NVRAM_SIZE_256KB);
11608       tp->nvram_size = (protect ? TG3_NVRAM_SIZE_128KB : TG3_NVRAM_SIZE_512KB);
11615 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11619   nvcfg1 = tr32(NVRAM_CFG1);
11621   switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11622   case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11623   case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11624   case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11625   case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11626     tp->nvram_jedecnum = JEDEC_ATMEL;
11627     tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11628     tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11630     nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11631     tw32(NVRAM_CFG1, nvcfg1);
11633   case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11634   case FLASH_5755VENDOR_ATMEL_FLASH_1:
11635   case FLASH_5755VENDOR_ATMEL_FLASH_2:
11636   case FLASH_5755VENDOR_ATMEL_FLASH_3:
11637     tp->nvram_jedecnum = JEDEC_ATMEL;
11638     tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11639     tp->tg3_flags2 |= TG3_FLG2_FLASH;
11640     tp->nvram_pagesize = 264;
11642   case FLASH_5752VENDOR_ST_M45PE10:
11643   case FLASH_5752VENDOR_ST_M45PE20:
11644   case FLASH_5752VENDOR_ST_M45PE40:
11645     tp->nvram_jedecnum = JEDEC_ST;
11646     tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11647     tp->tg3_flags2 |= TG3_FLG2_FLASH;
11648     tp->nvram_pagesize = 256;
11653 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11655   u32 nvcfg1, protect = 0;
11657   nvcfg1 = tr32(NVRAM_CFG1);
11659   /* NVRAM protection for TPM */
11660   if (nvcfg1 & (1 << 27)) {
11661     tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11665   nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11667   case FLASH_5761VENDOR_ATMEL_ADB021D:
11668   case FLASH_5761VENDOR_ATMEL_ADB041D:
11669   case FLASH_5761VENDOR_ATMEL_ADB081D:
11670   case FLASH_5761VENDOR_ATMEL_ADB161D:
11671   case FLASH_5761VENDOR_ATMEL_MDB021D:
11672   case FLASH_5761VENDOR_ATMEL_MDB041D:
11673   case FLASH_5761VENDOR_ATMEL_MDB081D:
11674   case FLASH_5761VENDOR_ATMEL_MDB161D:
11675     tp->nvram_jedecnum = JEDEC_ATMEL;
11676     tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11677     tp->tg3_flags2 |= TG3_FLG2_FLASH;
11678     tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11679     tp->nvram_pagesize = 256;
11681   case FLASH_5761VENDOR_ST_A_M45PE20:
11682   case FLASH_5761VENDOR_ST_A_M45PE40:
11683   case FLASH_5761VENDOR_ST_A_M45PE80:
11684   case FLASH_5761VENDOR_ST_A_M45PE16:
11685   case FLASH_5761VENDOR_ST_M_M45PE20:
11686   case FLASH_5761VENDOR_ST_M_M45PE40:
11687   case FLASH_5761VENDOR_ST_M_M45PE80:
11688   case FLASH_5761VENDOR_ST_M_M45PE16:
11689     tp->nvram_jedecnum = JEDEC_ST;
11690     tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11691     tp->tg3_flags2 |= TG3_FLG2_FLASH;
11692     tp->nvram_pagesize = 256;
11697     tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11700     case FLASH_5761VENDOR_ATMEL_ADB161D:
11701     case FLASH_5761VENDOR_ATMEL_MDB161D:
11702     case FLASH_5761VENDOR_ST_A_M45PE16:
11703     case FLASH_5761VENDOR_ST_M_M45PE16:
11704       tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11706     case FLASH_5761VENDOR_ATMEL_ADB081D:
11707     case FLASH_5761VENDOR_ATMEL_MDB081D:
11708     case FLASH_5761VENDOR_ST_A_M45PE80:
11709     case FLASH_5761VENDOR_ST_M_M45PE80:
11710       tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11712     case FLASH_5761VENDOR_ATMEL_ADB041D:
11713     case FLASH_5761VENDOR_ATMEL_MDB041D:
11714     case FLASH_5761VENDOR_ST_A_M45PE40:
11715     case FLASH_5761VENDOR_ST_M_M45PE40:
11716       tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11718     case FLASH_5761VENDOR_ATMEL_ADB021D:
11719     case FLASH_5761VENDOR_ATMEL_MDB021D:
11720     case FLASH_5761VENDOR_ST_A_M45PE20:
11721     case FLASH_5761VENDOR_ST_M_M45PE20:
11722       tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11728 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11730   tp->nvram_jedecnum = JEDEC_ATMEL;
11731   tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11732   tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11735 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11739   nvcfg1 = tr32(NVRAM_CFG1);
11741   switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11742   case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11743   case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11744     tp->nvram_jedecnum = JEDEC_ATMEL;
11745     tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11746     tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11748     nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11749     tw32(NVRAM_CFG1, nvcfg1);
11751   case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11752   case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11753   case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11754   case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11755   case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11756   case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11757   case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11758     tp->nvram_jedecnum = JEDEC_ATMEL;
11759     tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11760     tp->tg3_flags2 |= TG3_FLG2_FLASH;
11762     switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11763     case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11764     case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11765     case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11766       tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11768     case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11769     case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11770       tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11772     case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11773     case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11774       tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11778   case FLASH_5752VENDOR_ST_M45PE10:
11779   case FLASH_5752VENDOR_ST_M45PE20:
11780   case FLASH_5752VENDOR_ST_M45PE40:
11781     tp->nvram_jedecnum = JEDEC_ST;
11782     tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11783     tp->tg3_flags2 |= TG3_FLG2_FLASH;
11785     switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11786     case FLASH_5752VENDOR_ST_M45PE10:
11787       tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11789     case FLASH_5752VENDOR_ST_M45PE20:
11790       tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11792     case FLASH_5752VENDOR_ST_M45PE40:
11793       tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11798     tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11802   tg3_nvram_get_pagesize(tp, nvcfg1);
11803   if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11804     tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11808 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11812   nvcfg1 = tr32(NVRAM_CFG1);
11814   switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11815   case FLASH_5717VENDOR_ATMEL_EEPROM:
11816   case FLASH_5717VENDOR_MICRO_EEPROM:
11817     tp->nvram_jedecnum = JEDEC_ATMEL;
11818     tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11819     tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11821     nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11822     tw32(NVRAM_CFG1, nvcfg1);
11824   case FLASH_5717VENDOR_ATMEL_MDB011D:
11825   case FLASH_5717VENDOR_ATMEL_ADB011B:
11826   case FLASH_5717VENDOR_ATMEL_ADB011D:
11827   case FLASH_5717VENDOR_ATMEL_MDB021D:
11828   case FLASH_5717VENDOR_ATMEL_ADB021B:
11829   case FLASH_5717VENDOR_ATMEL_ADB021D:
11830   case FLASH_5717VENDOR_ATMEL_45USPT:
11831     tp->nvram_jedecnum = JEDEC_ATMEL;
11832     tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11833     tp->tg3_flags2 |= TG3_FLG2_FLASH;
11835     switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11836     case FLASH_5717VENDOR_ATMEL_MDB021D:
11837     case FLASH_5717VENDOR_ATMEL_ADB021B:
11838     case FLASH_5717VENDOR_ATMEL_ADB021D:
11839       tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11842       tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11846   case FLASH_5717VENDOR_ST_M_M25PE10:
11847   case FLASH_5717VENDOR_ST_A_M25PE10:
11848   case FLASH_5717VENDOR_ST_M_M45PE10:
11849   case FLASH_5717VENDOR_ST_A_M45PE10:
11850   case FLASH_5717VENDOR_ST_M_M25PE20:
11851   case FLASH_5717VENDOR_ST_A_M25PE20:
11852   case FLASH_5717VENDOR_ST_M_M45PE20:
11853   case FLASH_5717VENDOR_ST_A_M45PE20:
11854   case FLASH_5717VENDOR_ST_25USPT:
11855   case FLASH_5717VENDOR_ST_45USPT:
11856     tp->nvram_jedecnum = JEDEC_ST;
11857     tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11858     tp->tg3_flags2 |= TG3_FLG2_FLASH;
11860     switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11861     case FLASH_5717VENDOR_ST_M_M25PE20:
11862     case FLASH_5717VENDOR_ST_A_M25PE20:
11863     case FLASH_5717VENDOR_ST_M_M45PE20:
11864     case FLASH_5717VENDOR_ST_A_M45PE20:
11865       tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11868       tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11873     tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11877   tg3_nvram_get_pagesize(tp, nvcfg1);
11878   if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11879     tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11882 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
11883 static void __devinit tg3_nvram_init(struct tg3 *tp)
11885   tw32_f(GRC_EEPROM_ADDR,
11886          (EEPROM_ADDR_FSM_RESET |
11887           (EEPROM_DEFAULT_CLOCK_PERIOD <<
11888            EEPROM_ADDR_CLKPERD_SHIFT)));
11892   /* Enable seeprom accesses. */
11893   tw32_f(GRC_LOCAL_CTRL,
11894          tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11897   if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11898       GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11899     tp->tg3_flags |= TG3_FLAG_NVRAM;
11901     if (tg3_nvram_lock(tp)) {
11902       netdev_warn(tp->dev,
11903                   "Cannot get nvram lock, %s failed\n",
11907     tg3_enable_nvram_access(tp);
11909     tp->nvram_size = 0;
11911     if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11912       tg3_get_5752_nvram_info(tp);
11913     else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11914       tg3_get_5755_nvram_info(tp);
11915     else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11916              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11917              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11918       tg3_get_5787_nvram_info(tp);
11919     else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11920       tg3_get_5761_nvram_info(tp);
11921     else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11922       tg3_get_5906_nvram_info(tp);
11923     else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
11924              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11925       tg3_get_57780_nvram_info(tp);
11926     else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
11927       tg3_get_5717_nvram_info(tp);
11929       tg3_get_nvram_info(tp);
11931     if (tp->nvram_size == 0)
11932       tg3_get_nvram_size(tp);
11934     tg3_disable_nvram_access(tp);
11935     tg3_nvram_unlock(tp);
11938     tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11940     tg3_get_eeprom_size(tp);
11944 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11945                                               u32 offset, u32 len, u8 *buf)
11950   for (i = 0; i < len; i += 4) {
11956     memcpy(&data, buf + i, 4);
11959      * The SEEPROM interface expects the data to always be opposite
11960      * the native endian format.  We accomplish this by reversing
11961      * all the operations that would have been performed on the
11962      * data from a call to tg3_nvram_read_be32().
11964     tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
11966     val = tr32(GRC_EEPROM_ADDR);
11967     tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11969     val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11971     tw32(GRC_EEPROM_ADDR, val |
11972          (0 << EEPROM_ADDR_DEVID_SHIFT) |
11973          (addr & EEPROM_ADDR_ADDR_MASK) |
11974          EEPROM_ADDR_START |
11975          EEPROM_ADDR_WRITE);
11977     for (j = 0; j < 1000; j++) {
11978       val = tr32(GRC_EEPROM_ADDR);
11980       if (val & EEPROM_ADDR_COMPLETE)
11984     if (!(val & EEPROM_ADDR_COMPLETE)) {
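/* Unbuffered flash writes below follow a read-modify-write cycle: the
 * enclosing page is read into a scratch buffer, patched with the caller's
 * data, erased, and then rewritten one word at a time with FIRST/LAST
 * markers on the page boundaries.
 */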
11993 /* offset and length are dword aligned */
11994 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11998   u32 pagesize = tp->nvram_pagesize;
11999   u32 pagemask = pagesize - 1;
12003   tmp = kmalloc(pagesize, GFP_KERNEL);
12009     u32 phy_addr, page_off, size;
12011     phy_addr = offset & ~pagemask;
12013     for (j = 0; j < pagesize; j += 4) {
12014       ret = tg3_nvram_read_be32(tp, phy_addr + j,
12015                                 (__be32 *) (tmp + j));
12022     page_off = offset & pagemask;
12029     memcpy(tmp + page_off, buf, size);
12031     offset = offset + (pagesize - page_off);
12033     tg3_enable_nvram_access(tp);
12036      * Before we can erase the flash page, we need
12037      * to issue a special "write enable" command.
12039     nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12041     if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12044     /* Erase the target page */
12045     tw32(NVRAM_ADDR, phy_addr);
12047     nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12048                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12050     if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12053     /* Issue another write enable to start the write. */
12054     nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12056     if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12059     for (j = 0; j < pagesize; j += 4) {
12062       data = *((__be32 *) (tmp + j));
12064       tw32(NVRAM_WRDATA, be32_to_cpu(data));
12066       tw32(NVRAM_ADDR, phy_addr + j);
12068       nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12072         nvram_cmd |= NVRAM_CMD_FIRST;
12073       else if (j == (pagesize - 4))
12074         nvram_cmd |= NVRAM_CMD_LAST;
12076       if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12083   nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12084   tg3_nvram_exec_cmd(tp, nvram_cmd);
12091 /* offset and length are dword aligned */
12092 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12097   for (i = 0; i < len; i += 4, offset += 4) {
12098     u32 page_off, phy_addr, nvram_cmd;
12101     memcpy(&data, buf + i, 4);
12102     tw32(NVRAM_WRDATA, be32_to_cpu(data));
12104     page_off = offset % tp->nvram_pagesize;
12106     phy_addr = tg3_nvram_phys_addr(tp, offset);
12108     tw32(NVRAM_ADDR, phy_addr);
12110     nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12112     if (page_off == 0 || i == 0)
12113       nvram_cmd |= NVRAM_CMD_FIRST;
12114     if (page_off == (tp->nvram_pagesize - 4))
12115       nvram_cmd |= NVRAM_CMD_LAST;
12117     if (i == (len - 4))
12118       nvram_cmd |= NVRAM_CMD_LAST;
12120     if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12121         !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
12122         (tp->nvram_jedecnum == JEDEC_ST) &&
12123         (nvram_cmd & NVRAM_CMD_FIRST)) {
12125       if ((ret = tg3_nvram_exec_cmd(tp,
12126                  NVRAM_CMD_WREN | NVRAM_CMD_GO |
12131     if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
12132       /* We always do complete word writes to eeprom. */
12133       nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12136     if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
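/* tg3_nvram_write_block() is the top-level write path: it temporarily
 * lifts the GPIO-based write protect, picks the EEPROM, buffered or
 * unbuffered routine based on the NVRAM flags, and restores write
 * protection afterwards.
 */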
12142 /* offset and length are dword aligned */
12143 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12147   if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
12148     tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12149            ~GRC_LCLCTRL_GPIO_OUTPUT1);
12153   if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
12154     ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12158     ret = tg3_nvram_lock(tp);
12162     tg3_enable_nvram_access(tp);
12163     if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
12164         !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
12165       tw32(NVRAM_WRITE1, 0x406);
12167     grc_mode = tr32(GRC_MODE);
12168     tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12170     if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
12171         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
12173       ret = tg3_nvram_write_block_buffered(tp, offset, len,
12176       ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12180     grc_mode = tr32(GRC_MODE);
12181     tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12183     tg3_disable_nvram_access(tp);
12184     tg3_nvram_unlock(tp);
12187   if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
12188     tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12195 struct subsys_tbl_ent {
12196   u16 subsys_vendor, subsys_devid;
12200 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12201   /* Broadcom boards. */
12202   { TG3PCI_SUBVENDOR_ID_BROADCOM, TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12204   { TG3PCI_SUBVENDOR_ID_BROADCOM, TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12206   { TG3PCI_SUBVENDOR_ID_BROADCOM, TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12208   { TG3PCI_SUBVENDOR_ID_BROADCOM, TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12210   { TG3PCI_SUBVENDOR_ID_BROADCOM, TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12212   { TG3PCI_SUBVENDOR_ID_BROADCOM, TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12214   { TG3PCI_SUBVENDOR_ID_BROADCOM, TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12216   { TG3PCI_SUBVENDOR_ID_BROADCOM, TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12218   { TG3PCI_SUBVENDOR_ID_BROADCOM, TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12220   { TG3PCI_SUBVENDOR_ID_BROADCOM, TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12222   { TG3PCI_SUBVENDOR_ID_BROADCOM, TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12226   { TG3PCI_SUBVENDOR_ID_3COM, TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12228   { TG3PCI_SUBVENDOR_ID_3COM, TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12230   { TG3PCI_SUBVENDOR_ID_3COM, TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12232   { TG3PCI_SUBVENDOR_ID_3COM, TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12234   { TG3PCI_SUBVENDOR_ID_3COM, TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12238   { TG3PCI_SUBVENDOR_ID_DELL, TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12240   { TG3PCI_SUBVENDOR_ID_DELL, TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12242   { TG3PCI_SUBVENDOR_ID_DELL, TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12244   { TG3PCI_SUBVENDOR_ID_DELL, TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12247   /* Compaq boards. */
12248   { TG3PCI_SUBVENDOR_ID_COMPAQ, TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12250   { TG3PCI_SUBVENDOR_ID_COMPAQ, TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12252   { TG3PCI_SUBVENDOR_ID_COMPAQ, TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12254   { TG3PCI_SUBVENDOR_ID_COMPAQ, TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12256   { TG3PCI_SUBVENDOR_ID_COMPAQ, TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12260   { TG3PCI_SUBVENDOR_ID_IBM, TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12264 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12268   for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12269     if ((subsys_id_to_phy_id[i].subsys_vendor ==
12270          tp->pdev->subsystem_vendor) &&
12271         (subsys_id_to_phy_id[i].subsys_devid ==
12272          tp->pdev->subsystem_device))
12273       return &subsys_id_to_phy_id[i];
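/* tg3_get_eeprom_hw_cfg() pulls the bootstrap configuration out of NIC
 * SRAM: PHY id, LED mode, WOL/ASF/APE enables and assorted workaround
 * flags.  When the SRAM signature is missing, the PHY probe later falls
 * back to the subsystem-ID table above.
 */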
12278 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12283   /* On some early chips the SRAM cannot be accessed in D3hot state,
12284    * so we need to make sure we're in D0.
12286   pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12287   pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12288   pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12291   /* Make sure register accesses (indirect or otherwise)
12292    * will function correctly.
12294   pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12295                          tp->misc_host_ctrl);
12297   /* The memory arbiter has to be enabled in order for SRAM accesses
12298    * to succeed.  Normally on powerup the tg3 chip firmware will make
12299    * sure it is enabled, but other entities such as system netboot
12300    * code might disable it.
12302   val = tr32(MEMARB_MODE);
12303   tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12305   tp->phy_id = TG3_PHY_ID_INVALID;
12306   tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12308   /* Assume an onboard device and WOL capable by default. */
12309   tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
12311   if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12312     if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12313       tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12314       tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12316     val = tr32(VCPU_CFGSHDW);
12317     if (val & VCPU_CFGSHDW_ASPM_DBNC)
12318       tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12319     if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12320         (val & VCPU_CFGSHDW_WOL_MAGPKT))
12321       tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12325   tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12326   if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12327     u32 nic_cfg, led_cfg;
12328     u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12329     int eeprom_phy_serdes = 0;
12331     tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12332     tp->nic_sram_data_cfg = nic_cfg;
12334     tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12335     ver >>= NIC_SRAM_DATA_VER_SHIFT;
12336     if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12337         (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12338         (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12339         (ver > 0) && (ver < 0x100))
12340       tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12342     if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12343       tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12345     if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12346         NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12347       eeprom_phy_serdes = 1;
12349     tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12350     if (nic_phy_id != 0) {
12351       u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12352       u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12354       eeprom_phy_id  = (id1 >> 16) << 10;
12355       eeprom_phy_id |= (id2 & 0xfc00) << 16;
12356       eeprom_phy_id |= (id2 & 0x03ff) << 0;
12360     tp->phy_id = eeprom_phy_id;
12361     if (eeprom_phy_serdes) {
12362       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12363         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12365         tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12368     if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12369       led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12370                         SHASTA_EXT_LED_MODE_MASK);
12372       led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12376     case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12377       tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12380     case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12381       tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12384     case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12385       tp->led_ctrl = LED_CTRL_MODE_MAC;
12387       /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12388        * read on some older 5700/5701 bootcode.
12390       if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12392           GET_ASIC_REV(tp->pci_chip_rev_id) ==
12394         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12398     case SHASTA_EXT_LED_SHARED:
12399       tp->led_ctrl = LED_CTRL_MODE_SHARED;
12400       if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12401           tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12402         tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12403                          LED_CTRL_MODE_PHY_2);
12406     case SHASTA_EXT_LED_MAC:
12407       tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12410     case SHASTA_EXT_LED_COMBO:
12411       tp->led_ctrl = LED_CTRL_MODE_COMBO;
12412       if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12413         tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12414                          LED_CTRL_MODE_PHY_2);
12419     if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12420          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12421         tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12422       tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12424     if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12425       tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12427     if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12428       tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
12429       if ((tp->pdev->subsystem_vendor ==
12430            PCI_VENDOR_ID_ARIMA) &&
12431           (tp->pdev->subsystem_device == 0x205a ||
12432            tp->pdev->subsystem_device == 0x2063))
12433         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12435       tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12436       tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12439     if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12440       tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
12441       if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12442         tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
12445     if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12446         (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12447       tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
12449     if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12450         !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12451       tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
12453     if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
12454         (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
12455       tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12457     if (cfg2 & (1 << 17))
12458       tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12460     /* serdes signal pre-emphasis in register 0x590 set by */
12461     /* bootcode if bit 18 is set */
12462     if (cfg2 & (1 << 18))
12463       tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12465     if (((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) ||
12466          ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12467            GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) &&
12468         (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12469       tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12471     if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12472         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12473         !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
12476       tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12477       if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12478         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12481     if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12482       tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
12483     if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12484       tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
12485     if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12486       tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12489   if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
12490     device_set_wakeup_enable(&tp->pdev->dev,
12491                              tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12493     device_set_wakeup_capable(&tp->pdev->dev, false);
12496 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12501   tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12502   tw32(OTP_CTRL, cmd);
12504   /* Wait for up to 1 ms for command to execute. */
12505   for (i = 0; i < 100; i++) {
12506     val = tr32(OTP_STATUS);
12507     if (val & OTP_STATUS_CMD_DONE)
12512   return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12515 /* Read the gphy configuration from the OTP region of the chip.  The gphy
12516  * configuration is a 32-bit value that straddles the alignment boundary.
12517  * We do two 32-bit reads and then shift and merge the results.
12519 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12521   u32 bhalf_otp, thalf_otp;
12523   tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12525   if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12528   tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12530   if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12533   thalf_otp = tr32(OTP_READ_DATA);
12535   tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12537   if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12540   bhalf_otp = tr32(OTP_READ_DATA);
12542   return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12545 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12547   u32 adv = ADVERTISED_Autoneg |
12550   if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12551     adv |= ADVERTISED_1000baseT_Half |
12552            ADVERTISED_1000baseT_Full;
12554   if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12555     adv |= ADVERTISED_100baseT_Half |
12556            ADVERTISED_100baseT_Full |
12557            ADVERTISED_10baseT_Half |
12558            ADVERTISED_10baseT_Full |
12561     adv |= ADVERTISED_FIBRE;
12563   tp->link_config.advertising = adv;
12564   tp->link_config.speed = SPEED_INVALID;
12565   tp->link_config.duplex = DUPLEX_INVALID;
12566   tp->link_config.autoneg = AUTONEG_ENABLE;
12567   tp->link_config.active_speed = SPEED_INVALID;
12568   tp->link_config.active_duplex = DUPLEX_INVALID;
12569   tp->link_config.orig_speed = SPEED_INVALID;
12570   tp->link_config.orig_duplex = DUPLEX_INVALID;
12571   tp->link_config.orig_autoneg = AUTONEG_INVALID;
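/* tg3_phy_probe() reads the PHY id from the MII registers when ASF/APE
 * firmware is not using the PHY, falling back to the id cached from NIC
 * SRAM or the subsystem-ID table, and then brings the copper PHY to a
 * sane autonegotiation state.
 */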
12574 static int __devinit tg3_phy_probe(struct tg3 *tp)
12576   u32 hw_phy_id_1, hw_phy_id_2;
12577   u32 hw_phy_id, hw_phy_id_masked;
12580   /* flow control autonegotiation is default behavior */
12581   tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12582   tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12584   if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
12585     return tg3_phy_init(tp);
12587   /* Reading the PHY ID register can conflict with ASF
12588    * firmware access to the PHY hardware.
12591   if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12592       (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
12593     hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12595     /* Now read the physical PHY_ID from the chip and verify
12596      * that it is sane.  If it doesn't look good, we fall back
12597      * to either the hard-coded table based PHY_ID and failing
12598      * that the value found in the eeprom area.
12600     err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12601     err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12603     hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
12604     hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12605     hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12607     hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12610   if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12611     tp->phy_id = hw_phy_id;
12612     if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12613       tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12615       tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12617     if (tp->phy_id != TG3_PHY_ID_INVALID) {
12618       /* Do nothing, phy ID already set up in
12619        * tg3_get_eeprom_hw_cfg().
12622       struct subsys_tbl_ent *p;
12624       /* No eeprom signature?  Try the hardcoded
12625        * subsys device table.
12627       p = tg3_lookup_by_subsys(tp);
12631       tp->phy_id = p->phy_id;
12633           tp->phy_id == TG3_PHY_ID_BCM8002)
12634         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12638   if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12639       ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12640         tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12641        (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12642         tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12643     tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12645   tg3_phy_init_link_config(tp);
12647   if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12648       !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12649       !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
12650     u32 bmsr, adv_reg, tg3_ctrl, mask;
12652     tg3_readphy(tp, MII_BMSR, &bmsr);
12653     if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12654         (bmsr & BMSR_LSTATUS))
12655       goto skip_phy_reset;
12657     err = tg3_phy_reset(tp);
12661     adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12662                ADVERTISE_100HALF | ADVERTISE_100FULL |
12663                ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12665     if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
12666       tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12667                   MII_TG3_CTRL_ADV_1000_FULL);
12668       if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12669           tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12670         tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12671                      MII_TG3_CTRL_ENABLE_AS_MASTER);
12674     mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12675             ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12676             ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12677     if (!tg3_copper_is_advertising_all(tp, mask)) {
12678       tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12680       if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12681         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12683       tg3_writephy(tp, MII_BMCR,
12684                    BMCR_ANENABLE | BMCR_ANRESTART);
12686     tg3_phy_set_wirespeed(tp);
12688     tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12689     if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12690       tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12694   if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12695     err = tg3_init_5401phy_dsp(tp);
12699     err = tg3_init_5401phy_dsp(tp);
12705 static void __devinit tg3_read_vpd(struct tg3 *tp)
12708   unsigned int block_end, rosize, len;
12712   if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12713       tg3_nvram_read(tp, 0x0, &magic))
12716   vpd_data = kmalloc(TG3_NVM_VPD_LEN, GFP_KERNEL);
12720   if (magic == TG3_EEPROM_MAGIC) {
12721     for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
12724       /* The data is in little-endian format in NVRAM.
12725        * Use the big-endian read routines to preserve
12726        * the byte order as it exists in NVRAM.
12728       if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
12729         goto out_not_found;
12731       memcpy(&vpd_data[i], &tmp, sizeof(tmp));
12735     unsigned int pos = 0;
12737     for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
12738       cnt = pci_read_vpd(tp->pdev, pos,
12739                          TG3_NVM_VPD_LEN - pos,
12741       if (cnt == -ETIMEDOUT || cnt == -EINTR)
12744         goto out_not_found;
12746     if (pos != TG3_NVM_VPD_LEN)
12747       goto out_not_found;
12750   i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
12751                        PCI_VPD_LRDT_RO_DATA);
12753     goto out_not_found;
12755   rosize = pci_vpd_lrdt_size(&vpd_data[i]);
12756   block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
12757   i += PCI_VPD_LRDT_TAG_SIZE;
12759   if (block_end > TG3_NVM_VPD_LEN)
12760     goto out_not_found;
12762   j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12763                                 PCI_VPD_RO_KEYWORD_MFR_ID);
12765     len = pci_vpd_info_field_size(&vpd_data[j]);
12767     j += PCI_VPD_INFO_FLD_HDR_SIZE;
12768     if (j + len > block_end || len != 4 ||
12769         memcmp(&vpd_data[j], "1028", 4))
12772     j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12773                                   PCI_VPD_RO_KEYWORD_VENDOR0);
12777     len = pci_vpd_info_field_size(&vpd_data[j]);
12779     j += PCI_VPD_INFO_FLD_HDR_SIZE;
12780     if (j + len > block_end)
12783     memcpy(tp->fw_ver, &vpd_data[j], len);
12784     strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
12788   i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12789                                 PCI_VPD_RO_KEYWORD_PARTNO);
12791     goto out_not_found;
12793   len = pci_vpd_info_field_size(&vpd_data[i]);
12795   i += PCI_VPD_INFO_FLD_HDR_SIZE;
12796   if (len > TG3_BPN_SIZE ||
12797       (len + i) > TG3_NVM_VPD_LEN)
12798     goto out_not_found;
12800   memcpy(tp->board_part_number, &vpd_data[i], len);
12804   if (tp->board_part_number[0])
12808   if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
12809     if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
12810       strcpy(tp->board_part_number, "BCM5717");
12811     else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
12812       strcpy(tp->board_part_number, "BCM5718");
12815   } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
12816     if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12817       strcpy(tp->board_part_number, "BCM57780");
12818     else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12819       strcpy(tp->board_part_number, "BCM57760");
12820     else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12821       strcpy(tp->board_part_number, "BCM57790");
12822     else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12823       strcpy(tp->board_part_number, "BCM57788");
12826   } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
12827     if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
12828       strcpy(tp->board_part_number, "BCM57761");
12829     else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
12830       strcpy(tp->board_part_number, "BCM57765");
12831     else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
12832       strcpy(tp->board_part_number, "BCM57781");
12833     else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
12834       strcpy(tp->board_part_number, "BCM57785");
12835     else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
12836       strcpy(tp->board_part_number, "BCM57791");
12837     else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12838       strcpy(tp->board_part_number, "BCM57795");
12841   } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12842     strcpy(tp->board_part_number, "BCM95906");
12845     strcpy(tp->board_part_number, "none");
12849 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12853   if (tg3_nvram_read(tp, offset, &val) ||
12854       (val & 0xfc000000) != 0x0c000000 ||
12855       tg3_nvram_read(tp, offset + 4, &val) ||
12862 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12864   u32 val, offset, start, ver_offset;
12866   bool newver = false;
12868   if (tg3_nvram_read(tp, 0xc, &offset) ||
12869       tg3_nvram_read(tp, 0x4, &start))
12872   offset = tg3_nvram_logical_addr(tp, offset);
12874   if (tg3_nvram_read(tp, offset, &val))
12877   if ((val & 0xfc000000) == 0x0c000000) {
12878     if (tg3_nvram_read(tp, offset + 4, &val))
12885     dst_off = strlen(tp->fw_ver);
12888     if (TG3_VER_SIZE - dst_off < 16 ||
12889         tg3_nvram_read(tp, offset + 8, &ver_offset))
12892     offset = offset + ver_offset - start;
12893     for (i = 0; i < 16; i += 4) {
12895       if (tg3_nvram_read_be32(tp, offset + i, &v))
12898       memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
12903     if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
12906     major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12907             TG3_NVM_BCVER_MAJSFT;
12908     minor = ver_offset & TG3_NVM_BCVER_MINMSK;
12909     snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
12910              "v%d.%02d", major, minor);
12914 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
12916   u32 val, major, minor;
12918   /* Use native endian representation */
12919   if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
12922   major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
12923           TG3_NVM_HWSB_CFG1_MAJSFT;
12924   minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
12925           TG3_NVM_HWSB_CFG1_MINSFT;
12927   snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
12930 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12932   u32 offset, major, minor, build;
12934   strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
12936   if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12939   switch (val & TG3_EEPROM_SB_REVISION_MASK) {
12940   case TG3_EEPROM_SB_REVISION_0:
12941     offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
12943   case TG3_EEPROM_SB_REVISION_2:
12944     offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
12946   case TG3_EEPROM_SB_REVISION_3:
12947     offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12949   case TG3_EEPROM_SB_REVISION_4:
12950     offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
12952   case TG3_EEPROM_SB_REVISION_5:
12953     offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
12955   case TG3_EEPROM_SB_REVISION_6:
12956     offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
12962   if (tg3_nvram_read(tp, offset, &val))
12965   build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12966           TG3_EEPROM_SB_EDH_BLD_SHFT;
12967   major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12968           TG3_EEPROM_SB_EDH_MAJ_SHFT;
12969   minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
12971   if (minor > 99 || build > 26)
12974   offset = strlen(tp->fw_ver);
12975   snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
12976            " v%d.%02d", major, minor);
12979     offset = strlen(tp->fw_ver);
12980     if (offset < TG3_VER_SIZE - 1)
12981       tp->fw_ver[offset] = 'a' + build - 1;
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;

		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
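/* When the APE is enabled and its firmware is ready, append the
 * management firmware (NCSI or DASH) version to tp->fw_ver.
 */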
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI;
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
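/* Top-level firmware version discovery: dispatch on the NVRAM magic to
 * the bootcode/self-boot/hardware-self-boot readers above, then append
 * the management firmware version where appropriate.
 */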
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
		goto done;

	tg3_read_mgmtfw_ver(tp);

done:
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);

static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
	dev->vlan_features |= flags;
}

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_LRG_PROD_RING_CAP)
		return TG3_RX_RET_MAX_SIZE_5717;
	else if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
		 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
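/* Probe-time discovery of chip identity, bug workarounds, register access
 * methods, DMA limitations and PHY quirks. Nearly every tp->tg3_flags*
 * capability bit is derived here.
 */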
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver has this
	 * workaround but turns MWI off all the time so it never uses
	 * it. This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
	 * has the register indirect write enable bit set before
	 * we try to access any of the MMIO registers. It is also
	 * critical that the PCI-X hw workaround situation is decided
	 * before that as well.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 prod_id_asic_rev;

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN2_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN15_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else
			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
					      &prod_id_asic_rev);

		tp->pci_chip_rev_id = prod_id_asic_rev;
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32 vendor;
			u32 device;
			u32 rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  0xa },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
				pci_dev_put(bridge);
				break;
			}
		}
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32 vendor;
			u32 device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
		tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}
	/* Initialize misc host control in PCI block. */
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		tp->pdev_peer = tg3_find_peer(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
		tp->tg3_flags3 |= TG3_FLG3_57765_PLUS;

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    (tp->tg3_flags3 & TG3_FLG3_57765_PLUS))
		tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
	    (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
		tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
		tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
	else {
		unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
		if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
			features |= NETIF_F_IPV6_CSUM;
		tp->dev->features |= features;
		vlan_features_add(tp->dev, features);
	}
	/* Determine TSO capabilities */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		; /* Do nothing. HW bug. */
	else if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)
		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
	else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
	else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
			tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
		tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;

		if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
		}

		if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
			tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
	else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
		tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
		tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
	}

	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
		tp->tg3_flags3 |= TG3_FLG3_LRG_PROD_RING_CAP;

	if ((tp->tg3_flags3 & TG3_FLG3_57765_PLUS) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
		tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
		tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
	if (tp->pcie_cap != 0) {
		u16 lnkctl;

		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;

		tp->pcie_readrq = 4096;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tp->pcie_readrq = 2048;

		pcie_set_readrq(tp->pdev, tp->pcie_readrq);

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
				tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
				tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
			tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
	} else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
		   (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
			u32 pm_reg;

			tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_32BIT;

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}
	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
		tp->write32 = tg3_write_indirect_reg32;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
	    (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLG2_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    (tp->tg3_flags3 & TG3_FLG3_57765_PLUS))
		tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;

	/* Set up tp->grc_local_ctrl before calling tg3_power_up().
	 * GPIO1 driven high will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	/* Force the chip into D0. */
	err = tg3_power_up(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
		return err;
	}
	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN &&
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
	} else {
		tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
	    !(tp->tg3_flags3 & TG3_FLG3_57765_PLUS)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	val &= GRC_MODE_HOST_STACKUP;
	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly. If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
		}
	}

	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tp->tg3_flags2 |= TG3_FLG2_IS_5788;

	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}
	/* Preserve the APE MAC_MODE bits */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = TG3_DEF_MAC_MODE;

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
	}

	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
	else
		tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
	else
		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;

	tp->rx_offset = NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
		tp->rx_offset = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
#ifdef CONFIG_SPARC
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
		if (PCI_FUNC(tp->pdev->devfn) & 1)
			mac_offset = 0xcc;
		if (PCI_FUNC(tp->pdev->devfn) > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
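/* Choose DMA read/write cache-line boundary bits for TG3PCI_DMA_RW_CTRL
 * from the host cache-line size, the bus type (PCI, PCI-X, PCIe) and the
 * per-architecture boundary preference.
 */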
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects. We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
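/* Run one DMA transaction between the host buffer and NIC SRAM using an
 * internal buffer descriptor, and poll the completion FIFO for the result.
 */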
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
				     dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}
	}

	return ret;
}
#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
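/* Exercise the DMA engine against a scratch buffer to validate the
 * calculated TG3PCI_DMA_RW_CTRL value, falling back to a 16-byte write
 * boundary when data corruption or a known-bad host bridge is detected.
 */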
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int i, ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS)
		goto out;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on. This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning. In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
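/* Fill in the buffer manager watermarks appropriate for the chip family,
 * for both standard and jumbo mbuf pools.
 */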
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_57765_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:				return "serdes";
	default:			return "unknown";
	}
}
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}
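/* Set the default ethtool coalescing parameters, adjusted for chips that
 * clear ticks on BD writes and for 5705+ parts without per-IRQ stats
 * coalescing.
 */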
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

static const struct net_device_ops tg3_netdev_ops_dma_bug = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
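/* PCI probe entry point: map the device, discover invariants, configure
 * DMA masks and TSO features, obtain the MAC address, set up per-vector
 * mailboxes, and register the net device.
 */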
14651 static int __devinit
tg3_init_one(struct pci_dev
*pdev
,
14652 const struct pci_device_id
*ent
)
14654 struct net_device
*dev
;
14656 int i
, err
, pm_cap
;
14657 u32 sndmbx
, rcvmbx
, intmbx
;
14659 u64 dma_mask
, persist_dma_mask
;
14661 printk_once(KERN_INFO
"%s\n", version
);
14663 err
= pci_enable_device(pdev
);
14665 dev_err(&pdev
->dev
, "Cannot enable PCI device, aborting\n");
14669 err
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
14671 dev_err(&pdev
->dev
, "Cannot obtain PCI resources, aborting\n");
14672 goto err_out_disable_pdev
;
14675 pci_set_master(pdev
);
14677 /* Find power-management capability. */
14678 pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
14680 dev_err(&pdev
->dev
,
14681 "Cannot find Power Management capability, aborting\n");
14683 goto err_out_free_res
;
14686 dev
= alloc_etherdev_mq(sizeof(*tp
), TG3_IRQ_MAX_VECS
);
14688 dev_err(&pdev
->dev
, "Etherdev alloc failed, aborting\n");
14690 goto err_out_free_res
;
14693 SET_NETDEV_DEV(dev
, &pdev
->dev
);
14695 dev
->features
|= NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
14697 tp
= netdev_priv(dev
);
14700 tp
->pm_cap
= pm_cap
;
14701 tp
->rx_mode
= TG3_DEF_RX_MODE
;
14702 tp
->tx_mode
= TG3_DEF_TX_MODE
;
14705 tp
->msg_enable
= tg3_debug
;
14707 tp
->msg_enable
= TG3_DEF_MSG_ENABLE
;
14709 /* The word/byte swap controls here control register access byte
14710 * swapping. DMA data byte swapping is controlled in the GRC_MODE
14713 tp
->misc_host_ctrl
=
14714 MISC_HOST_CTRL_MASK_PCI_INT
|
14715 MISC_HOST_CTRL_WORD_SWAP
|
14716 MISC_HOST_CTRL_INDIR_ACCESS
|
14717 MISC_HOST_CTRL_PCISTATE_RW
;
14719 /* The NONFRM (non-frame) byte/word swap controls take effect
14720 * on descriptor entries, anything which isn't packet data.
14722 * The StrongARM chips on the board (one for tx, one for rx)
14723 * are running in big-endian mode.
14725 tp
->grc_mode
= (GRC_MODE_WSWAP_DATA
| GRC_MODE_BSWAP_DATA
|
14726 GRC_MODE_WSWAP_NONFRM_DATA
);
14727 #ifdef __BIG_ENDIAN
14728 tp
->grc_mode
|= GRC_MODE_BSWAP_NONFRM_DATA
;
14730 spin_lock_init(&tp
->lock
);
14731 spin_lock_init(&tp
->indirect_lock
);
14732 INIT_WORK(&tp
->reset_task
, tg3_reset_task
);
14734 tp
->regs
= pci_ioremap_bar(pdev
, BAR_0
);
14736 dev_err(&pdev
->dev
, "Cannot map device registers, aborting\n");
14738 goto err_out_free_dev
;
14741 tp
->rx_pending
= TG3_DEF_RX_RING_PENDING
;
14742 tp
->rx_jumbo_pending
= TG3_DEF_RX_JUMBO_RING_PENDING
;
14744 dev
->ethtool_ops
= &tg3_ethtool_ops
;
14745 dev
->watchdog_timeo
= TG3_TX_TIMEOUT
;
14746 dev
->irq
= pdev
->irq
;
14748 err
= tg3_get_invariants(tp
);
14750 dev_err(&pdev
->dev
,
14751 "Problem fetching invariants of chip, aborting\n");
14752 goto err_out_iounmap
;
14755 if ((tp
->tg3_flags3
& TG3_FLG3_5755_PLUS
) &&
14756 !(tp
->tg3_flags3
& TG3_FLG3_5717_PLUS
))
14757 dev
->netdev_ops
= &tg3_netdev_ops
;
14759 dev
->netdev_ops
= &tg3_netdev_ops_dma_bug
;
14762 /* The EPB bridge inside 5714, 5715, and 5780 and any
14763 * device behind the EPB cannot support DMA addresses > 40-bit.
14764 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
14765 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
14766 * do DMA address check in tg3_start_xmit().
14768 if (tp
->tg3_flags2
& TG3_FLG2_IS_5788
)
14769 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(32);
14770 else if (tp
->tg3_flags
& TG3_FLAG_40BIT_DMA_BUG
) {
14771 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(40);
14772 #ifdef CONFIG_HIGHMEM
14773 dma_mask
= DMA_BIT_MASK(64);
14776 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(64);
14778 /* Configure DMA attributes. */
14779 if (dma_mask
> DMA_BIT_MASK(32)) {
14780 err
= pci_set_dma_mask(pdev
, dma_mask
);
14782 dev
->features
|= NETIF_F_HIGHDMA
;
14783 err
= pci_set_consistent_dma_mask(pdev
,
14786 dev_err(&pdev
->dev
, "Unable to obtain 64 bit "
14787 "DMA for consistent allocations\n");
14788 goto err_out_iounmap
;
14792 if (err
|| dma_mask
== DMA_BIT_MASK(32)) {
14793 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
14795 dev_err(&pdev
->dev
,
14796 "No usable DMA configuration, aborting\n");
14797 goto err_out_iounmap
;
14801 tg3_init_bufmgr_config(tp
);
14803 /* Selectively allow TSO based on operating conditions */
14804 if ((tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) ||
14805 (tp
->fw_needed
&& !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)))
14806 tp
->tg3_flags2
|= TG3_FLG2_TSO_CAPABLE
;
14808 tp
->tg3_flags2
&= ~(TG3_FLG2_TSO_CAPABLE
| TG3_FLG2_TSO_BUG
);
14809 tp
->fw_needed
= NULL
;
14812 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
)
14813 tp
->fw_needed
= FIRMWARE_TG3
;

        /* TSO is on by default on chips that support hardware TSO.
         * Firmware TSO on older chips gives lower performance, so it
         * is off by default, but can be enabled using ethtool.
         */
        if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
            (dev->features & NETIF_F_IP_CSUM)) {
                dev->features |= NETIF_F_TSO;
                vlan_features_add(dev, NETIF_F_TSO);
        }

        if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
            (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
                if (dev->features & NETIF_F_IPV6_CSUM) {
                        dev->features |= NETIF_F_TSO6;
                        vlan_features_add(dev, NETIF_F_TSO6);
                }
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
                     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
                        dev->features |= NETIF_F_TSO_ECN;
                        vlan_features_add(dev, NETIF_F_TSO_ECN);
                }
        }

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
            !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
                tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
                tp->rx_pending = 63;
        }

        err = tg3_get_device_address(tp);
        if (err) {
                dev_err(&pdev->dev,
                        "Could not obtain valid ethernet address, aborting\n");
                goto err_out_iounmap;
        }

        if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
                tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
                if (!tp->aperegs) {
                        dev_err(&pdev->dev,
                                "Cannot map APE registers, aborting\n");
                        err = -ENOMEM;
                        goto err_out_iounmap;
                }

                tg3_ape_lock_init(tp);

                if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
                        tg3_read_dash_ver(tp);
        }

        /*
         * Reset chip in case UNDI or EFI driver did not shut it down.
         * The DMA self test will enable WDMAC and we'll see (spurious)
         * pending DMA on the PCI bus at that point.
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        }

        err = tg3_test_dma(tp);
        if (err) {
                dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
                goto err_out_apeunmap;
        }

        intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
        rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
        sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
        for (i = 0; i < tp->irq_max; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
                tnapi->int_mbox = intmbx;
                tnapi->consmbox = rcvmbx;
                tnapi->prodmbox = sndmbx;

                if (i)
                        tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
                else
                        tnapi->coal_now = HOSTCC_MODE_NOW;

                if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
                        break;

                /*
                 * If we support MSIX, we'll be using RSS.  If we're using
                 * RSS, the first vector only handles link interrupts and the
                 * remaining vectors handle rx and tx interrupts.  Reuse the
                 * mailbox values for the next iteration.  The values we setup
                 * above are still useful for the single vectored mode.
                 */
        }
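
        /*
         * Note on the assignments above: NAPI context 0 kicks the coalescing
         * engine with HOSTCC_MODE_NOW, while context i (i >= 1) uses
         * HOSTCC_MODE_COAL_VEC1_NOW << (i - 1), i.e. one "now" bit per
         * MSI-X vector starting at vector 1.
         */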

        pci_set_drvdata(pdev, dev);

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting\n");
                goto err_out_apeunmap;
        }

        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
                    tp->pci_chip_rev_id,
                    tg3_bus_string(tp, str),
                    dev->dev_addr);

        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                struct phy_device *phydev;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                netdev_info(dev,
                            "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
                            phydev->drv->name, dev_name(&phydev->dev));
        } else {
                char *ethtype;

                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        ethtype = "10/100Base-TX";
                else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        ethtype = "1000Base-SX";
                else
                        ethtype = "10/100/1000Base-T";

                netdev_info(dev, "attached PHY is %s (%s Ethernet) "
                            "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype,
                            (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0);
        }

        netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
                    (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
                    (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
                    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
                    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
        netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
                    tp->dma_rwctrl,
                    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
                    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

        return 0;

err_out_apeunmap:
        if (tp->aperegs) {
                iounmap(tp->aperegs);
                tp->aperegs = NULL;
        }

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}
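
/*
 * The error labels above unwind in the reverse order of the resource
 * acquisition done earlier in tg3_init_one(), so each goto releases only
 * what was successfully set up before the corresponding failure point.
 */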

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct tg3 *tp = netdev_priv(dev);

                if (tp->fw)
                        release_firmware(tp->fw);

                cancel_work_sync(&tp->reset_task);

                if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
                        tg3_phy_fini(tp);
                        tg3_mdio_fini(tp);
                }

                unregister_netdev(dev);
                if (tp->aperegs) {
                        iounmap(tp->aperegs);
                        tp->aperegs = NULL;
                }
                if (tp->regs) {
                        iounmap(tp->regs);
                        tp->regs = NULL;
                }
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (!netif_running(dev))
                return 0;

        flush_work_sync(&tp->reset_task);
        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
        tg3_full_unlock(tp);

        err = tg3_power_down_prepare(tp);
        if (err) {
                int err2;

                tg3_full_lock(tp, 0);

                tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
                err2 = tg3_restart_hw(tp, 1);
                if (err2)
                        goto out;

                tp->timer.expires = jiffies + tp->timer_offset;
                add_timer(&tp->timer);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);
        }

        return err;
}
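
/*
 * If tg3_power_down_prepare() fails above, the suspend handler restarts the
 * hardware and reattaches the net device before returning the error, so a
 * failed suspend leaves the interface usable rather than half-stopped.
 */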

static int tg3_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (!netif_running(dev))
                return 0;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        err = tg3_restart_hw(tp, 1);
        if (err)
                goto out;

        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
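
/*
 * For reference: SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) expands
 * (roughly, per the generic definition in <linux/pm.h>) to a dev_pm_ops
 * that points the system-sleep and hibernation callbacks at the same two
 * handlers, i.e. approximately:
 *
 *      static const struct dev_pm_ops tg3_pm_ops = {
 *              .suspend  = tg3_suspend,  .resume  = tg3_resume,
 *              .freeze   = tg3_suspend,  .thaw    = tg3_resume,
 *              .poweroff = tg3_suspend,  .restore = tg3_resume,
 *      };
 */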

static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
        .driver.pm      = TG3_PM_OPS,
};

static int __init tg3_init(void)
{
        return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);