tg3: Pull phy int lpbk setup into separate func
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag) \
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag) \
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag) \
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		119
#define DRV_MODULE_VERSION \
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 18, 2011"

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
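/* Indirect register access tunnels MMIO reads and writes through a
 * pair of PCI config space registers: the target offset is written to
 * TG3PCI_REG_BASE_ADDR and the data moves through TG3PCI_REG_DATA.
 * indirect_lock serializes the two-step sequence against other users.
 */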
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
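/* Mailbox accesses follow the same config space tunnel, except that
 * the receive return and standard ring producer indices have dedicated
 * config space mirrors, and the remaining mailboxes are aliased into
 * the indirect window at offset 0x5600 (the GRC mailbox region).
 */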
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
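/* The APE (Application Processing Engine) management firmware shares
 * hardware resources with the driver.  Each shared resource has a
 * request and a grant register; a lock is acquired by setting this
 * function's bit in the request register and polling until the same
 * bit appears in the grant register.  The 5761 uses the original
 * TG3_APE_LOCK_REQ / TG3_APE_LOCK_GRANT registers; later chips use
 * the per-lock variants.
 */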
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++) {
		if (i == TG3_APE_LOCK_GPIO)
			continue;
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
	}

	/* Clear the correct bit of the GPIO lock too. */
	if (!tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_REQ_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
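/* Interrupt mailbox semantics: writing 0x00000001 to a vector's
 * interrupt mailbox masks that vector, while writing last_tag << 24
 * (low bits clear) unmasks it and tells the chip how much status
 * block work the driver has already processed.
 */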
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
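/* MII management frames are composed in the MAC_MI_COM register: the
 * PHY and register addresses are merged with a read or write command
 * plus MI_COM_START, then the register is polled until the hardware
 * clears MI_COM_BUSY (up to PHY_BUSY_LOOPS iterations).  Auto-polling
 * is suspended around the access and restored afterwards.
 */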
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
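/* Clause 45 registers are reached through the clause 22 MMD access
 * registers: select the device address in MII_TG3_MMD_CTRL, write the
 * register address to MII_TG3_MMD_ADDRESS, then switch the control
 * register to no-increment data mode and move the data word through
 * MII_TG3_MMD_ADDRESS.
 */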
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
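/* On 5717 and later parts the PHY sits at an MDIO address derived from
 * the PCI function number (plus a further offset of 7 when the port is
 * strapped for serdes); older chips always use TG3_PHY_MII_ADDR.
 */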
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
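/* Mirror the current link state to the management firmware: pairs of
 * MII registers (BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000, PHYADDR)
 * are packed into the four firmware data mailbox words before raising
 * the driver event.
 */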
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
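/* Resolve 1000BASE-X pause per the 802.3 priority rules: symmetric
 * pause advertised on both ends enables flow control in both
 * directions, while asymmetric advertisement yields a TX-only or
 * RX-only result.
 */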
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
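/* Factory calibration values are fused into on-chip OTP memory.
 * Unpack the fields and program them into the PHY's DSP coefficient
 * registers; the DSP registers are only writable between the SMDSP
 * enable/disable bracket.
 */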
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
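/* Exercise each of the four DSP channels (spaced 0x2000 apart) by
 * writing a known test pattern and reading it back; any mismatch or
 * macro timeout asks the caller to reset the PHY and try again.
 */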
1909 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1911 static const u32 test_pat[4][6] = {
1912 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1913 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1914 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1915 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1917 int chan;
1919 for (chan = 0; chan < 4; chan++) {
1920 int i;
1922 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1923 (chan * 0x2000) | 0x0200);
1924 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1926 for (i = 0; i < 6; i++)
1927 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1928 test_pat[chan][i]);
1930 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1931 if (tg3_wait_macro_done(tp)) {
1932 *resetp = 1;
1933 return -EBUSY;
1936 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1937 (chan * 0x2000) | 0x0200);
1938 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1939 if (tg3_wait_macro_done(tp)) {
1940 *resetp = 1;
1941 return -EBUSY;
1944 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1945 if (tg3_wait_macro_done(tp)) {
1946 *resetp = 1;
1947 return -EBUSY;
1950 for (i = 0; i < 6; i += 2) {
1951 u32 low, high;
1953 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1954 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1955 tg3_wait_macro_done(tp)) {
1956 *resetp = 1;
1957 return -EBUSY;
1959 low &= 0x7fff;
1960 high &= 0x000f;
1961 if (low != test_pat[chan][i] ||
1962 high != test_pat[chan][i+1]) {
1963 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1964 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1965 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1967 return -EBUSY;
1972 return 0;
1975 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1977 int chan;
1979 for (chan = 0; chan < 4; chan++) {
1980 int i;
1982 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1983 (chan * 0x2000) | 0x0200);
1984 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1985 for (i = 0; i < 6; i++)
1986 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1987 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1988 if (tg3_wait_macro_done(tp))
1989 return -EBUSY;
1992 return 0;
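/* PHY reset workaround for 5703/5704/5705: reset the PHY, force a
 * 1000 Mbps full-duplex master link, and exercise the DSP with the
 * test patterns above, retrying (up to 10 times) with a fresh BMCR
 * reset whenever verification fails. The original MII_CTRL1000 and
 * MII_TG3_EXT_CTRL settings are restored afterwards.
 */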
1995 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1997 u32 reg32, phy9_orig;
1998 int retries, do_phy_reset, err;
2000 retries = 10;
2001 do_phy_reset = 1;
2002 do {
2003 if (do_phy_reset) {
2004 err = tg3_bmcr_reset(tp);
2005 if (err)
2006 return err;
2007 do_phy_reset = 0;
2010 /* Disable transmitter and interrupt. */
2011 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2012 continue;
2014 reg32 |= 0x3000;
2015 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2017 /* Set full-duplex, 1000 Mbps. */
2018 tg3_writephy(tp, MII_BMCR,
2019 BMCR_FULLDPLX | BMCR_SPEED1000);
2021 /* Set to master mode. */
2022 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2023 continue;
2025 tg3_writephy(tp, MII_CTRL1000,
2026 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2028 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2029 if (err)
2030 return err;
2032 /* Block the PHY control access. */
2033 tg3_phydsp_write(tp, 0x8005, 0x0800);
2035 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2036 if (!err)
2037 break;
2038 } while (--retries);
2040 err = tg3_phy_reset_chanpat(tp);
2041 if (err)
2042 return err;
2044 tg3_phydsp_write(tp, 0x8005, 0x0000);
2046 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2047 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2049 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2051 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2053 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2054 reg32 &= ~0x3000;
2055 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2056 } else if (!err)
2057 err = -EBUSY;
2059 return err;
2062 /* This will reset the tigon3 PHY and reapply all workarounds. There
2063 * is no FORCE argument; callers decide when a reset is required.
2064 */
2065 static int tg3_phy_reset(struct tg3 *tp)
2067 u32 val, cpmuctrl;
2068 int err;
2070 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2071 val = tr32(GRC_MISC_CFG);
2072 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2073 udelay(40);
2075 err = tg3_readphy(tp, MII_BMSR, &val);
2076 err |= tg3_readphy(tp, MII_BMSR, &val);
2077 if (err != 0)
2078 return -EBUSY;
2080 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2081 netif_carrier_off(tp->dev);
2082 tg3_link_report(tp);
2085 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2086 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2087 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2088 err = tg3_phy_reset_5703_4_5(tp);
2089 if (err)
2090 return err;
2091 goto out;
2094 cpmuctrl = 0;
2095 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2096 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2097 cpmuctrl = tr32(TG3_CPMU_CTRL);
2098 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2099 tw32(TG3_CPMU_CTRL,
2100 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2103 err = tg3_bmcr_reset(tp);
2104 if (err)
2105 return err;
2107 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2108 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2109 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2111 tw32(TG3_CPMU_CTRL, cpmuctrl);
2114 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2115 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2116 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2117 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2118 CPMU_LSPD_1000MB_MACCLK_12_5) {
2119 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2120 udelay(40);
2121 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2125 if (tg3_flag(tp, 5717_PLUS) &&
2126 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2127 return 0;
2129 tg3_phy_apply_otp(tp);
2131 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2132 tg3_phy_toggle_apd(tp, true);
2133 else
2134 tg3_phy_toggle_apd(tp, false);
2136 out:
2137 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2138 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2139 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2140 tg3_phydsp_write(tp, 0x000a, 0x0323);
2141 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2144 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2145 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2146 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2149 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2150 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2151 tg3_phydsp_write(tp, 0x000a, 0x310b);
2152 tg3_phydsp_write(tp, 0x201f, 0x9506);
2153 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2154 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2156 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2157 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2158 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2159 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2160 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2161 tg3_writephy(tp, MII_TG3_TEST1,
2162 MII_TG3_TEST1_TRIM_EN | 0x4);
2163 } else
2164 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2166 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2170 /* Set Extended packet length bit (bit 14) on all chips
2171 * that support jumbo frames. */
2172 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2173 /* Cannot do read-modify-write on 5401 */
2174 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2175 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2176 /* Set bit 14 with read-modify-write to preserve other bits */
2177 err = tg3_phy_auxctl_read(tp,
2178 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2179 if (!err)
2180 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2181 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2184 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2185 * jumbo frames transmission.
2186 */
2187 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2188 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2189 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2190 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2193 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2194 /* adjust output voltage */
2195 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2198 tg3_phy_toggle_automdix(tp, 1);
2199 tg3_phy_set_wirespeed(tp);
2200 return 0;
2203 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2204 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2205 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2206 TG3_GPIO_MSG_NEED_VAUX)
2207 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2208 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2209 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2210 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2211 (TG3_GPIO_MSG_DRVR_PRES << 12))
2213 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2214 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2215 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2216 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2217 (TG3_GPIO_MSG_NEED_VAUX << 12))
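/* Each PCI function owns a 4-bit nibble in the shared GPIO message
 * word (kept in the APE GPIO_MSG register on 5717/5719, otherwise in
 * CPMU_DRV_STATUS):
 *
 *     bit [4*fn + 0]: driver present on function fn
 *     bit [4*fn + 1]: function fn wants Vaux kept powered
 *
 * tg3_set_function_status() below updates only this function's
 * nibble and returns the combined status of all functions.
 */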
2219 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2221 u32 status, shift;
2223 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2224 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2225 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2226 else
2227 status = tr32(TG3_CPMU_DRV_STATUS);
2229 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2230 status &= ~(TG3_GPIO_MSG_MASK << shift);
2231 status |= (newstat << shift);
2233 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2234 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2235 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2236 else
2237 tw32(TG3_CPMU_DRV_STATUS, status);
2239 return status >> TG3_APE_GPIO_MSG_SHIFT;
2242 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2244 if (!tg3_flag(tp, IS_NIC))
2245 return 0;
2247 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2248 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2249 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2250 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2251 return -EIO;
2253 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2255 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2256 TG3_GRC_LCLCTL_PWRSW_DELAY);
2258 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2259 } else {
2260 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2261 TG3_GRC_LCLCTL_PWRSW_DELAY);
2264 return 0;
2267 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2269 u32 grc_local_ctrl;
2271 if (!tg3_flag(tp, IS_NIC) ||
2272 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2273 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2274 return;
2276 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2278 tw32_wait_f(GRC_LOCAL_CTRL,
2279 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2280 TG3_GRC_LCLCTL_PWRSW_DELAY);
2282 tw32_wait_f(GRC_LOCAL_CTRL,
2283 grc_local_ctrl,
2284 TG3_GRC_LCLCTL_PWRSW_DELAY);
2286 tw32_wait_f(GRC_LOCAL_CTRL,
2287 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2288 TG3_GRC_LCLCTL_PWRSW_DELAY);
2291 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2293 if (!tg3_flag(tp, IS_NIC))
2294 return;
2296 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2297 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2298 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2299 (GRC_LCLCTRL_GPIO_OE0 |
2300 GRC_LCLCTRL_GPIO_OE1 |
2301 GRC_LCLCTRL_GPIO_OE2 |
2302 GRC_LCLCTRL_GPIO_OUTPUT0 |
2303 GRC_LCLCTRL_GPIO_OUTPUT1),
2304 TG3_GRC_LCLCTL_PWRSW_DELAY);
2305 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2306 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2307 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2308 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2309 GRC_LCLCTRL_GPIO_OE1 |
2310 GRC_LCLCTRL_GPIO_OE2 |
2311 GRC_LCLCTRL_GPIO_OUTPUT0 |
2312 GRC_LCLCTRL_GPIO_OUTPUT1 |
2313 tp->grc_local_ctrl;
2314 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2315 TG3_GRC_LCLCTL_PWRSW_DELAY);
2317 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2318 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2319 TG3_GRC_LCLCTL_PWRSW_DELAY);
2321 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2322 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2323 TG3_GRC_LCLCTL_PWRSW_DELAY);
2324 } else {
2325 u32 no_gpio2;
2326 u32 grc_local_ctrl = 0;
2328 /* Workaround to prevent drawing too much current. */
2329 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2330 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2331 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2332 grc_local_ctrl,
2333 TG3_GRC_LCLCTL_PWRSW_DELAY);
2336 /* On 5753 and variants, GPIO2 cannot be used. */
2337 no_gpio2 = tp->nic_sram_data_cfg &
2338 NIC_SRAM_DATA_CFG_NO_GPIO2;
2340 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2341 GRC_LCLCTRL_GPIO_OE1 |
2342 GRC_LCLCTRL_GPIO_OE2 |
2343 GRC_LCLCTRL_GPIO_OUTPUT1 |
2344 GRC_LCLCTRL_GPIO_OUTPUT2;
2345 if (no_gpio2) {
2346 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2347 GRC_LCLCTRL_GPIO_OUTPUT2);
2349 tw32_wait_f(GRC_LOCAL_CTRL,
2350 tp->grc_local_ctrl | grc_local_ctrl,
2351 TG3_GRC_LCLCTL_PWRSW_DELAY);
2353 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2355 tw32_wait_f(GRC_LOCAL_CTRL,
2356 tp->grc_local_ctrl | grc_local_ctrl,
2357 TG3_GRC_LCLCTL_PWRSW_DELAY);
2359 if (!no_gpio2) {
2360 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2361 tw32_wait_f(GRC_LOCAL_CTRL,
2362 tp->grc_local_ctrl | grc_local_ctrl,
2363 TG3_GRC_LCLCTL_PWRSW_DELAY);
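/* Negotiate the auxiliary power source with the other PCI functions.
 * The APE GPIO lock serializes the update; if any other function
 * still has its driver-present bit set, that driver is left in
 * charge of the power switch. Otherwise the last function standing
 * switches to Vaux if anyone asked for it (ASF/APE firmware or WoL),
 * or lets the device die with Vmain.
 */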
2368 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2370 u32 msg = 0;
2372 /* Serialize power state transitions */
2373 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2374 return;
2376 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2377 msg = TG3_GPIO_MSG_NEED_VAUX;
2379 msg = tg3_set_function_status(tp, msg);
2381 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2382 goto done;
2384 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2385 tg3_pwrsrc_switch_to_vaux(tp);
2386 else
2387 tg3_pwrsrc_die_with_vmain(tp);
2389 done:
2390 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2393 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2395 bool need_vaux = false;
2397 /* The GPIOs do something completely different on 57765. */
2398 if (!tg3_flag(tp, IS_NIC) ||
2399 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2400 return;
2402 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2403 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2404 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2405 tg3_frob_aux_power_5717(tp, include_wol ?
2406 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2407 return;
2410 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2411 struct net_device *dev_peer;
2413 dev_peer = pci_get_drvdata(tp->pdev_peer);
2415 /* remove_one() may have been run on the peer. */
2416 if (dev_peer) {
2417 struct tg3 *tp_peer = netdev_priv(dev_peer);
2419 if (tg3_flag(tp_peer, INIT_COMPLETE))
2420 return;
2422 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2423 tg3_flag(tp_peer, ENABLE_ASF))
2424 need_vaux = true;
2428 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2429 tg3_flag(tp, ENABLE_ASF))
2430 need_vaux = true;
2432 if (need_vaux)
2433 tg3_pwrsrc_switch_to_vaux(tp);
2434 else
2435 tg3_pwrsrc_die_with_vmain(tp);
2438 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2440 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2441 return 1;
2442 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2443 if (speed != SPEED_10)
2444 return 1;
2445 } else if (speed == SPEED_10)
2446 return 1;
2448 return 0;
2451 static int tg3_setup_phy(struct tg3 *, int);
2453 #define RESET_KIND_SHUTDOWN 0
2454 #define RESET_KIND_INIT 1
2455 #define RESET_KIND_SUSPEND 2
2457 static void tg3_write_sig_post_reset(struct tg3 *, int);
2458 static int tg3_halt_cpu(struct tg3 *, u32);
2460 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2462 u32 val;
2464 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2466 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2467 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2469 sg_dig_ctrl |=
2470 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2471 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2472 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2474 return;
2477 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2478 tg3_bmcr_reset(tp);
2479 val = tr32(GRC_MISC_CFG);
2480 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2481 udelay(40);
2482 return;
2483 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2484 u32 phytest;
2485 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2486 u32 phy;
2488 tg3_writephy(tp, MII_ADVERTISE, 0);
2489 tg3_writephy(tp, MII_BMCR,
2490 BMCR_ANENABLE | BMCR_ANRESTART);
2492 tg3_writephy(tp, MII_TG3_FET_TEST,
2493 phytest | MII_TG3_FET_SHADOW_EN);
2494 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2495 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2496 tg3_writephy(tp,
2497 MII_TG3_FET_SHDW_AUXMODE4,
2498 phy);
2500 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2502 return;
2503 } else if (do_low_power) {
2504 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2505 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2507 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2508 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2509 MII_TG3_AUXCTL_PCTL_VREG_11V;
2510 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2513 /* The PHY should not be powered down on some chips because
2514 * of bugs.
2515 */
2516 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2517 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2518 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2519 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2520 return;
2522 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2523 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2524 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2525 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2526 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2527 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2530 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2533 /* tp->lock is held. */
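/* Acquire the NVRAM software arbitration semaphore: request
 * SWARB_REQ_SET1 and poll for SWARB_GNT1 for up to 8000 * 20us
 * (about 160ms) before backing the request out and failing. The
 * lock is recursive via nvram_lock_cnt.
 */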
2534 static int tg3_nvram_lock(struct tg3 *tp)
2536 if (tg3_flag(tp, NVRAM)) {
2537 int i;
2539 if (tp->nvram_lock_cnt == 0) {
2540 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2541 for (i = 0; i < 8000; i++) {
2542 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2543 break;
2544 udelay(20);
2546 if (i == 8000) {
2547 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2548 return -ENODEV;
2551 tp->nvram_lock_cnt++;
2553 return 0;
2556 /* tp->lock is held. */
2557 static void tg3_nvram_unlock(struct tg3 *tp)
2559 if (tg3_flag(tp, NVRAM)) {
2560 if (tp->nvram_lock_cnt > 0)
2561 tp->nvram_lock_cnt--;
2562 if (tp->nvram_lock_cnt == 0)
2563 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2567 /* tp->lock is held. */
2568 static void tg3_enable_nvram_access(struct tg3 *tp)
2570 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2571 u32 nvaccess = tr32(NVRAM_ACCESS);
2573 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2577 /* tp->lock is held. */
2578 static void tg3_disable_nvram_access(struct tg3 *tp)
2580 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2581 u32 nvaccess = tr32(NVRAM_ACCESS);
2583 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2587 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2588 u32 offset, u32 *val)
2590 u32 tmp;
2591 int i;
2593 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2594 return -EINVAL;
2596 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2597 EEPROM_ADDR_DEVID_MASK |
2598 EEPROM_ADDR_READ);
2599 tw32(GRC_EEPROM_ADDR,
2600 tmp |
2601 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2602 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2603 EEPROM_ADDR_ADDR_MASK) |
2604 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2606 for (i = 0; i < 1000; i++) {
2607 tmp = tr32(GRC_EEPROM_ADDR);
2609 if (tmp & EEPROM_ADDR_COMPLETE)
2610 break;
2611 msleep(1);
2613 if (!(tmp & EEPROM_ADDR_COMPLETE))
2614 return -EBUSY;
2616 tmp = tr32(GRC_EEPROM_DATA);
2618 /*
2619 * The data will always be opposite the native endian
2620 * format. Perform a blind byteswap to compensate.
2621 */
2622 *val = swab32(tmp);
2624 return 0;
2627 #define NVRAM_CMD_TIMEOUT 10000
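/* NVRAM commands are polled in 10us steps, so NVRAM_CMD_TIMEOUT
 * amounts to a worst-case wait of about 100ms per command.
 */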
2629 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2631 int i;
2633 tw32(NVRAM_CMD, nvram_cmd);
2634 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2635 udelay(10);
2636 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2637 udelay(10);
2638 break;
2642 if (i == NVRAM_CMD_TIMEOUT)
2643 return -EBUSY;
2645 return 0;
2648 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2650 if (tg3_flag(tp, NVRAM) &&
2651 tg3_flag(tp, NVRAM_BUFFERED) &&
2652 tg3_flag(tp, FLASH) &&
2653 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2654 (tp->nvram_jedecnum == JEDEC_ATMEL))
2656 addr = ((addr / tp->nvram_pagesize) <<
2657 ATMEL_AT45DB0X1B_PAGE_POS) +
2658 (addr % tp->nvram_pagesize);
2660 return addr;
2663 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2665 if (tg3_flag(tp, NVRAM) &&
2666 tg3_flag(tp, NVRAM_BUFFERED) &&
2667 tg3_flag(tp, FLASH) &&
2668 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2669 (tp->nvram_jedecnum == JEDEC_ATMEL))
2671 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2672 tp->nvram_pagesize) +
2673 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2675 return addr;
2678 /* NOTE: Data read in from NVRAM is byteswapped according to
2679 * the byteswapping settings for all other register accesses.
2680 * tg3 devices are BE devices, so on a BE machine, the data
2681 * returned will be exactly as it is seen in NVRAM. On a LE
2682 * machine, the 32-bit value will be byteswapped.
2683 */
2684 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2686 int ret;
2688 if (!tg3_flag(tp, NVRAM))
2689 return tg3_nvram_read_using_eeprom(tp, offset, val);
2691 offset = tg3_nvram_phys_addr(tp, offset);
2693 if (offset > NVRAM_ADDR_MSK)
2694 return -EINVAL;
2696 ret = tg3_nvram_lock(tp);
2697 if (ret)
2698 return ret;
2700 tg3_enable_nvram_access(tp);
2702 tw32(NVRAM_ADDR, offset);
2703 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2704 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2706 if (ret == 0)
2707 *val = tr32(NVRAM_RDDATA);
2709 tg3_disable_nvram_access(tp);
2711 tg3_nvram_unlock(tp);
2713 return ret;
2716 /* Ensures NVRAM data is in bytestream format. */
2717 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2719 u32 v;
2720 int res = tg3_nvram_read(tp, offset, &v);
2721 if (!res)
2722 *val = cpu_to_be32(v);
2723 return res;
2726 /* tp->lock is held. */
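/* Program the station address into all four MAC address slots
 * (optionally skipping slot 1), plus the twelve extended slots on
 * 5703/5704. The TX backoff seed is derived from the sum of the
 * address bytes, presumably so that adapters with different MAC
 * addresses pick different backoff slots.
 */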
2727 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2729 u32 addr_high, addr_low;
2730 int i;
2732 addr_high = ((tp->dev->dev_addr[0] << 8) |
2733 tp->dev->dev_addr[1]);
2734 addr_low = ((tp->dev->dev_addr[2] << 24) |
2735 (tp->dev->dev_addr[3] << 16) |
2736 (tp->dev->dev_addr[4] << 8) |
2737 (tp->dev->dev_addr[5] << 0));
2738 for (i = 0; i < 4; i++) {
2739 if (i == 1 && skip_mac_1)
2740 continue;
2741 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2742 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2745 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2746 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2747 for (i = 0; i < 12; i++) {
2748 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2749 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2753 addr_high = (tp->dev->dev_addr[0] +
2754 tp->dev->dev_addr[1] +
2755 tp->dev->dev_addr[2] +
2756 tp->dev->dev_addr[3] +
2757 tp->dev->dev_addr[4] +
2758 tp->dev->dev_addr[5]) &
2759 TX_BACKOFF_SEED_MASK;
2760 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2763 static void tg3_enable_register_access(struct tg3 *tp)
2765 /*
2766 * Make sure register accesses (indirect or otherwise) will function
2767 * correctly.
2768 */
2769 pci_write_config_dword(tp->pdev,
2770 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2773 static int tg3_power_up(struct tg3 *tp)
2775 int err;
2777 tg3_enable_register_access(tp);
2779 err = pci_set_power_state(tp->pdev, PCI_D0);
2780 if (!err) {
2781 /* Switch out of Vaux if it is a NIC */
2782 tg3_pwrsrc_switch_to_vmain(tp);
2783 } else {
2784 netdev_err(tp->dev, "Transition to D0 failed\n");
2787 return err;
2790 static int tg3_power_down_prepare(struct tg3 *tp)
2792 u32 misc_host_ctrl;
2793 bool device_should_wake, do_low_power;
2795 tg3_enable_register_access(tp);
2797 /* Restore the CLKREQ setting. */
2798 if (tg3_flag(tp, CLKREQ_BUG)) {
2799 u16 lnkctl;
2801 pci_read_config_word(tp->pdev,
2802 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2803 &lnkctl);
2804 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2805 pci_write_config_word(tp->pdev,
2806 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2807 lnkctl);
2810 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2811 tw32(TG3PCI_MISC_HOST_CTRL,
2812 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2814 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2815 tg3_flag(tp, WOL_ENABLE);
2817 if (tg3_flag(tp, USE_PHYLIB)) {
2818 do_low_power = false;
2819 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2820 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2821 struct phy_device *phydev;
2822 u32 phyid, advertising;
2824 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2826 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2828 tp->link_config.orig_speed = phydev->speed;
2829 tp->link_config.orig_duplex = phydev->duplex;
2830 tp->link_config.orig_autoneg = phydev->autoneg;
2831 tp->link_config.orig_advertising = phydev->advertising;
2833 advertising = ADVERTISED_TP |
2834 ADVERTISED_Pause |
2835 ADVERTISED_Autoneg |
2836 ADVERTISED_10baseT_Half;
2838 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2839 if (tg3_flag(tp, WOL_SPEED_100MB))
2840 advertising |=
2841 ADVERTISED_100baseT_Half |
2842 ADVERTISED_100baseT_Full |
2843 ADVERTISED_10baseT_Full;
2844 else
2845 advertising |= ADVERTISED_10baseT_Full;
2848 phydev->advertising = advertising;
2850 phy_start_aneg(phydev);
2852 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2853 if (phyid != PHY_ID_BCMAC131) {
2854 phyid &= PHY_BCM_OUI_MASK;
2855 if (phyid == PHY_BCM_OUI_1 ||
2856 phyid == PHY_BCM_OUI_2 ||
2857 phyid == PHY_BCM_OUI_3)
2858 do_low_power = true;
2861 } else {
2862 do_low_power = true;
2864 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2865 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2866 tp->link_config.orig_speed = tp->link_config.speed;
2867 tp->link_config.orig_duplex = tp->link_config.duplex;
2868 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2871 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2872 tp->link_config.speed = SPEED_10;
2873 tp->link_config.duplex = DUPLEX_HALF;
2874 tp->link_config.autoneg = AUTONEG_ENABLE;
2875 tg3_setup_phy(tp, 0);
2879 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2880 u32 val;
2882 val = tr32(GRC_VCPU_EXT_CTRL);
2883 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2884 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2885 int i;
2886 u32 val;
2888 for (i = 0; i < 200; i++) {
2889 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2890 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2891 break;
2892 msleep(1);
2895 if (tg3_flag(tp, WOL_CAP))
2896 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2897 WOL_DRV_STATE_SHUTDOWN |
2898 WOL_DRV_WOL |
2899 WOL_SET_MAGIC_PKT);
2901 if (device_should_wake) {
2902 u32 mac_mode;
2904 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2905 if (do_low_power &&
2906 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2907 tg3_phy_auxctl_write(tp,
2908 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2909 MII_TG3_AUXCTL_PCTL_WOL_EN |
2910 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2911 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2912 udelay(40);
2915 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2916 mac_mode = MAC_MODE_PORT_MODE_GMII;
2917 else
2918 mac_mode = MAC_MODE_PORT_MODE_MII;
2920 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2921 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2922 ASIC_REV_5700) {
2923 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2924 SPEED_100 : SPEED_10;
2925 if (tg3_5700_link_polarity(tp, speed))
2926 mac_mode |= MAC_MODE_LINK_POLARITY;
2927 else
2928 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2930 } else {
2931 mac_mode = MAC_MODE_PORT_MODE_TBI;
2934 if (!tg3_flag(tp, 5750_PLUS))
2935 tw32(MAC_LED_CTRL, tp->led_ctrl);
2937 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2938 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2939 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2940 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2942 if (tg3_flag(tp, ENABLE_APE))
2943 mac_mode |= MAC_MODE_APE_TX_EN |
2944 MAC_MODE_APE_RX_EN |
2945 MAC_MODE_TDE_ENABLE;
2947 tw32_f(MAC_MODE, mac_mode);
2948 udelay(100);
2950 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2951 udelay(10);
2954 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2955 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2956 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2957 u32 base_val;
2959 base_val = tp->pci_clock_ctrl;
2960 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2961 CLOCK_CTRL_TXCLK_DISABLE);
2963 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2964 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2965 } else if (tg3_flag(tp, 5780_CLASS) ||
2966 tg3_flag(tp, CPMU_PRESENT) ||
2967 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2968 /* do nothing */
2969 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2970 u32 newbits1, newbits2;
2972 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2973 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2974 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2975 CLOCK_CTRL_TXCLK_DISABLE |
2976 CLOCK_CTRL_ALTCLK);
2977 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2978 } else if (tg3_flag(tp, 5705_PLUS)) {
2979 newbits1 = CLOCK_CTRL_625_CORE;
2980 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2981 } else {
2982 newbits1 = CLOCK_CTRL_ALTCLK;
2983 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2986 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2987 40);
2989 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2990 40);
2992 if (!tg3_flag(tp, 5705_PLUS)) {
2993 u32 newbits3;
2995 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2996 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2997 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2998 CLOCK_CTRL_TXCLK_DISABLE |
2999 CLOCK_CTRL_44MHZ_CORE);
3000 } else {
3001 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3004 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3005 tp->pci_clock_ctrl | newbits3, 40);
3009 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3010 tg3_power_down_phy(tp, do_low_power);
3012 tg3_frob_aux_power(tp, true);
3014 /* Workaround for unstable PLL clock */
3015 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3016 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3017 u32 val = tr32(0x7d00);
3019 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3020 tw32(0x7d00, val);
3021 if (!tg3_flag(tp, ENABLE_ASF)) {
3022 int err;
3024 err = tg3_nvram_lock(tp);
3025 tg3_halt_cpu(tp, RX_CPU_BASE);
3026 if (!err)
3027 tg3_nvram_unlock(tp);
3031 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3033 return 0;
3036 static void tg3_power_down(struct tg3 *tp)
3038 tg3_power_down_prepare(tp);
3040 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3041 pci_set_power_state(tp->pdev, PCI_D3hot);
3044 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3046 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3047 case MII_TG3_AUX_STAT_10HALF:
3048 *speed = SPEED_10;
3049 *duplex = DUPLEX_HALF;
3050 break;
3052 case MII_TG3_AUX_STAT_10FULL:
3053 *speed = SPEED_10;
3054 *duplex = DUPLEX_FULL;
3055 break;
3057 case MII_TG3_AUX_STAT_100HALF:
3058 *speed = SPEED_100;
3059 *duplex = DUPLEX_HALF;
3060 break;
3062 case MII_TG3_AUX_STAT_100FULL:
3063 *speed = SPEED_100;
3064 *duplex = DUPLEX_FULL;
3065 break;
3067 case MII_TG3_AUX_STAT_1000HALF:
3068 *speed = SPEED_1000;
3069 *duplex = DUPLEX_HALF;
3070 break;
3072 case MII_TG3_AUX_STAT_1000FULL:
3073 *speed = SPEED_1000;
3074 *duplex = DUPLEX_FULL;
3075 break;
3077 default:
3078 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3079 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3080 SPEED_10;
3081 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3082 DUPLEX_HALF;
3083 break;
3085 *speed = SPEED_INVALID;
3086 *duplex = DUPLEX_INVALID;
3087 break;
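/* Translate ethtool ADVERTISED_* bits plus the requested flow
 * control into the MII advertisement registers (MII_ADVERTISE and
 * MII_CTRL1000) and, on EEE-capable PHYs, into the clause 45 EEE
 * advertisement register as well.
 */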
3091 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3093 int err = 0;
3094 u32 val, new_adv;
3096 new_adv = ADVERTISE_CSMA;
3097 if (advertise & ADVERTISED_10baseT_Half)
3098 new_adv |= ADVERTISE_10HALF;
3099 if (advertise & ADVERTISED_10baseT_Full)
3100 new_adv |= ADVERTISE_10FULL;
3101 if (advertise & ADVERTISED_100baseT_Half)
3102 new_adv |= ADVERTISE_100HALF;
3103 if (advertise & ADVERTISED_100baseT_Full)
3104 new_adv |= ADVERTISE_100FULL;
3106 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3108 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3109 if (err)
3110 goto done;
3112 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3113 goto done;
3115 new_adv = 0;
3116 if (advertise & ADVERTISED_1000baseT_Half)
3117 new_adv |= ADVERTISE_1000HALF;
3118 if (advertise & ADVERTISED_1000baseT_Full)
3119 new_adv |= ADVERTISE_1000FULL;
3121 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3122 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3123 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3125 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3126 if (err)
3127 goto done;
3129 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3130 goto done;
3132 tw32(TG3_CPMU_EEE_MODE,
3133 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3135 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3136 if (!err) {
3137 u32 err2;
3139 val = 0;
3140 /* Advertise 100BASE-TX EEE ability */
3141 if (advertise & ADVERTISED_100baseT_Full)
3142 val |= MDIO_AN_EEE_ADV_100TX;
3143 /* Advertise 1000BASE-T EEE ability */
3144 if (advertise & ADVERTISED_1000baseT_Full)
3145 val |= MDIO_AN_EEE_ADV_1000T;
3146 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3147 if (err)
3148 val = 0;
3150 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3151 case ASIC_REV_5717:
3152 case ASIC_REV_57765:
3153 case ASIC_REV_5719:
3154 /* If any EEE modes were advertised above... */
3155 if (val)
3156 val = MII_TG3_DSP_TAP26_ALNOKO |
3157 MII_TG3_DSP_TAP26_RMRXSTO |
3158 MII_TG3_DSP_TAP26_OPCSINPT;
3159 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3160 /* Fall through */
3161 case ASIC_REV_5720:
3162 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3163 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3164 MII_TG3_DSP_CH34TP2_HIBW01);
3167 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3168 if (!err)
3169 err = err2;
3172 done:
3173 return err;
3176 static void tg3_phy_copper_begin(struct tg3 *tp)
3178 u32 new_adv;
3179 int i;
3181 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3182 new_adv = ADVERTISED_10baseT_Half |
3183 ADVERTISED_10baseT_Full;
3184 if (tg3_flag(tp, WOL_SPEED_100MB))
3185 new_adv |= ADVERTISED_100baseT_Half |
3186 ADVERTISED_100baseT_Full;
3188 tg3_phy_autoneg_cfg(tp, new_adv,
3189 FLOW_CTRL_TX | FLOW_CTRL_RX);
3190 } else if (tp->link_config.speed == SPEED_INVALID) {
3191 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3192 tp->link_config.advertising &=
3193 ~(ADVERTISED_1000baseT_Half |
3194 ADVERTISED_1000baseT_Full);
3196 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3197 tp->link_config.flowctrl);
3198 } else {
3199 /* Asking for a specific link mode. */
3200 if (tp->link_config.speed == SPEED_1000) {
3201 if (tp->link_config.duplex == DUPLEX_FULL)
3202 new_adv = ADVERTISED_1000baseT_Full;
3203 else
3204 new_adv = ADVERTISED_1000baseT_Half;
3205 } else if (tp->link_config.speed == SPEED_100) {
3206 if (tp->link_config.duplex == DUPLEX_FULL)
3207 new_adv = ADVERTISED_100baseT_Full;
3208 else
3209 new_adv = ADVERTISED_100baseT_Half;
3210 } else {
3211 if (tp->link_config.duplex == DUPLEX_FULL)
3212 new_adv = ADVERTISED_10baseT_Full;
3213 else
3214 new_adv = ADVERTISED_10baseT_Half;
3217 tg3_phy_autoneg_cfg(tp, new_adv,
3218 tp->link_config.flowctrl);
3221 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3222 tp->link_config.speed != SPEED_INVALID) {
3223 u32 bmcr, orig_bmcr;
3225 tp->link_config.active_speed = tp->link_config.speed;
3226 tp->link_config.active_duplex = tp->link_config.duplex;
3228 bmcr = 0;
3229 switch (tp->link_config.speed) {
3230 default:
3231 case SPEED_10:
3232 break;
3234 case SPEED_100:
3235 bmcr |= BMCR_SPEED100;
3236 break;
3238 case SPEED_1000:
3239 bmcr |= BMCR_SPEED1000;
3240 break;
3243 if (tp->link_config.duplex == DUPLEX_FULL)
3244 bmcr |= BMCR_FULLDPLX;
3246 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3247 (bmcr != orig_bmcr)) {
3248 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3249 for (i = 0; i < 1500; i++) {
3250 u32 tmp;
3252 udelay(10);
3253 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3254 tg3_readphy(tp, MII_BMSR, &tmp))
3255 continue;
3256 if (!(tmp & BMSR_LSTATUS)) {
3257 udelay(40);
3258 break;
3261 tg3_writephy(tp, MII_BMCR, bmcr);
3262 udelay(40);
3264 } else {
3265 tg3_writephy(tp, MII_BMCR,
3266 BMCR_ANENABLE | BMCR_ANRESTART);
3270 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3272 int err;
3274 /* Turn off tap power management and set the
3275 * Extended packet length bit. */
3276 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3278 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3279 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3280 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3281 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3282 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3284 udelay(40);
3286 return err;
3289 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3291 u32 adv_reg, all_mask = 0;
3293 if (mask & ADVERTISED_10baseT_Half)
3294 all_mask |= ADVERTISE_10HALF;
3295 if (mask & ADVERTISED_10baseT_Full)
3296 all_mask |= ADVERTISE_10FULL;
3297 if (mask & ADVERTISED_100baseT_Half)
3298 all_mask |= ADVERTISE_100HALF;
3299 if (mask & ADVERTISED_100baseT_Full)
3300 all_mask |= ADVERTISE_100FULL;
3302 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3303 return 0;
3305 if ((adv_reg & all_mask) != all_mask)
3306 return 0;
3307 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3308 u32 tg3_ctrl;
3310 all_mask = 0;
3311 if (mask & ADVERTISED_1000baseT_Half)
3312 all_mask |= ADVERTISE_1000HALF;
3313 if (mask & ADVERTISED_1000baseT_Full)
3314 all_mask |= ADVERTISE_1000FULL;
3316 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3317 return 0;
3319 if ((tg3_ctrl & all_mask) != all_mask)
3320 return 0;
3322 return 1;
3325 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3327 u32 curadv, reqadv;
3329 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3330 return 1;
3332 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3333 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3335 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3336 if (curadv != reqadv)
3337 return 0;
3339 if (tg3_flag(tp, PAUSE_AUTONEG))
3340 tg3_readphy(tp, MII_LPA, rmtadv);
3341 } else {
3342 /* Reprogram the advertisement register, even if it
3343 * does not affect the current link. If the link
3344 * gets renegotiated in the future, we can save an
3345 * additional renegotiation cycle by advertising
3346 * it correctly in the first place.
3347 */
3348 if (curadv != reqadv) {
3349 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3350 ADVERTISE_PAUSE_ASYM);
3351 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3355 return 1;
3358 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3360 int current_link_up;
3361 u32 bmsr, val;
3362 u32 lcl_adv, rmt_adv;
3363 u16 current_speed;
3364 u8 current_duplex;
3365 int i, err;
3367 tw32(MAC_EVENT, 0);
3369 tw32_f(MAC_STATUS,
3370 (MAC_STATUS_SYNC_CHANGED |
3371 MAC_STATUS_CFG_CHANGED |
3372 MAC_STATUS_MI_COMPLETION |
3373 MAC_STATUS_LNKSTATE_CHANGED));
3374 udelay(40);
3376 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3377 tw32_f(MAC_MI_MODE,
3378 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3379 udelay(80);
3382 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3384 /* Some third-party PHYs need to be reset on link going
3385 * down.
3386 */
3387 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3388 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3389 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3390 netif_carrier_ok(tp->dev)) {
3391 tg3_readphy(tp, MII_BMSR, &bmsr);
3392 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3393 !(bmsr & BMSR_LSTATUS))
3394 force_reset = 1;
3396 if (force_reset)
3397 tg3_phy_reset(tp);
3399 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3400 tg3_readphy(tp, MII_BMSR, &bmsr);
3401 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3402 !tg3_flag(tp, INIT_COMPLETE))
3403 bmsr = 0;
3405 if (!(bmsr & BMSR_LSTATUS)) {
3406 err = tg3_init_5401phy_dsp(tp);
3407 if (err)
3408 return err;
3410 tg3_readphy(tp, MII_BMSR, &bmsr);
3411 for (i = 0; i < 1000; i++) {
3412 udelay(10);
3413 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3414 (bmsr & BMSR_LSTATUS)) {
3415 udelay(40);
3416 break;
3420 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3421 TG3_PHY_REV_BCM5401_B0 &&
3422 !(bmsr & BMSR_LSTATUS) &&
3423 tp->link_config.active_speed == SPEED_1000) {
3424 err = tg3_phy_reset(tp);
3425 if (!err)
3426 err = tg3_init_5401phy_dsp(tp);
3427 if (err)
3428 return err;
3431 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3432 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3433 /* 5701 {A0,B0} CRC bug workaround */
3434 tg3_writephy(tp, 0x15, 0x0a75);
3435 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3436 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3437 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3440 /* Clear pending interrupts... */
3441 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3442 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3444 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3445 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3446 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3447 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3451 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3452 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3453 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3454 else
3455 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3458 current_link_up = 0;
3459 current_speed = SPEED_INVALID;
3460 current_duplex = DUPLEX_INVALID;
3462 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3463 err = tg3_phy_auxctl_read(tp,
3464 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3465 &val);
3466 if (!err && !(val & (1 << 10))) {
3467 tg3_phy_auxctl_write(tp,
3468 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3469 val | (1 << 10));
3470 goto relink;
3474 bmsr = 0;
3475 for (i = 0; i < 100; i++) {
3476 tg3_readphy(tp, MII_BMSR, &bmsr);
3477 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3478 (bmsr & BMSR_LSTATUS))
3479 break;
3480 udelay(40);
3483 if (bmsr & BMSR_LSTATUS) {
3484 u32 aux_stat, bmcr;
3486 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3487 for (i = 0; i < 2000; i++) {
3488 udelay(10);
3489 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3490 aux_stat)
3491 break;
3494 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3495 &current_speed,
3496 &current_duplex);
3498 bmcr = 0;
3499 for (i = 0; i < 200; i++) {
3500 tg3_readphy(tp, MII_BMCR, &bmcr);
3501 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3502 continue;
3503 if (bmcr && bmcr != 0x7fff)
3504 break;
3505 udelay(10);
3508 lcl_adv = 0;
3509 rmt_adv = 0;
3511 tp->link_config.active_speed = current_speed;
3512 tp->link_config.active_duplex = current_duplex;
3514 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3515 if ((bmcr & BMCR_ANENABLE) &&
3516 tg3_copper_is_advertising_all(tp,
3517 tp->link_config.advertising)) {
3518 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3519 &rmt_adv))
3520 current_link_up = 1;
3522 } else {
3523 if (!(bmcr & BMCR_ANENABLE) &&
3524 tp->link_config.speed == current_speed &&
3525 tp->link_config.duplex == current_duplex &&
3526 tp->link_config.flowctrl ==
3527 tp->link_config.active_flowctrl) {
3528 current_link_up = 1;
3532 if (current_link_up == 1 &&
3533 tp->link_config.active_duplex == DUPLEX_FULL)
3534 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3537 relink:
3538 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3539 tg3_phy_copper_begin(tp);
3541 tg3_readphy(tp, MII_BMSR, &bmsr);
3542 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3543 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3544 current_link_up = 1;
3547 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3548 if (current_link_up == 1) {
3549 if (tp->link_config.active_speed == SPEED_100 ||
3550 tp->link_config.active_speed == SPEED_10)
3551 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3552 else
3553 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3554 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3555 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3556 else
3557 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3559 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3560 if (tp->link_config.active_duplex == DUPLEX_HALF)
3561 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3563 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3564 if (current_link_up == 1 &&
3565 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3566 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3567 else
3568 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3571 /* ??? Without this setting Netgear GA302T PHY does not
3572 * ??? send/receive packets...
3573 */
3574 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3575 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3576 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3577 tw32_f(MAC_MI_MODE, tp->mi_mode);
3578 udelay(80);
3581 tw32_f(MAC_MODE, tp->mac_mode);
3582 udelay(40);
3584 tg3_phy_eee_adjust(tp, current_link_up);
3586 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3587 /* Polled via timer. */
3588 tw32_f(MAC_EVENT, 0);
3589 } else {
3590 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3592 udelay(40);
3594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3595 current_link_up == 1 &&
3596 tp->link_config.active_speed == SPEED_1000 &&
3597 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3598 udelay(120);
3599 tw32_f(MAC_STATUS,
3600 (MAC_STATUS_SYNC_CHANGED |
3601 MAC_STATUS_CFG_CHANGED));
3602 udelay(40);
3603 tg3_write_mem(tp,
3604 NIC_SRAM_FIRMWARE_MBOX,
3605 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3608 /* Prevent send BD corruption. */
3609 if (tg3_flag(tp, CLKREQ_BUG)) {
3610 u16 oldlnkctl, newlnkctl;
3612 pci_read_config_word(tp->pdev,
3613 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3614 &oldlnkctl);
3615 if (tp->link_config.active_speed == SPEED_100 ||
3616 tp->link_config.active_speed == SPEED_10)
3617 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3618 else
3619 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3620 if (newlnkctl != oldlnkctl)
3621 pci_write_config_word(tp->pdev,
3622 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3623 newlnkctl);
3626 if (current_link_up != netif_carrier_ok(tp->dev)) {
3627 if (current_link_up)
3628 netif_carrier_on(tp->dev);
3629 else
3630 netif_carrier_off(tp->dev);
3631 tg3_link_report(tp);
3634 return 0;
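/* Software autonegotiation support for fiber links. The state
 * machine below is, in effect, the IEEE 802.3 clause 37 1000BASE-X
 * autoneg arbitration state diagram, driven off the MAC's received
 * config word registers.
 */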
3637 struct tg3_fiber_aneginfo {
3638 int state;
3639 #define ANEG_STATE_UNKNOWN 0
3640 #define ANEG_STATE_AN_ENABLE 1
3641 #define ANEG_STATE_RESTART_INIT 2
3642 #define ANEG_STATE_RESTART 3
3643 #define ANEG_STATE_DISABLE_LINK_OK 4
3644 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3645 #define ANEG_STATE_ABILITY_DETECT 6
3646 #define ANEG_STATE_ACK_DETECT_INIT 7
3647 #define ANEG_STATE_ACK_DETECT 8
3648 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3649 #define ANEG_STATE_COMPLETE_ACK 10
3650 #define ANEG_STATE_IDLE_DETECT_INIT 11
3651 #define ANEG_STATE_IDLE_DETECT 12
3652 #define ANEG_STATE_LINK_OK 13
3653 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3654 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3656 u32 flags;
3657 #define MR_AN_ENABLE 0x00000001
3658 #define MR_RESTART_AN 0x00000002
3659 #define MR_AN_COMPLETE 0x00000004
3660 #define MR_PAGE_RX 0x00000008
3661 #define MR_NP_LOADED 0x00000010
3662 #define MR_TOGGLE_TX 0x00000020
3663 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3664 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3665 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3666 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3667 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3668 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3669 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3670 #define MR_TOGGLE_RX 0x00002000
3671 #define MR_NP_RX 0x00004000
3673 #define MR_LINK_OK 0x80000000
3675 unsigned long link_time, cur_time;
3677 u32 ability_match_cfg;
3678 int ability_match_count;
3680 char ability_match, idle_match, ack_match;
3682 u32 txconfig, rxconfig;
3683 #define ANEG_CFG_NP 0x00000080
3684 #define ANEG_CFG_ACK 0x00000040
3685 #define ANEG_CFG_RF2 0x00000020
3686 #define ANEG_CFG_RF1 0x00000010
3687 #define ANEG_CFG_PS2 0x00000001
3688 #define ANEG_CFG_PS1 0x00008000
3689 #define ANEG_CFG_HD 0x00004000
3690 #define ANEG_CFG_FD 0x00002000
3691 #define ANEG_CFG_INVAL 0x00001f06
3694 #define ANEG_OK 0
3695 #define ANEG_DONE 1
3696 #define ANEG_TIMER_ENAB 2
3697 #define ANEG_FAILED -1
3699 #define ANEG_STATE_SETTLE_TIME 10000
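/* All aneg times are measured in calls to the state machine;
 * fiber_autoneg() below ticks it once per microsecond, so the
 * settle time above corresponds to roughly 10ms.
 */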
3701 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3702 struct tg3_fiber_aneginfo *ap)
3704 u16 flowctrl;
3705 unsigned long delta;
3706 u32 rx_cfg_reg;
3707 int ret;
3709 if (ap->state == ANEG_STATE_UNKNOWN) {
3710 ap->rxconfig = 0;
3711 ap->link_time = 0;
3712 ap->cur_time = 0;
3713 ap->ability_match_cfg = 0;
3714 ap->ability_match_count = 0;
3715 ap->ability_match = 0;
3716 ap->idle_match = 0;
3717 ap->ack_match = 0;
3719 ap->cur_time++;
3721 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3722 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3724 if (rx_cfg_reg != ap->ability_match_cfg) {
3725 ap->ability_match_cfg = rx_cfg_reg;
3726 ap->ability_match = 0;
3727 ap->ability_match_count = 0;
3728 } else {
3729 if (++ap->ability_match_count > 1) {
3730 ap->ability_match = 1;
3731 ap->ability_match_cfg = rx_cfg_reg;
3734 if (rx_cfg_reg & ANEG_CFG_ACK)
3735 ap->ack_match = 1;
3736 else
3737 ap->ack_match = 0;
3739 ap->idle_match = 0;
3740 } else {
3741 ap->idle_match = 1;
3742 ap->ability_match_cfg = 0;
3743 ap->ability_match_count = 0;
3744 ap->ability_match = 0;
3745 ap->ack_match = 0;
3747 rx_cfg_reg = 0;
3750 ap->rxconfig = rx_cfg_reg;
3751 ret = ANEG_OK;
3753 switch (ap->state) {
3754 case ANEG_STATE_UNKNOWN:
3755 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3756 ap->state = ANEG_STATE_AN_ENABLE;
3758 /* fallthru */
3759 case ANEG_STATE_AN_ENABLE:
3760 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3761 if (ap->flags & MR_AN_ENABLE) {
3762 ap->link_time = 0;
3763 ap->cur_time = 0;
3764 ap->ability_match_cfg = 0;
3765 ap->ability_match_count = 0;
3766 ap->ability_match = 0;
3767 ap->idle_match = 0;
3768 ap->ack_match = 0;
3770 ap->state = ANEG_STATE_RESTART_INIT;
3771 } else {
3772 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3774 break;
3776 case ANEG_STATE_RESTART_INIT:
3777 ap->link_time = ap->cur_time;
3778 ap->flags &= ~(MR_NP_LOADED);
3779 ap->txconfig = 0;
3780 tw32(MAC_TX_AUTO_NEG, 0);
3781 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3782 tw32_f(MAC_MODE, tp->mac_mode);
3783 udelay(40);
3785 ret = ANEG_TIMER_ENAB;
3786 ap->state = ANEG_STATE_RESTART;
3788 /* fallthru */
3789 case ANEG_STATE_RESTART:
3790 delta = ap->cur_time - ap->link_time;
3791 if (delta > ANEG_STATE_SETTLE_TIME)
3792 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3793 else
3794 ret = ANEG_TIMER_ENAB;
3795 break;
3797 case ANEG_STATE_DISABLE_LINK_OK:
3798 ret = ANEG_DONE;
3799 break;
3801 case ANEG_STATE_ABILITY_DETECT_INIT:
3802 ap->flags &= ~(MR_TOGGLE_TX);
3803 ap->txconfig = ANEG_CFG_FD;
3804 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3805 if (flowctrl & ADVERTISE_1000XPAUSE)
3806 ap->txconfig |= ANEG_CFG_PS1;
3807 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3808 ap->txconfig |= ANEG_CFG_PS2;
3809 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3810 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3811 tw32_f(MAC_MODE, tp->mac_mode);
3812 udelay(40);
3814 ap->state = ANEG_STATE_ABILITY_DETECT;
3815 break;
3817 case ANEG_STATE_ABILITY_DETECT:
3818 if (ap->ability_match != 0 && ap->rxconfig != 0)
3819 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3820 break;
3822 case ANEG_STATE_ACK_DETECT_INIT:
3823 ap->txconfig |= ANEG_CFG_ACK;
3824 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3825 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3826 tw32_f(MAC_MODE, tp->mac_mode);
3827 udelay(40);
3829 ap->state = ANEG_STATE_ACK_DETECT;
3831 /* fallthru */
3832 case ANEG_STATE_ACK_DETECT:
3833 if (ap->ack_match != 0) {
3834 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3835 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3836 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3837 } else {
3838 ap->state = ANEG_STATE_AN_ENABLE;
3840 } else if (ap->ability_match != 0 &&
3841 ap->rxconfig == 0) {
3842 ap->state = ANEG_STATE_AN_ENABLE;
3844 break;
3846 case ANEG_STATE_COMPLETE_ACK_INIT:
3847 if (ap->rxconfig & ANEG_CFG_INVAL) {
3848 ret = ANEG_FAILED;
3849 break;
3851 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3852 MR_LP_ADV_HALF_DUPLEX |
3853 MR_LP_ADV_SYM_PAUSE |
3854 MR_LP_ADV_ASYM_PAUSE |
3855 MR_LP_ADV_REMOTE_FAULT1 |
3856 MR_LP_ADV_REMOTE_FAULT2 |
3857 MR_LP_ADV_NEXT_PAGE |
3858 MR_TOGGLE_RX |
3859 MR_NP_RX);
3860 if (ap->rxconfig & ANEG_CFG_FD)
3861 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3862 if (ap->rxconfig & ANEG_CFG_HD)
3863 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3864 if (ap->rxconfig & ANEG_CFG_PS1)
3865 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3866 if (ap->rxconfig & ANEG_CFG_PS2)
3867 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3868 if (ap->rxconfig & ANEG_CFG_RF1)
3869 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3870 if (ap->rxconfig & ANEG_CFG_RF2)
3871 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3872 if (ap->rxconfig & ANEG_CFG_NP)
3873 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3875 ap->link_time = ap->cur_time;
3877 ap->flags ^= (MR_TOGGLE_TX);
3878 if (ap->rxconfig & 0x0008)
3879 ap->flags |= MR_TOGGLE_RX;
3880 if (ap->rxconfig & ANEG_CFG_NP)
3881 ap->flags |= MR_NP_RX;
3882 ap->flags |= MR_PAGE_RX;
3884 ap->state = ANEG_STATE_COMPLETE_ACK;
3885 ret = ANEG_TIMER_ENAB;
3886 break;
3888 case ANEG_STATE_COMPLETE_ACK:
3889 if (ap->ability_match != 0 &&
3890 ap->rxconfig == 0) {
3891 ap->state = ANEG_STATE_AN_ENABLE;
3892 break;
3894 delta = ap->cur_time - ap->link_time;
3895 if (delta > ANEG_STATE_SETTLE_TIME) {
3896 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3897 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3898 } else {
3899 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3900 !(ap->flags & MR_NP_RX)) {
3901 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3902 } else {
3903 ret = ANEG_FAILED;
3907 break;
3909 case ANEG_STATE_IDLE_DETECT_INIT:
3910 ap->link_time = ap->cur_time;
3911 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3912 tw32_f(MAC_MODE, tp->mac_mode);
3913 udelay(40);
3915 ap->state = ANEG_STATE_IDLE_DETECT;
3916 ret = ANEG_TIMER_ENAB;
3917 break;
3919 case ANEG_STATE_IDLE_DETECT:
3920 if (ap->ability_match != 0 &&
3921 ap->rxconfig == 0) {
3922 ap->state = ANEG_STATE_AN_ENABLE;
3923 break;
3925 delta = ap->cur_time - ap->link_time;
3926 if (delta > ANEG_STATE_SETTLE_TIME) {
3927 /* XXX another gem from the Broadcom driver :( */
3928 ap->state = ANEG_STATE_LINK_OK;
3930 break;
3932 case ANEG_STATE_LINK_OK:
3933 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3934 ret = ANEG_DONE;
3935 break;
3937 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3938 /* ??? unimplemented */
3939 break;
3941 case ANEG_STATE_NEXT_PAGE_WAIT:
3942 /* ??? unimplemented */
3943 break;
3945 default:
3946 ret = ANEG_FAILED;
3947 break;
3950 return ret;
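/* Run the autoneg state machine, ticking it once per microsecond
 * for at most ~195ms while SEND_CONFIGS is asserted. Returns
 * nonzero when the machine finishes (ANEG_DONE) with at least one
 * of the complete / link OK / partner-full-duplex flags set.
 */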
3953 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3955 int res = 0;
3956 struct tg3_fiber_aneginfo aninfo;
3957 int status = ANEG_FAILED;
3958 unsigned int tick;
3959 u32 tmp;
3961 tw32_f(MAC_TX_AUTO_NEG, 0);
3963 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3964 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3965 udelay(40);
3967 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3968 udelay(40);
3970 memset(&aninfo, 0, sizeof(aninfo));
3971 aninfo.flags |= MR_AN_ENABLE;
3972 aninfo.state = ANEG_STATE_UNKNOWN;
3973 aninfo.cur_time = 0;
3974 tick = 0;
3975 while (++tick < 195000) {
3976 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3977 if (status == ANEG_DONE || status == ANEG_FAILED)
3978 break;
3980 udelay(1);
3983 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3984 tw32_f(MAC_MODE, tp->mac_mode);
3985 udelay(40);
3987 *txflags = aninfo.txconfig;
3988 *rxflags = aninfo.flags;
3990 if (status == ANEG_DONE &&
3991 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3992 MR_LP_ADV_FULL_DUPLEX)))
3993 res = 1;
3995 return res;
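/* One-time init sequence for the BCM8002 SerDes PHY. The register
 * numbers and values here are opaque magic from Broadcom; only the
 * intent of each step is noted in the comments.
 */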
3998 static void tg3_init_bcm8002(struct tg3 *tp)
4000 u32 mac_status = tr32(MAC_STATUS);
4001 int i;
4003 /* Reset when initializing for the first time, or when we have a link. */
4004 if (tg3_flag(tp, INIT_COMPLETE) &&
4005 !(mac_status & MAC_STATUS_PCS_SYNCED))
4006 return;
4008 /* Set PLL lock range. */
4009 tg3_writephy(tp, 0x16, 0x8007);
4011 /* SW reset */
4012 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4014 /* Wait for reset to complete. */
4015 /* XXX schedule_timeout() ... */
4016 for (i = 0; i < 500; i++)
4017 udelay(10);
4019 /* Config mode; select PMA/Ch 1 regs. */
4020 tg3_writephy(tp, 0x10, 0x8411);
4022 /* Enable auto-lock and comdet, select txclk for tx. */
4023 tg3_writephy(tp, 0x11, 0x0a10);
4025 tg3_writephy(tp, 0x18, 0x00a0);
4026 tg3_writephy(tp, 0x16, 0x41ff);
4028 /* Assert and deassert POR. */
4029 tg3_writephy(tp, 0x13, 0x0400);
4030 udelay(40);
4031 tg3_writephy(tp, 0x13, 0x0000);
4033 tg3_writephy(tp, 0x11, 0x0a50);
4034 udelay(40);
4035 tg3_writephy(tp, 0x11, 0x0a10);
4037 /* Wait for signal to stabilize */
4038 /* XXX schedule_timeout() ... */
4039 for (i = 0; i < 15000; i++)
4040 udelay(10);
4042 /* Deselect the channel register so we can read the PHYID
4043 * later.
4044 */
4045 tg3_writephy(tp, 0x10, 0x8011);
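/* Fiber link setup using the hardware SG_DIG autoneg engine. On
 * everything except 5704 A0/A1, MAC_SERDES_CFG is reprogrammed
 * around autoneg restarts, preserving the signal pre-emphasis and
 * voltage regulator bits. Parallel detection is used as a fallback
 * when the partner sends no config code words.
 */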
4048 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4050 u16 flowctrl;
4051 u32 sg_dig_ctrl, sg_dig_status;
4052 u32 serdes_cfg, expected_sg_dig_ctrl;
4053 int workaround, port_a;
4054 int current_link_up;
4056 serdes_cfg = 0;
4057 expected_sg_dig_ctrl = 0;
4058 workaround = 0;
4059 port_a = 1;
4060 current_link_up = 0;
4062 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4063 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4064 workaround = 1;
4065 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4066 port_a = 0;
4068 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4069 /* preserve bits 20-23 for voltage regulator */
4070 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4073 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4075 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4076 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4077 if (workaround) {
4078 u32 val = serdes_cfg;
4080 if (port_a)
4081 val |= 0xc010000;
4082 else
4083 val |= 0x4010000;
4084 tw32_f(MAC_SERDES_CFG, val);
4087 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4089 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4090 tg3_setup_flow_control(tp, 0, 0);
4091 current_link_up = 1;
4093 goto out;
4096 /* Want auto-negotiation. */
4097 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4099 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4100 if (flowctrl & ADVERTISE_1000XPAUSE)
4101 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4102 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4103 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4105 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4106 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4107 tp->serdes_counter &&
4108 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4109 MAC_STATUS_RCVD_CFG)) ==
4110 MAC_STATUS_PCS_SYNCED)) {
4111 tp->serdes_counter--;
4112 current_link_up = 1;
4113 goto out;
4115 restart_autoneg:
4116 if (workaround)
4117 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4118 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4119 udelay(5);
4120 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4122 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4123 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4124 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4125 MAC_STATUS_SIGNAL_DET)) {
4126 sg_dig_status = tr32(SG_DIG_STATUS);
4127 mac_status = tr32(MAC_STATUS);
4129 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4130 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4131 u32 local_adv = 0, remote_adv = 0;
4133 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4134 local_adv |= ADVERTISE_1000XPAUSE;
4135 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4136 local_adv |= ADVERTISE_1000XPSE_ASYM;
4138 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4139 remote_adv |= LPA_1000XPAUSE;
4140 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4141 remote_adv |= LPA_1000XPAUSE_ASYM;
4143 tg3_setup_flow_control(tp, local_adv, remote_adv);
4144 current_link_up = 1;
4145 tp->serdes_counter = 0;
4146 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4147 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4148 if (tp->serdes_counter)
4149 tp->serdes_counter--;
4150 else {
4151 if (workaround) {
4152 u32 val = serdes_cfg;
4154 if (port_a)
4155 val |= 0xc010000;
4156 else
4157 val |= 0x4010000;
4159 tw32_f(MAC_SERDES_CFG, val);
4162 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4163 udelay(40);
4165 /* Link parallel detection - link is up
4166 * only if we have PCS_SYNC and not
4167 * receiving config code words */
4168 mac_status = tr32(MAC_STATUS);
4169 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4170 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4171 tg3_setup_flow_control(tp, 0, 0);
4172 current_link_up = 1;
4173 tp->phy_flags |=
4174 TG3_PHYFLG_PARALLEL_DETECT;
4175 tp->serdes_counter =
4176 SERDES_PARALLEL_DET_TIMEOUT;
4177 } else
4178 goto restart_autoneg;
4181 } else {
4182 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4183 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4186 out:
4187 return current_link_up;
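/* Per the code above, tg3_setup_fiber_hw_autoneg() reports link-up in
 * three ways: forced mode with PCS sync, hardware autoneg completing
 * in SG_DIG_STATUS, or the parallel-detection fallback (PCS synced
 * but no config code words received before serdes_counter expires).
 */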
4190 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4192 int current_link_up = 0;
4194 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4195 goto out;
4197 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4198 u32 txflags, rxflags;
4199 int i;
4201 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4202 u32 local_adv = 0, remote_adv = 0;
4204 if (txflags & ANEG_CFG_PS1)
4205 local_adv |= ADVERTISE_1000XPAUSE;
4206 if (txflags & ANEG_CFG_PS2)
4207 local_adv |= ADVERTISE_1000XPSE_ASYM;
4209 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4210 remote_adv |= LPA_1000XPAUSE;
4211 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4212 remote_adv |= LPA_1000XPAUSE_ASYM;
4214 tg3_setup_flow_control(tp, local_adv, remote_adv);
4216 current_link_up = 1;
4218 for (i = 0; i < 30; i++) {
4219 udelay(20);
4220 tw32_f(MAC_STATUS,
4221 (MAC_STATUS_SYNC_CHANGED |
4222 MAC_STATUS_CFG_CHANGED));
4223 udelay(40);
4224 if ((tr32(MAC_STATUS) &
4225 (MAC_STATUS_SYNC_CHANGED |
4226 MAC_STATUS_CFG_CHANGED)) == 0)
4227 break;
4230 mac_status = tr32(MAC_STATUS);
4231 if (current_link_up == 0 &&
4232 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4233 !(mac_status & MAC_STATUS_RCVD_CFG))
4234 current_link_up = 1;
4235 } else {
4236 tg3_setup_flow_control(tp, 0, 0);
4238 /* Forcing 1000FD link up. */
4239 current_link_up = 1;
4241 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4242 udelay(40);
4244 tw32_f(MAC_MODE, tp->mac_mode);
4245 udelay(40);
4248 out:
4249 return current_link_up;
4252 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4254 u32 orig_pause_cfg;
4255 u16 orig_active_speed;
4256 u8 orig_active_duplex;
4257 u32 mac_status;
4258 int current_link_up;
4259 int i;
4261 orig_pause_cfg = tp->link_config.active_flowctrl;
4262 orig_active_speed = tp->link_config.active_speed;
4263 orig_active_duplex = tp->link_config.active_duplex;
4265 if (!tg3_flag(tp, HW_AUTONEG) &&
4266 netif_carrier_ok(tp->dev) &&
4267 tg3_flag(tp, INIT_COMPLETE)) {
4268 mac_status = tr32(MAC_STATUS);
4269 mac_status &= (MAC_STATUS_PCS_SYNCED |
4270 MAC_STATUS_SIGNAL_DET |
4271 MAC_STATUS_CFG_CHANGED |
4272 MAC_STATUS_RCVD_CFG);
4273 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4274 MAC_STATUS_SIGNAL_DET)) {
4275 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4276 MAC_STATUS_CFG_CHANGED));
4277 return 0;
4281 tw32_f(MAC_TX_AUTO_NEG, 0);
4283 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4284 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4285 tw32_f(MAC_MODE, tp->mac_mode);
4286 udelay(40);
4288 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4289 tg3_init_bcm8002(tp);
4291 /* Enable link change event even when serdes polling. */
4292 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4293 udelay(40);
4295 current_link_up = 0;
4296 mac_status = tr32(MAC_STATUS);
4298 if (tg3_flag(tp, HW_AUTONEG))
4299 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4300 else
4301 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4303 tp->napi[0].hw_status->status =
4304 (SD_STATUS_UPDATED |
4305 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4307 for (i = 0; i < 100; i++) {
4308 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4309 MAC_STATUS_CFG_CHANGED));
4310 udelay(5);
4311 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4312 MAC_STATUS_CFG_CHANGED |
4313 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4314 break;
4317 mac_status = tr32(MAC_STATUS);
4318 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4319 current_link_up = 0;
4320 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4321 tp->serdes_counter == 0) {
4322 tw32_f(MAC_MODE, (tp->mac_mode |
4323 MAC_MODE_SEND_CONFIGS));
4324 udelay(1);
4325 tw32_f(MAC_MODE, tp->mac_mode);
4329 if (current_link_up == 1) {
4330 tp->link_config.active_speed = SPEED_1000;
4331 tp->link_config.active_duplex = DUPLEX_FULL;
4332 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4333 LED_CTRL_LNKLED_OVERRIDE |
4334 LED_CTRL_1000MBPS_ON));
4335 } else {
4336 tp->link_config.active_speed = SPEED_INVALID;
4337 tp->link_config.active_duplex = DUPLEX_INVALID;
4338 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4339 LED_CTRL_LNKLED_OVERRIDE |
4340 LED_CTRL_TRAFFIC_OVERRIDE));
4343 if (current_link_up != netif_carrier_ok(tp->dev)) {
4344 if (current_link_up)
4345 netif_carrier_on(tp->dev);
4346 else
4347 netif_carrier_off(tp->dev);
4348 tg3_link_report(tp);
4349 } else {
4350 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4351 if (orig_pause_cfg != now_pause_cfg ||
4352 orig_active_speed != tp->link_config.active_speed ||
4353 orig_active_duplex != tp->link_config.active_duplex)
4354 tg3_link_report(tp);
4357 return 0;
4360 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4362 int current_link_up, err = 0;
4363 u32 bmsr, bmcr;
4364 u16 current_speed;
4365 u8 current_duplex;
4366 u32 local_adv, remote_adv;
4368 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4369 tw32_f(MAC_MODE, tp->mac_mode);
4370 udelay(40);
4372 tw32(MAC_EVENT, 0);
4374 tw32_f(MAC_STATUS,
4375 (MAC_STATUS_SYNC_CHANGED |
4376 MAC_STATUS_CFG_CHANGED |
4377 MAC_STATUS_MI_COMPLETION |
4378 MAC_STATUS_LNKSTATE_CHANGED));
4379 udelay(40);
4381 if (force_reset)
4382 tg3_phy_reset(tp);
4384 current_link_up = 0;
4385 current_speed = SPEED_INVALID;
4386 current_duplex = DUPLEX_INVALID;
4388 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4389 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4391 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4392 bmsr |= BMSR_LSTATUS;
4393 else
4394 bmsr &= ~BMSR_LSTATUS;
4397 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4399 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4400 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4401 /* do nothing, just check for link up at the end */
4402 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4403 u32 adv, new_adv;
4405 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4406 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4407 ADVERTISE_1000XPAUSE |
4408 ADVERTISE_1000XPSE_ASYM |
4409 ADVERTISE_SLCT);
4411 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4413 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4414 new_adv |= ADVERTISE_1000XHALF;
4415 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4416 new_adv |= ADVERTISE_1000XFULL;
4418 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4419 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4420 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4421 tg3_writephy(tp, MII_BMCR, bmcr);
4423 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4424 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4425 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4427 return err;
4429 } else {
4430 u32 new_bmcr;
4432 bmcr &= ~BMCR_SPEED1000;
4433 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4435 if (tp->link_config.duplex == DUPLEX_FULL)
4436 new_bmcr |= BMCR_FULLDPLX;
4438 if (new_bmcr != bmcr) {
4439 /* BMCR_SPEED1000 is a reserved bit that needs
4440 * to be set on write. */
4442 new_bmcr |= BMCR_SPEED1000;
4444 /* Force a linkdown */
4445 if (netif_carrier_ok(tp->dev)) {
4446 u32 adv;
4448 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4449 adv &= ~(ADVERTISE_1000XFULL |
4450 ADVERTISE_1000XHALF |
4451 ADVERTISE_SLCT);
4452 tg3_writephy(tp, MII_ADVERTISE, adv);
4453 tg3_writephy(tp, MII_BMCR, bmcr |
4454 BMCR_ANRESTART |
4455 BMCR_ANENABLE);
4456 udelay(10);
4457 netif_carrier_off(tp->dev);
4459 tg3_writephy(tp, MII_BMCR, new_bmcr);
4460 bmcr = new_bmcr;
4461 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4462 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4463 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4464 ASIC_REV_5714) {
4465 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4466 bmsr |= BMSR_LSTATUS;
4467 else
4468 bmsr &= ~BMSR_LSTATUS;
4470 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4474 if (bmsr & BMSR_LSTATUS) {
4475 current_speed = SPEED_1000;
4476 current_link_up = 1;
4477 if (bmcr & BMCR_FULLDPLX)
4478 current_duplex = DUPLEX_FULL;
4479 else
4480 current_duplex = DUPLEX_HALF;
4482 local_adv = 0;
4483 remote_adv = 0;
4485 if (bmcr & BMCR_ANENABLE) {
4486 u32 common;
4488 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4489 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4490 common = local_adv & remote_adv;
4491 if (common & (ADVERTISE_1000XHALF |
4492 ADVERTISE_1000XFULL)) {
4493 if (common & ADVERTISE_1000XFULL)
4494 current_duplex = DUPLEX_FULL;
4495 else
4496 current_duplex = DUPLEX_HALF;
4497 } else if (!tg3_flag(tp, 5780_CLASS)) {
4498 /* Link is up via parallel detect */
4499 } else {
4500 current_link_up = 0;
4505 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4506 tg3_setup_flow_control(tp, local_adv, remote_adv);
4508 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4509 if (tp->link_config.active_duplex == DUPLEX_HALF)
4510 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4512 tw32_f(MAC_MODE, tp->mac_mode);
4513 udelay(40);
4515 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4517 tp->link_config.active_speed = current_speed;
4518 tp->link_config.active_duplex = current_duplex;
4520 if (current_link_up != netif_carrier_ok(tp->dev)) {
4521 if (current_link_up)
4522 netif_carrier_on(tp->dev);
4523 else {
4524 netif_carrier_off(tp->dev);
4525 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4527 tg3_link_report(tp);
4529 return err;
4532 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4534 if (tp->serdes_counter) {
4535 /* Give autoneg time to complete. */
4536 tp->serdes_counter--;
4537 return;
4540 if (!netif_carrier_ok(tp->dev) &&
4541 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4542 u32 bmcr;
4544 tg3_readphy(tp, MII_BMCR, &bmcr);
4545 if (bmcr & BMCR_ANENABLE) {
4546 u32 phy1, phy2;
4548 /* Select shadow register 0x1f */
4549 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4550 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4552 /* Select expansion interrupt status register */
4553 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4554 MII_TG3_DSP_EXP1_INT_STAT);
4555 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4556 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4558 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4559 /* We have signal detect and are not receiving
4560 * config code words; the link is up by parallel
4561 * detection. */
4564 bmcr &= ~BMCR_ANENABLE;
4565 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4566 tg3_writephy(tp, MII_BMCR, bmcr);
4567 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4570 } else if (netif_carrier_ok(tp->dev) &&
4571 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4572 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4573 u32 phy2;
4575 /* Select expansion interrupt status register */
4576 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4577 MII_TG3_DSP_EXP1_INT_STAT);
4578 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4579 if (phy2 & 0x20) {
4580 u32 bmcr;
4582 /* Config code words received, turn on autoneg. */
4583 tg3_readphy(tp, MII_BMCR, &bmcr);
4584 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4586 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
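/* In the tests above, bit 0x10 of the shadow register reads as
 * "signal detected" and bit 0x20 of the expansion interrupt status as
 * "config code words seen" (per the inline comments; the exact
 * register layouts are Broadcom-specific). Signal with no config
 * words means a non-autonegotiating partner, so the link is forced to
 * 1000/full; config words reappearing mean the partner negotiates
 * after all, so BMCR_ANENABLE is switched back on.
 */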
4592 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4594 u32 val;
4595 int err;
4597 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4598 err = tg3_setup_fiber_phy(tp, force_reset);
4599 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4600 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4601 else
4602 err = tg3_setup_copper_phy(tp, force_reset);
4604 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4605 u32 scale;
4607 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4608 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4609 scale = 65;
4610 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4611 scale = 6;
4612 else
4613 scale = 12;
4615 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4616 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4617 tw32(GRC_MISC_CFG, val);
4620 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4621 (6 << TX_LENGTHS_IPG_SHIFT);
4622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4623 val |= tr32(MAC_TX_LENGTHS) &
4624 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4625 TX_LENGTHS_CNT_DWN_VAL_MSK);
4627 if (tp->link_config.active_speed == SPEED_1000 &&
4628 tp->link_config.active_duplex == DUPLEX_HALF)
4629 tw32(MAC_TX_LENGTHS, val |
4630 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4631 else
4632 tw32(MAC_TX_LENGTHS, val |
4633 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4635 if (!tg3_flag(tp, 5705_PLUS)) {
4636 if (netif_carrier_ok(tp->dev)) {
4637 tw32(HOSTCC_STAT_COAL_TICKS,
4638 tp->coal.stats_block_coalesce_usecs);
4639 } else {
4640 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4644 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4645 val = tr32(PCIE_PWR_MGMT_THRESH);
4646 if (!netif_carrier_ok(tp->dev))
4647 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4648 tp->pwrmgmt_thresh;
4649 else
4650 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4651 tw32(PCIE_PWR_MGMT_THRESH, val);
4654 return err;
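/* The ASPM workaround at the end of tg3_setup_phy() apparently biases
 * PCIe power management on link state: with no carrier the L1 entry
 * threshold is lowered to the stored pwrmgmt_thresh value, and with
 * carrier it is raised to the mask maximum so the link stays out of
 * L1 while traffic is expected.
 */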
4657 static inline int tg3_irq_sync(struct tg3 *tp)
4659 return tp->irq_sync;
4662 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4664 int i;
4666 dst = (u32 *)((u8 *)dst + off);
4667 for (i = 0; i < len; i += sizeof(u32))
4668 *dst++ = tr32(off + i);
4671 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4673 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4674 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4675 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4676 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4677 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4678 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4679 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4680 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4681 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4682 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4683 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4684 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4685 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4686 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4687 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4688 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4689 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4690 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4691 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4693 if (tg3_flag(tp, SUPPORT_MSIX))
4694 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4696 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4697 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4698 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4699 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4700 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4701 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4702 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4703 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4705 if (!tg3_flag(tp, 5705_PLUS)) {
4706 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4707 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4708 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4711 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4712 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4713 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4714 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4715 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4717 if (tg3_flag(tp, NVRAM))
4718 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4721 static void tg3_dump_state(struct tg3 *tp)
4723 int i;
4724 u32 *regs;
4726 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4727 if (!regs) {
4728 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4729 return;
4732 if (tg3_flag(tp, PCI_EXPRESS)) {
4733 /* Read up to but not including private PCI registers */
4734 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4735 regs[i / sizeof(u32)] = tr32(i);
4736 } else
4737 tg3_dump_legacy_regs(tp, regs);
4739 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4740 if (!regs[i + 0] && !regs[i + 1] &&
4741 !regs[i + 2] && !regs[i + 3])
4742 continue;
4744 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4745 i * 4,
4746 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4749 kfree(regs);
4751 for (i = 0; i < tp->irq_cnt; i++) {
4752 struct tg3_napi *tnapi = &tp->napi[i];
4754 /* SW status block */
4755 netdev_err(tp->dev,
4756 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4758 tnapi->hw_status->status,
4759 tnapi->hw_status->status_tag,
4760 tnapi->hw_status->rx_jumbo_consumer,
4761 tnapi->hw_status->rx_consumer,
4762 tnapi->hw_status->rx_mini_consumer,
4763 tnapi->hw_status->idx[0].rx_producer,
4764 tnapi->hw_status->idx[0].tx_consumer);
4766 netdev_err(tp->dev,
4767 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4769 tnapi->last_tag, tnapi->last_irq_tag,
4770 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4771 tnapi->rx_rcb_ptr,
4772 tnapi->prodring.rx_std_prod_idx,
4773 tnapi->prodring.rx_std_cons_idx,
4774 tnapi->prodring.rx_jmb_prod_idx,
4775 tnapi->prodring.rx_jmb_cons_idx);
4779 /* This is called whenever we suspect that the system chipset is re-
4780 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4781 * is bogus tx completions. We try to recover by setting the
4782 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4783 * in the workqueue. */
4785 static void tg3_tx_recover(struct tg3 *tp)
4787 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4788 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4790 netdev_warn(tp->dev,
4791 "The system may be re-ordering memory-mapped I/O "
4792 "cycles to the network device, attempting to recover. "
4793 "Please report the problem to the driver maintainer "
4794 "and include system chipset information.\n");
4796 spin_lock(&tp->lock);
4797 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4798 spin_unlock(&tp->lock);
4801 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4803 /* Tell compiler to fetch tx indices from memory. */
4804 barrier();
4805 return tnapi->tx_pending -
4806 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
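/* Worked example of the ring arithmetic above (illustrative only,
 * assuming the usual power-of-two TG3_TX_RING_SIZE of 512): with
 * tx_prod = 5 and tx_cons = 510, (5 - 510) & 511 = 7 descriptors are
 * still in flight, so tg3_tx_avail() returns tx_pending - 7. The mask
 * makes the subtraction come out right even after tx_prod wraps past
 * the end of the ring while tx_cons has not.
 */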
4809 /* Tigon3 never reports partial packet sends. So we do not
4810 * need special logic to handle SKBs that have not had all
4811 * of their frags sent yet, like SunGEM does. */
4813 static void tg3_tx(struct tg3_napi *tnapi)
4815 struct tg3 *tp = tnapi->tp;
4816 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4817 u32 sw_idx = tnapi->tx_cons;
4818 struct netdev_queue *txq;
4819 int index = tnapi - tp->napi;
4821 if (tg3_flag(tp, ENABLE_TSS))
4822 index--;
4824 txq = netdev_get_tx_queue(tp->dev, index);
4826 while (sw_idx != hw_idx) {
4827 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
4828 struct sk_buff *skb = ri->skb;
4829 int i, tx_bug = 0;
4831 if (unlikely(skb == NULL)) {
4832 tg3_tx_recover(tp);
4833 return;
4836 pci_unmap_single(tp->pdev,
4837 dma_unmap_addr(ri, mapping),
4838 skb_headlen(skb),
4839 PCI_DMA_TODEVICE);
4841 ri->skb = NULL;
4843 while (ri->fragmented) {
4844 ri->fragmented = false;
4845 sw_idx = NEXT_TX(sw_idx);
4846 ri = &tnapi->tx_buffers[sw_idx];
4849 sw_idx = NEXT_TX(sw_idx);
4851 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4852 ri = &tnapi->tx_buffers[sw_idx];
4853 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4854 tx_bug = 1;
4856 pci_unmap_page(tp->pdev,
4857 dma_unmap_addr(ri, mapping),
4858 skb_shinfo(skb)->frags[i].size,
4859 PCI_DMA_TODEVICE);
4861 while (ri->fragmented) {
4862 ri->fragmented = false;
4863 sw_idx = NEXT_TX(sw_idx);
4864 ri = &tnapi->tx_buffers[sw_idx];
4867 sw_idx = NEXT_TX(sw_idx);
4870 dev_kfree_skb(skb);
4872 if (unlikely(tx_bug)) {
4873 tg3_tx_recover(tp);
4874 return;
4878 tnapi->tx_cons = sw_idx;
4880 /* Need to make the tx_cons update visible to tg3_start_xmit()
4881 * before checking for netif_queue_stopped(). Without the
4882 * memory barrier, there is a small possibility that tg3_start_xmit()
4883 * will miss it and cause the queue to be stopped forever. */
4885 smp_mb();
4887 if (unlikely(netif_tx_queue_stopped(txq) &&
4888 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4889 __netif_tx_lock(txq, smp_processor_id());
4890 if (netif_tx_queue_stopped(txq) &&
4891 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4892 netif_tx_wake_queue(txq);
4893 __netif_tx_unlock(txq);
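/* The stop/wake sequence above is the usual lost-wakeup guard: the
 * lockless check can race with tg3_start_xmit() stopping the queue,
 * so both the queue state and the available count are re-checked
 * under the tx queue lock before the queue is actually woken.
 */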
4897 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4899 if (!ri->skb)
4900 return;
4902 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4903 map_sz, PCI_DMA_FROMDEVICE);
4904 dev_kfree_skb_any(ri->skb);
4905 ri->skb = NULL;
4908 /* Returns size of skb allocated or < 0 on error.
4910 * We only need to fill in the address because the other members
4911 * of the RX descriptor are invariant, see tg3_init_rings.
4913 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4914 * posting buffers we only dirty the first cache line of the RX
4915 * descriptor (containing the address). Whereas for the RX status
4916 * buffers the cpu only reads the last cacheline of the RX descriptor
4917 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). */
4919 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4920 u32 opaque_key, u32 dest_idx_unmasked)
4922 struct tg3_rx_buffer_desc *desc;
4923 struct ring_info *map;
4924 struct sk_buff *skb;
4925 dma_addr_t mapping;
4926 int skb_size, dest_idx;
4928 switch (opaque_key) {
4929 case RXD_OPAQUE_RING_STD:
4930 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4931 desc = &tpr->rx_std[dest_idx];
4932 map = &tpr->rx_std_buffers[dest_idx];
4933 skb_size = tp->rx_pkt_map_sz;
4934 break;
4936 case RXD_OPAQUE_RING_JUMBO:
4937 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4938 desc = &tpr->rx_jmb[dest_idx].std;
4939 map = &tpr->rx_jmb_buffers[dest_idx];
4940 skb_size = TG3_RX_JMB_MAP_SZ;
4941 break;
4943 default:
4944 return -EINVAL;
4947 /* Do not overwrite any of the map or rp information
4948 * until we are sure we can commit to a new buffer.
4950 * Callers depend upon this behavior and assume that
4951 * we leave everything unchanged if we fail. */
4953 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4954 if (skb == NULL)
4955 return -ENOMEM;
4957 skb_reserve(skb, tp->rx_offset);
4959 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4960 PCI_DMA_FROMDEVICE);
4961 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4962 dev_kfree_skb(skb);
4963 return -EIO;
4966 map->skb = skb;
4967 dma_unmap_addr_set(map, mapping, mapping);
4969 desc->addr_hi = ((u64)mapping >> 32);
4970 desc->addr_lo = ((u64)mapping & 0xffffffff);
4972 return skb_size;
4975 /* We only need to move over the address because the other
4976 * members of the RX descriptor are invariant. See notes above
4977 * tg3_alloc_rx_skb for full details. */
4979 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4980 struct tg3_rx_prodring_set *dpr,
4981 u32 opaque_key, int src_idx,
4982 u32 dest_idx_unmasked)
4984 struct tg3 *tp = tnapi->tp;
4985 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4986 struct ring_info *src_map, *dest_map;
4987 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4988 int dest_idx;
4990 switch (opaque_key) {
4991 case RXD_OPAQUE_RING_STD:
4992 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4993 dest_desc = &dpr->rx_std[dest_idx];
4994 dest_map = &dpr->rx_std_buffers[dest_idx];
4995 src_desc = &spr->rx_std[src_idx];
4996 src_map = &spr->rx_std_buffers[src_idx];
4997 break;
4999 case RXD_OPAQUE_RING_JUMBO:
5000 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5001 dest_desc = &dpr->rx_jmb[dest_idx].std;
5002 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5003 src_desc = &spr->rx_jmb[src_idx].std;
5004 src_map = &spr->rx_jmb_buffers[src_idx];
5005 break;
5007 default:
5008 return;
5011 dest_map->skb = src_map->skb;
5012 dma_unmap_addr_set(dest_map, mapping,
5013 dma_unmap_addr(src_map, mapping));
5014 dest_desc->addr_hi = src_desc->addr_hi;
5015 dest_desc->addr_lo = src_desc->addr_lo;
5017 /* Ensure that the update to the skb happens after the physical
5018 * addresses have been transferred to the new BD location. */
5020 smp_wmb();
5022 src_map->skb = NULL;
5025 /* The RX ring scheme is composed of multiple rings which post fresh
5026 * buffers to the chip, and one special ring the chip uses to report
5027 * status back to the host.
5029 * The special ring reports the status of received packets to the
5030 * host. The chip does not write into the original descriptor the
5031 * RX buffer was obtained from. The chip simply takes the original
5032 * descriptor as provided by the host, updates the status and length
5033 * field, then writes this into the next status ring entry.
5035 * Each ring the host uses to post buffers to the chip is described
5036 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
5037 * it is first placed into the on-chip ram. When the packet's length
5038 * is known, it walks down the TG3_BDINFO entries to select the ring.
5039 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5040 * which is within the range of the new packet's length is chosen.
5042 * The "separate ring for rx status" scheme may sound queer, but it makes
5043 * sense from a cache coherency perspective. If only the host writes
5044 * to the buffer post rings, and only the chip writes to the rx status
5045 * rings, then cache lines never move beyond shared-modified state.
5046 * If both the host and chip were to write into the same ring, cache line
5047 * eviction could occur since both entities want it in an exclusive state. */
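/* Illustrative decode of the opaque cookie described above (a sketch,
 * not additional driver code): the chip echoes back the ring type and
 * index the host placed in each posted descriptor, e.g.
 *
 *	desc_idx   = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 *	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 *
 * which is exactly how tg3_rx() below locates the ring_info for a
 * completed buffer.
 */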
5049 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5051 struct tg3 *tp = tnapi->tp;
5052 u32 work_mask, rx_std_posted = 0;
5053 u32 std_prod_idx, jmb_prod_idx;
5054 u32 sw_idx = tnapi->rx_rcb_ptr;
5055 u16 hw_idx;
5056 int received;
5057 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5059 hw_idx = *(tnapi->rx_rcb_prod_idx);
5061 /* We need to order the read of hw_idx and the read of
5062 * the opaque cookie. */
5064 rmb();
5065 work_mask = 0;
5066 received = 0;
5067 std_prod_idx = tpr->rx_std_prod_idx;
5068 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5069 while (sw_idx != hw_idx && budget > 0) {
5070 struct ring_info *ri;
5071 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5072 unsigned int len;
5073 struct sk_buff *skb;
5074 dma_addr_t dma_addr;
5075 u32 opaque_key, desc_idx, *post_ptr;
5077 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5078 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5079 if (opaque_key == RXD_OPAQUE_RING_STD) {
5080 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5081 dma_addr = dma_unmap_addr(ri, mapping);
5082 skb = ri->skb;
5083 post_ptr = &std_prod_idx;
5084 rx_std_posted++;
5085 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5086 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5087 dma_addr = dma_unmap_addr(ri, mapping);
5088 skb = ri->skb;
5089 post_ptr = &jmb_prod_idx;
5090 } else
5091 goto next_pkt_nopost;
5093 work_mask |= opaque_key;
5095 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5096 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5097 drop_it:
5098 tg3_recycle_rx(tnapi, tpr, opaque_key,
5099 desc_idx, *post_ptr);
5100 drop_it_no_recycle:
5101 /* Other statistics are kept track of by the card. */
5102 tp->rx_dropped++;
5103 goto next_pkt;
5106 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5107 ETH_FCS_LEN;
5109 if (len > TG3_RX_COPY_THRESH(tp)) {
5110 int skb_size;
5112 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5113 *post_ptr);
5114 if (skb_size < 0)
5115 goto drop_it;
5117 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5118 PCI_DMA_FROMDEVICE);
5120 /* Ensure that the update to the skb happens
5121 * after the usage of the old DMA mapping. */
5123 smp_wmb();
5125 ri->skb = NULL;
5127 skb_put(skb, len);
5128 } else {
5129 struct sk_buff *copy_skb;
5131 tg3_recycle_rx(tnapi, tpr, opaque_key,
5132 desc_idx, *post_ptr);
5134 copy_skb = netdev_alloc_skb(tp->dev, len +
5135 TG3_RAW_IP_ALIGN);
5136 if (copy_skb == NULL)
5137 goto drop_it_no_recycle;
5139 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5140 skb_put(copy_skb, len);
5141 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5142 skb_copy_from_linear_data(skb, copy_skb->data, len);
5143 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5145 /* We'll reuse the original ring buffer. */
5146 skb = copy_skb;
5149 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5150 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5151 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5152 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5153 skb->ip_summed = CHECKSUM_UNNECESSARY;
5154 else
5155 skb_checksum_none_assert(skb);
5157 skb->protocol = eth_type_trans(skb, tp->dev);
5159 if (len > (tp->dev->mtu + ETH_HLEN) &&
5160 skb->protocol != htons(ETH_P_8021Q)) {
5161 dev_kfree_skb(skb);
5162 goto drop_it_no_recycle;
5165 if (desc->type_flags & RXD_FLAG_VLAN &&
5166 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5167 __vlan_hwaccel_put_tag(skb,
5168 desc->err_vlan & RXD_VLAN_MASK);
5170 napi_gro_receive(&tnapi->napi, skb);
5172 received++;
5173 budget--;
5175 next_pkt:
5176 (*post_ptr)++;
5178 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5179 tpr->rx_std_prod_idx = std_prod_idx &
5180 tp->rx_std_ring_mask;
5181 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5182 tpr->rx_std_prod_idx);
5183 work_mask &= ~RXD_OPAQUE_RING_STD;
5184 rx_std_posted = 0;
5186 next_pkt_nopost:
5187 sw_idx++;
5188 sw_idx &= tp->rx_ret_ring_mask;
5190 /* Refresh hw_idx to see if there is new work */
5191 if (sw_idx == hw_idx) {
5192 hw_idx = *(tnapi->rx_rcb_prod_idx);
5193 rmb();
5197 /* ACK the status ring. */
5198 tnapi->rx_rcb_ptr = sw_idx;
5199 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5201 /* Refill RX ring(s). */
5202 if (!tg3_flag(tp, ENABLE_RSS)) {
5203 if (work_mask & RXD_OPAQUE_RING_STD) {
5204 tpr->rx_std_prod_idx = std_prod_idx &
5205 tp->rx_std_ring_mask;
5206 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5207 tpr->rx_std_prod_idx);
5209 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5210 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5211 tp->rx_jmb_ring_mask;
5212 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5213 tpr->rx_jmb_prod_idx);
5215 mmiowb();
5216 } else if (work_mask) {
5217 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5218 * updated before the producer indices can be updated. */
5220 smp_wmb();
5222 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5223 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5225 if (tnapi != &tp->napi[1])
5226 napi_schedule(&tp->napi[1].napi);
5229 return received;
5232 static void tg3_poll_link(struct tg3 *tp)
5234 /* handle link change and other phy events */
5235 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5236 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5238 if (sblk->status & SD_STATUS_LINK_CHG) {
5239 sblk->status = SD_STATUS_UPDATED |
5240 (sblk->status & ~SD_STATUS_LINK_CHG);
5241 spin_lock(&tp->lock);
5242 if (tg3_flag(tp, USE_PHYLIB)) {
5243 tw32_f(MAC_STATUS,
5244 (MAC_STATUS_SYNC_CHANGED |
5245 MAC_STATUS_CFG_CHANGED |
5246 MAC_STATUS_MI_COMPLETION |
5247 MAC_STATUS_LNKSTATE_CHANGED));
5248 udelay(40);
5249 } else
5250 tg3_setup_phy(tp, 0);
5251 spin_unlock(&tp->lock);
5256 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5257 struct tg3_rx_prodring_set *dpr,
5258 struct tg3_rx_prodring_set *spr)
5260 u32 si, di, cpycnt, src_prod_idx;
5261 int i, err = 0;
5263 while (1) {
5264 src_prod_idx = spr->rx_std_prod_idx;
5266 /* Make sure updates to the rx_std_buffers[] entries and the
5267 * standard producer index are seen in the correct order. */
5269 smp_rmb();
5271 if (spr->rx_std_cons_idx == src_prod_idx)
5272 break;
5274 if (spr->rx_std_cons_idx < src_prod_idx)
5275 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5276 else
5277 cpycnt = tp->rx_std_ring_mask + 1 -
5278 spr->rx_std_cons_idx;
5280 cpycnt = min(cpycnt,
5281 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5283 si = spr->rx_std_cons_idx;
5284 di = dpr->rx_std_prod_idx;
5286 for (i = di; i < di + cpycnt; i++) {
5287 if (dpr->rx_std_buffers[i].skb) {
5288 cpycnt = i - di;
5289 err = -ENOSPC;
5290 break;
5294 if (!cpycnt)
5295 break;
5297 /* Ensure that updates to the rx_std_buffers ring and the
5298 * shadowed hardware producer ring from tg3_recycle_skb() are
5299 * ordered correctly WRT the skb check above. */
5301 smp_rmb();
5303 memcpy(&dpr->rx_std_buffers[di],
5304 &spr->rx_std_buffers[si],
5305 cpycnt * sizeof(struct ring_info));
5307 for (i = 0; i < cpycnt; i++, di++, si++) {
5308 struct tg3_rx_buffer_desc *sbd, *dbd;
5309 sbd = &spr->rx_std[si];
5310 dbd = &dpr->rx_std[di];
5311 dbd->addr_hi = sbd->addr_hi;
5312 dbd->addr_lo = sbd->addr_lo;
5315 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5316 tp->rx_std_ring_mask;
5317 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5318 tp->rx_std_ring_mask;
5321 while (1) {
5322 src_prod_idx = spr->rx_jmb_prod_idx;
5324 /* Make sure updates to the rx_jmb_buffers[] entries and
5325 * the jumbo producer index are seen in the correct order. */
5327 smp_rmb();
5329 if (spr->rx_jmb_cons_idx == src_prod_idx)
5330 break;
5332 if (spr->rx_jmb_cons_idx < src_prod_idx)
5333 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5334 else
5335 cpycnt = tp->rx_jmb_ring_mask + 1 -
5336 spr->rx_jmb_cons_idx;
5338 cpycnt = min(cpycnt,
5339 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5341 si = spr->rx_jmb_cons_idx;
5342 di = dpr->rx_jmb_prod_idx;
5344 for (i = di; i < di + cpycnt; i++) {
5345 if (dpr->rx_jmb_buffers[i].skb) {
5346 cpycnt = i - di;
5347 err = -ENOSPC;
5348 break;
5352 if (!cpycnt)
5353 break;
5355 /* Ensure that updates to the rx_jmb_buffers ring and the
5356 * shadowed hardware producer ring from tg3_recycle_skb() are
5357 * ordered correctly WRT the skb check above. */
5359 smp_rmb();
5361 memcpy(&dpr->rx_jmb_buffers[di],
5362 &spr->rx_jmb_buffers[si],
5363 cpycnt * sizeof(struct ring_info));
5365 for (i = 0; i < cpycnt; i++, di++, si++) {
5366 struct tg3_rx_buffer_desc *sbd, *dbd;
5367 sbd = &spr->rx_jmb[si].std;
5368 dbd = &dpr->rx_jmb[di].std;
5369 dbd->addr_hi = sbd->addr_hi;
5370 dbd->addr_lo = sbd->addr_lo;
5373 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5374 tp->rx_jmb_ring_mask;
5375 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5376 tp->rx_jmb_ring_mask;
5379 return err;
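/* Sketch of the transfer protocol above (descriptive summary): each
 * pass snapshots the source producer index, pairs the read with
 * smp_rmb() so ring entries are observed no later than the index,
 * copies at most up to the first destination slot still holding an
 * skb (flagging -ENOSPC for the final return value in that case),
 * and then advances both consumer and producer indices under the
 * ring mask.
 */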
5382 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5384 struct tg3 *tp = tnapi->tp;
5386 /* run TX completion thread */
5387 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5388 tg3_tx(tnapi);
5389 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5390 return work_done;
5393 /* run RX thread, within the bounds set by NAPI.
5394 * All RX "locking" is done by ensuring outside
5395 * code synchronizes with tg3->napi.poll() */
5397 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5398 work_done += tg3_rx(tnapi, budget - work_done);
5400 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5401 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5402 int i, err = 0;
5403 u32 std_prod_idx = dpr->rx_std_prod_idx;
5404 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5406 for (i = 1; i < tp->irq_cnt; i++)
5407 err |= tg3_rx_prodring_xfer(tp, dpr,
5408 &tp->napi[i].prodring);
5410 wmb();
5412 if (std_prod_idx != dpr->rx_std_prod_idx)
5413 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5414 dpr->rx_std_prod_idx);
5416 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5417 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5418 dpr->rx_jmb_prod_idx);
5420 mmiowb();
5422 if (err)
5423 tw32_f(HOSTCC_MODE, tp->coal_now);
5426 return work_done;
5429 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5431 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5432 struct tg3 *tp = tnapi->tp;
5433 int work_done = 0;
5434 struct tg3_hw_status *sblk = tnapi->hw_status;
5436 while (1) {
5437 work_done = tg3_poll_work(tnapi, work_done, budget);
5439 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5440 goto tx_recovery;
5442 if (unlikely(work_done >= budget))
5443 break;
5445 /* tp->last_tag is used in tg3_int_reenable() below
5446 * to tell the hw how much work has been processed,
5447 * so we must read it before checking for more work. */
5449 tnapi->last_tag = sblk->status_tag;
5450 tnapi->last_irq_tag = tnapi->last_tag;
5451 rmb();
5453 /* check for RX/TX work to do */
5454 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5455 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5456 napi_complete(napi);
5457 /* Reenable interrupts. */
5458 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5459 mmiowb();
5460 break;
5464 return work_done;
5466 tx_recovery:
5467 /* work_done is guaranteed to be less than budget. */
5468 napi_complete(napi);
5469 schedule_work(&tp->reset_task);
5470 return work_done;
5473 static void tg3_process_error(struct tg3 *tp)
5475 u32 val;
5476 bool real_error = false;
5478 if (tg3_flag(tp, ERROR_PROCESSED))
5479 return;
5481 /* Check Flow Attention register */
5482 val = tr32(HOSTCC_FLOW_ATTN);
5483 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5484 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5485 real_error = true;
5488 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5489 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5490 real_error = true;
5493 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5494 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5495 real_error = true;
5498 if (!real_error)
5499 return;
5501 tg3_dump_state(tp);
5503 tg3_flag_set(tp, ERROR_PROCESSED);
5504 schedule_work(&tp->reset_task);
5507 static int tg3_poll(struct napi_struct *napi, int budget)
5509 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5510 struct tg3 *tp = tnapi->tp;
5511 int work_done = 0;
5512 struct tg3_hw_status *sblk = tnapi->hw_status;
5514 while (1) {
5515 if (sblk->status & SD_STATUS_ERROR)
5516 tg3_process_error(tp);
5518 tg3_poll_link(tp);
5520 work_done = tg3_poll_work(tnapi, work_done, budget);
5522 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5523 goto tx_recovery;
5525 if (unlikely(work_done >= budget))
5526 break;
5528 if (tg3_flag(tp, TAGGED_STATUS)) {
5529 /* tp->last_tag is used in tg3_int_reenable() below
5530 * to tell the hw how much work has been processed,
5531 * so we must read it before checking for more work. */
5533 tnapi->last_tag = sblk->status_tag;
5534 tnapi->last_irq_tag = tnapi->last_tag;
5535 rmb();
5536 } else
5537 sblk->status &= ~SD_STATUS_UPDATED;
5539 if (likely(!tg3_has_work(tnapi))) {
5540 napi_complete(napi);
5541 tg3_int_reenable(tnapi);
5542 break;
5546 return work_done;
5548 tx_recovery:
5549 /* work_done is guaranteed to be less than budget. */
5550 napi_complete(napi);
5551 schedule_work(&tp->reset_task);
5552 return work_done;
5555 static void tg3_napi_disable(struct tg3 *tp)
5557 int i;
5559 for (i = tp->irq_cnt - 1; i >= 0; i--)
5560 napi_disable(&tp->napi[i].napi);
5563 static void tg3_napi_enable(struct tg3 *tp)
5565 int i;
5567 for (i = 0; i < tp->irq_cnt; i++)
5568 napi_enable(&tp->napi[i].napi);
5571 static void tg3_napi_init(struct tg3 *tp)
5573 int i;
5575 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5576 for (i = 1; i < tp->irq_cnt; i++)
5577 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5580 static void tg3_napi_fini(struct tg3 *tp)
5582 int i;
5584 for (i = 0; i < tp->irq_cnt; i++)
5585 netif_napi_del(&tp->napi[i].napi);
5588 static inline void tg3_netif_stop(struct tg3 *tp)
5590 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5591 tg3_napi_disable(tp);
5592 netif_tx_disable(tp->dev);
5595 static inline void tg3_netif_start(struct tg3 *tp)
5597 /* NOTE: unconditional netif_tx_wake_all_queues is only
5598 * appropriate so long as all callers are assured to
5599 * have free tx slots (such as after tg3_init_hw) */
5601 netif_tx_wake_all_queues(tp->dev);
5603 tg3_napi_enable(tp);
5604 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5605 tg3_enable_ints(tp);
5608 static void tg3_irq_quiesce(struct tg3 *tp)
5610 int i;
5612 BUG_ON(tp->irq_sync);
5614 tp->irq_sync = 1;
5615 smp_mb();
5617 for (i = 0; i < tp->irq_cnt; i++)
5618 synchronize_irq(tp->napi[i].irq_vec);
5621 /* Fully shut down all tg3 driver activity elsewhere in the system.
5622 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5623 * with as well. Most of the time, this is not necessary except when
5624 * shutting down the device. */
5626 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5628 spin_lock_bh(&tp->lock);
5629 if (irq_sync)
5630 tg3_irq_quiesce(tp);
5633 static inline void tg3_full_unlock(struct tg3 *tp)
5635 spin_unlock_bh(&tp->lock);
5638 /* One-shot MSI handler - Chip automatically disables interrupt
5639 * after sending MSI so driver doesn't have to do it. */
5641 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5643 struct tg3_napi *tnapi = dev_id;
5644 struct tg3 *tp = tnapi->tp;
5646 prefetch(tnapi->hw_status);
5647 if (tnapi->rx_rcb)
5648 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5650 if (likely(!tg3_irq_sync(tp)))
5651 napi_schedule(&tnapi->napi);
5653 return IRQ_HANDLED;
5656 /* MSI ISR - No need to check for interrupt sharing and no need to
5657 * flush status block and interrupt mailbox. PCI ordering rules
5658 * guarantee that MSI will arrive after the status block. */
5660 static irqreturn_t tg3_msi(int irq, void *dev_id)
5662 struct tg3_napi *tnapi = dev_id;
5663 struct tg3 *tp = tnapi->tp;
5665 prefetch(tnapi->hw_status);
5666 if (tnapi->rx_rcb)
5667 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5669 /* Writing any value to intr-mbox-0 clears PCI INTA# and
5670 * chip-internal interrupt pending events.
5671 * Writing non-zero to intr-mbox-0 additionally tells the
5672 * NIC to stop sending us irqs, engaging "in-intr-handler"
5673 * event coalescing. */
5675 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5676 if (likely(!tg3_irq_sync(tp)))
5677 napi_schedule(&tnapi->napi);
5679 return IRQ_RETVAL(1);
5682 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5684 struct tg3_napi *tnapi = dev_id;
5685 struct tg3 *tp = tnapi->tp;
5686 struct tg3_hw_status *sblk = tnapi->hw_status;
5687 unsigned int handled = 1;
5689 /* In INTx mode, it is possible for the interrupt to arrive at
5690 * the CPU before the status block posted prior to the interrupt.
5691 * Reading the PCI State register will confirm whether the
5692 * interrupt is ours and will flush the status block. */
5694 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5695 if (tg3_flag(tp, CHIP_RESETTING) ||
5696 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5697 handled = 0;
5698 goto out;
5703 /* Writing any value to intr-mbox-0 clears PCI INTA# and
5704 * chip-internal interrupt pending events.
5705 * Writing non-zero to intr-mbox-0 additionally tells the
5706 * NIC to stop sending us irqs, engaging "in-intr-handler"
5707 * event coalescing.
5709 * Flush the mailbox to de-assert the IRQ immediately to prevent
5710 * spurious interrupts. The flush impacts performance but
5711 * excessive spurious interrupts can be worse in some cases. */
5713 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5714 if (tg3_irq_sync(tp))
5715 goto out;
5716 sblk->status &= ~SD_STATUS_UPDATED;
5717 if (likely(tg3_has_work(tnapi))) {
5718 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5719 napi_schedule(&tnapi->napi);
5720 } else {
5721 /* No work, shared interrupt perhaps? re-enable
5722 * interrupts, and flush that PCI write */
5724 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5725 0x00000000);
5727 out:
5728 return IRQ_RETVAL(handled);
5731 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5733 struct tg3_napi *tnapi = dev_id;
5734 struct tg3 *tp = tnapi->tp;
5735 struct tg3_hw_status *sblk = tnapi->hw_status;
5736 unsigned int handled = 1;
5738 /* In INTx mode, it is possible for the interrupt to arrive at
5739 * the CPU before the status block posted prior to the interrupt.
5740 * Reading the PCI State register will confirm whether the
5741 * interrupt is ours and will flush the status block. */
5743 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5744 if (tg3_flag(tp, CHIP_RESETTING) ||
5745 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5746 handled = 0;
5747 goto out;
5752 /* Writing any value to intr-mbox-0 clears PCI INTA# and
5753 * chip-internal interrupt pending events.
5754 * Writing non-zero to intr-mbox-0 additionally tells the
5755 * NIC to stop sending us irqs, engaging "in-intr-handler"
5756 * event coalescing.
5758 * Flush the mailbox to de-assert the IRQ immediately to prevent
5759 * spurious interrupts. The flush impacts performance but
5760 * excessive spurious interrupts can be worse in some cases. */
5762 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5765 /* In a shared interrupt configuration, sometimes other devices'
5766 * interrupts will scream. We record the current status tag here
5767 * so that the above check can report that the screaming interrupts
5768 * are unhandled. Eventually they will be silenced. */
5770 tnapi->last_irq_tag = sblk->status_tag;
5772 if (tg3_irq_sync(tp))
5773 goto out;
5775 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5777 napi_schedule(&tnapi->napi);
5779 out:
5780 return IRQ_RETVAL(handled);
5783 /* ISR for interrupt test */
5784 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5786 struct tg3_napi *tnapi = dev_id;
5787 struct tg3 *tp = tnapi->tp;
5788 struct tg3_hw_status *sblk = tnapi->hw_status;
5790 if ((sblk->status & SD_STATUS_UPDATED) ||
5791 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5792 tg3_disable_ints(tp);
5793 return IRQ_RETVAL(1);
5795 return IRQ_RETVAL(0);
5798 static int tg3_init_hw(struct tg3 *, int);
5799 static int tg3_halt(struct tg3 *, int, int);
5801 /* Restart hardware after configuration changes, self-test, etc.
5802 * Invoked with tp->lock held. */
5804 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5805 __releases(tp->lock)
5806 __acquires(tp->lock)
5808 int err;
5810 err = tg3_init_hw(tp, reset_phy);
5811 if (err) {
5812 netdev_err(tp->dev,
5813 "Failed to re-initialize device, aborting\n");
5814 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5815 tg3_full_unlock(tp);
5816 del_timer_sync(&tp->timer);
5817 tp->irq_sync = 0;
5818 tg3_napi_enable(tp);
5819 dev_close(tp->dev);
5820 tg3_full_lock(tp, 0);
5822 return err;
5825 #ifdef CONFIG_NET_POLL_CONTROLLER
5826 static void tg3_poll_controller(struct net_device *dev)
5828 int i;
5829 struct tg3 *tp = netdev_priv(dev);
5831 for (i = 0; i < tp->irq_cnt; i++)
5832 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5834 #endif
5836 static void tg3_reset_task(struct work_struct *work)
5838 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5839 int err;
5840 unsigned int restart_timer;
5842 tg3_full_lock(tp, 0);
5844 if (!netif_running(tp->dev)) {
5845 tg3_full_unlock(tp);
5846 return;
5849 tg3_full_unlock(tp);
5851 tg3_phy_stop(tp);
5853 tg3_netif_stop(tp);
5855 tg3_full_lock(tp, 1);
5857 restart_timer = tg3_flag(tp, RESTART_TIMER);
5858 tg3_flag_clear(tp, RESTART_TIMER);
5860 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5861 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5862 tp->write32_rx_mbox = tg3_write_flush_reg32;
5863 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5864 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5867 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5868 err = tg3_init_hw(tp, 1);
5869 if (err)
5870 goto out;
5872 tg3_netif_start(tp);
5874 if (restart_timer)
5875 mod_timer(&tp->timer, jiffies + 1);
5877 out:
5878 tg3_full_unlock(tp);
5880 if (!err)
5881 tg3_phy_start(tp);
5884 static void tg3_tx_timeout(struct net_device *dev)
5886 struct tg3 *tp = netdev_priv(dev);
5888 if (netif_msg_tx_err(tp)) {
5889 netdev_err(dev, "transmit timed out, resetting\n");
5890 tg3_dump_state(tp);
5893 schedule_work(&tp->reset_task);
5896 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5897 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5899 u32 base = (u32) mapping & 0xffffffff;
5901 return (base > 0xffffdcc0) && (base + len + 8 < base);
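/* How the test above works (illustrative numbers): the low 32 bits of
 * the mapping cross a 4GB boundary exactly when base + len + 8
 * overflows u32, e.g. base = 0xfffff000 with len = 0x2000 yields
 * 0x00001008 < base. The base > 0xffffdcc0 pre-filter skips buffers
 * starting more than 0x2340 (9024) bytes below the boundary, which
 * cannot wrap; that margin is evidently sized for the largest DMA
 * segment this driver can post, plus the 8-byte slop.
 */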
5904 /* Test for DMA addresses > 40-bit */
5905 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5906 int len)
5908 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5909 if (tg3_flag(tp, 40BIT_DMA_BUG))
5910 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5911 return 0;
5912 #else
5913 return 0;
5914 #endif
5917 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
5918 dma_addr_t mapping, u32 len, u32 flags,
5919 u32 mss, u32 vlan)
5921 txbd->addr_hi = ((u64) mapping >> 32);
5922 txbd->addr_lo = ((u64) mapping & 0xffffffff);
5923 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
5924 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
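/* BD packing per tg3_tx_set_bd() above: the 64-bit DMA address is
 * split across addr_hi/addr_lo, len_flags carries the length in its
 * upper 16 bits with the TXD_FLAG_* bits below it, and vlan_tag
 * multiplexes the TSO MSS and VLAN tag through their respective
 * shifts.
 */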
5927 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
5928 dma_addr_t map, u32 len, u32 flags,
5929 u32 mss, u32 vlan)
5931 struct tg3 *tp = tnapi->tp;
5932 bool hwbug = false;
5934 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
5935 hwbug = true;
5937 if (tg3_4g_overflow_test(map, len))
5938 hwbug = true;
5940 if (tg3_40bit_overflow_test(tp, map, len))
5941 hwbug = true;
5943 if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
5944 u32 tmp_flag = flags & ~TXD_FLAG_END;
5945 while (len > TG3_TX_BD_DMA_MAX) {
5946 u32 frag_len = TG3_TX_BD_DMA_MAX;
5947 len -= TG3_TX_BD_DMA_MAX;
5949 if (len) {
5950 tnapi->tx_buffers[*entry].fragmented = true;
5951 /* Avoid the 8-byte DMA problem */
5952 if (len <= 8) {
5953 len += TG3_TX_BD_DMA_MAX / 2;
5954 frag_len = TG3_TX_BD_DMA_MAX / 2;
5956 } else
5957 tmp_flag = flags;
5959 if (*budget) {
5960 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
5961 frag_len, tmp_flag, mss, vlan);
5962 (*budget)--;
5963 *entry = NEXT_TX(*entry);
5964 } else {
5965 hwbug = true;
5966 break;
5969 map += frag_len;
5972 if (len) {
5973 if (*budget) {
5974 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
5975 len, flags, mss, vlan);
5976 (*budget)--;
5977 *entry = NEXT_TX(*entry);
5978 } else {
5979 hwbug = true;
5982 } else {
5983 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
5984 len, flags, mss, vlan);
5985 *entry = NEXT_TX(*entry);
5988 return hwbug;
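/* Example of the 4K-FIFO split above (illustrative sizes): a 10000
 * byte fragment goes out as 4096 + 4096 + 1808 byte BDs; when a split
 * would leave a tail of 8 bytes or less (the SHORT_DMA_BUG case), the
 * current BD is halved to 2048 bytes so both remaining pieces stay
 * above 8 bytes. Every BD except the final one is flagged fragmented
 * so tg3_tx()/tg3_tx_skb_unmap() know to skip the extra ring entries
 * when unmapping.
 */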
5991 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
5993 int i;
5994 struct sk_buff *skb;
5995 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
5997 skb = txb->skb;
5998 txb->skb = NULL;
6000 pci_unmap_single(tnapi->tp->pdev,
6001 dma_unmap_addr(txb, mapping),
6002 skb_headlen(skb),
6003 PCI_DMA_TODEVICE);
6005 while (txb->fragmented) {
6006 txb->fragmented = false;
6007 entry = NEXT_TX(entry);
6008 txb = &tnapi->tx_buffers[entry];
6011 for (i = 0; i < last; i++) {
6012 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6014 entry = NEXT_TX(entry);
6015 txb = &tnapi->tx_buffers[entry];
6017 pci_unmap_page(tnapi->tp->pdev,
6018 dma_unmap_addr(txb, mapping),
6019 frag->size, PCI_DMA_TODEVICE);
6021 while (txb->fragmented) {
6022 txb->fragmented = false;
6023 entry = NEXT_TX(entry);
6024 txb = &tnapi->tx_buffers[entry];
6029 /* Work around 4GB and 40-bit hardware DMA bugs. */
6030 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6031 struct sk_buff *skb,
6032 u32 *entry, u32 *budget,
6033 u32 base_flags, u32 mss, u32 vlan)
6035 struct tg3 *tp = tnapi->tp;
6036 struct sk_buff *new_skb;
6037 dma_addr_t new_addr = 0;
6038 int ret = 0;
6040 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6041 new_skb = skb_copy(skb, GFP_ATOMIC);
6042 else {
6043 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6045 new_skb = skb_copy_expand(skb,
6046 skb_headroom(skb) + more_headroom,
6047 skb_tailroom(skb), GFP_ATOMIC);
6050 if (!new_skb) {
6051 ret = -1;
6052 } else {
6053 /* New SKB is guaranteed to be linear. */
6054 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6055 PCI_DMA_TODEVICE);
6056 /* Make sure the mapping succeeded */
6057 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6058 dev_kfree_skb(new_skb);
6059 ret = -1;
6060 } else {
6061 base_flags |= TXD_FLAG_END;
6063 tnapi->tx_buffers[*entry].skb = new_skb;
6064 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6065 mapping, new_addr);
6067 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6068 new_skb->len, base_flags,
6069 mss, vlan)) {
6070 tg3_tx_skb_unmap(tnapi, *entry, 0);
6071 dev_kfree_skb(new_skb);
6072 ret = -1;
6077 dev_kfree_skb(skb);
6079 return ret;
6082 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6084 /* Use GSO to work around a rare TSO bug that may be triggered when the
6085 * TSO header is greater than 80 bytes. */
6087 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6089 struct sk_buff *segs, *nskb;
6090 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6092 /* Estimate the number of fragments in the worst case */
6093 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6094 netif_stop_queue(tp->dev);
6096 /* netif_tx_stop_queue() must be done before checking
6097 * tx index in tg3_tx_avail() below, because in
6098 * tg3_tx(), we update tx index before checking for
6099 * netif_tx_queue_stopped(). */
6101 smp_mb();
6102 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6103 return NETDEV_TX_BUSY;
6105 netif_wake_queue(tp->dev);
6108 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6109 if (IS_ERR(segs))
6110 goto tg3_tso_bug_end;
6112 do {
6113 nskb = segs;
6114 segs = segs->next;
6115 nskb->next = NULL;
6116 tg3_start_xmit(nskb, tp->dev);
6117 } while (segs);
6119 tg3_tso_bug_end:
6120 dev_kfree_skb(skb);
6122 return NETDEV_TX_OK;
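/* The fallback above leans on the generic GSO layer: the oversized
 * TSO skb is segmented in software (NETIF_F_TSO masked out of the
 * feature set) and each resulting MTU-sized skb is fed back through
 * tg3_start_xmit(), where it no longer trips the 80-byte header
 * limit.
 */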
6125 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6126 * support TG3_FLAG_HW_TSO_1 or firmware TSO only. */
6128 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6130 struct tg3 *tp = netdev_priv(dev);
6131 u32 len, entry, base_flags, mss, vlan = 0;
6132 u32 budget;
6133 int i = -1, would_hit_hwbug;
6134 dma_addr_t mapping;
6135 struct tg3_napi *tnapi;
6136 struct netdev_queue *txq;
6137 unsigned int last;
6139 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6140 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6141 if (tg3_flag(tp, ENABLE_TSS))
6142 tnapi++;
6144 budget = tg3_tx_avail(tnapi);
6146 /* We are running in BH disabled context with netif_tx_lock
6147 * and TX reclaim runs via tp->napi.poll inside of a software
6148 * interrupt. Furthermore, IRQ processing runs lockless so we have
6149 * no IRQ context deadlocks to worry about either. Rejoice! */
6151 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6152 if (!netif_tx_queue_stopped(txq)) {
6153 netif_tx_stop_queue(txq);
6155 /* This is a hard error, log it. */
6156 netdev_err(dev,
6157 "BUG! Tx Ring full when queue awake!\n");
6159 return NETDEV_TX_BUSY;
6162 entry = tnapi->tx_prod;
6163 base_flags = 0;
6164 if (skb->ip_summed == CHECKSUM_PARTIAL)
6165 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6167 mss = skb_shinfo(skb)->gso_size;
6168 if (mss) {
6169 struct iphdr *iph;
6170 u32 tcp_opt_len, hdr_len;
6172 if (skb_header_cloned(skb) &&
6173 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6174 dev_kfree_skb(skb);
6175 goto out_unlock;
6178 iph = ip_hdr(skb);
6179 tcp_opt_len = tcp_optlen(skb);
6181 if (skb_is_gso_v6(skb)) {
6182 hdr_len = skb_headlen(skb) - ETH_HLEN;
6183 } else {
6184 u32 ip_tcp_len;
6186 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6187 hdr_len = ip_tcp_len + tcp_opt_len;
6189 iph->check = 0;
6190 iph->tot_len = htons(mss + hdr_len);
6193 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6194 tg3_flag(tp, TSO_BUG))
6195 return tg3_tso_bug(tp, skb);
6197 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6198 TXD_FLAG_CPU_POST_DMA);
6200 if (tg3_flag(tp, HW_TSO_1) ||
6201 tg3_flag(tp, HW_TSO_2) ||
6202 tg3_flag(tp, HW_TSO_3)) {
6203 tcp_hdr(skb)->check = 0;
6204 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6205 } else
6206 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6207 iph->daddr, 0,
6208 IPPROTO_TCP,
6211 if (tg3_flag(tp, HW_TSO_3)) {
6212 mss |= (hdr_len & 0xc) << 12;
6213 if (hdr_len & 0x10)
6214 base_flags |= 0x00000010;
6215 base_flags |= (hdr_len & 0x3e0) << 5;
6216 } else if (tg3_flag(tp, HW_TSO_2))
6217 mss |= hdr_len << 9;
6218 else if (tg3_flag(tp, HW_TSO_1) ||
6219 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6220 if (tcp_opt_len || iph->ihl > 5) {
6221 int tsflags;
6223 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6224 mss |= (tsflags << 11);
6226 } else {
6227 if (tcp_opt_len || iph->ihl > 5) {
6228 int tsflags;
6230 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6231 base_flags |= tsflags << 12;
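/* To summarize the header-length plumbing above: HW_TSO_3 parts split
 * the full IP+TCP header length between mss (hdr_len bits 2-3 into mss
 * bits 14-15) and base_flags (bit 4 in place, bits 5-9 shifted up to
 * bits 10-14); HW_TSO_2 parts carry it whole at mss bit 9 and up; older
 * parts instead pass just the option length in 32-bit words (IP options
 * plus TCP options) via mss or base_flags.
 */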
6236 #ifdef BCM_KERNEL_SUPPORTS_8021Q
6237 if (vlan_tx_tag_present(skb)) {
6238 base_flags |= TXD_FLAG_VLAN;
6239 vlan = vlan_tx_tag_get(skb);
6241 #endif
6243 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6244 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6245 base_flags |= TXD_FLAG_JMB_PKT;
6247 len = skb_headlen(skb);
6249 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6250 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6251 dev_kfree_skb(skb);
6252 goto out_unlock;
6255 tnapi->tx_buffers[entry].skb = skb;
6256 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6258 would_hit_hwbug = 0;
6260 if (tg3_flag(tp, 5701_DMA_BUG))
6261 would_hit_hwbug = 1;
6263 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6264 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6265 mss, vlan))
6266 would_hit_hwbug = 1;
6268 /* Now loop through additional data fragments, and queue them. */
6269 if (skb_shinfo(skb)->nr_frags > 0) {
6270 u32 tmp_mss = mss;
6272 if (!tg3_flag(tp, HW_TSO_1) &&
6273 !tg3_flag(tp, HW_TSO_2) &&
6274 !tg3_flag(tp, HW_TSO_3))
6275 tmp_mss = 0;
6277 last = skb_shinfo(skb)->nr_frags - 1;
6278 for (i = 0; i <= last; i++) {
6279 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6281 len = frag->size;
6282 mapping = pci_map_page(tp->pdev,
6283 frag->page,
6284 frag->page_offset,
6285 len, PCI_DMA_TODEVICE);
6287 tnapi->tx_buffers[entry].skb = NULL;
6288 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6289 mapping);
6290 if (pci_dma_mapping_error(tp->pdev, mapping))
6291 goto dma_error;
6293 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6294 len, base_flags |
6295 ((i == last) ? TXD_FLAG_END : 0),
6296 tmp_mss, vlan))
6297 would_hit_hwbug = 1;
6301 if (would_hit_hwbug) {
6302 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6304 /* If the workaround fails due to memory/mapping
6305 * failure, silently drop this packet.
6307 entry = tnapi->tx_prod;
6308 budget = tg3_tx_avail(tnapi);
6309 if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
6310 base_flags, mss, vlan))
6311 goto out_unlock;
6314 skb_tx_timestamp(skb);
6316 /* Packets are ready, update Tx producer idx local and on card. */
6317 tw32_tx_mbox(tnapi->prodmbox, entry);
6319 tnapi->tx_prod = entry;
6320 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6321 netif_tx_stop_queue(txq);
6323 /* netif_tx_stop_queue() must be done before checking
6324 * tx index in tg3_tx_avail() below, because in
6325 * tg3_tx(), we update tx index before checking for
6326 * netif_tx_queue_stopped().
6328 smp_mb();
6329 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6330 netif_tx_wake_queue(txq);
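/* Waking at TG3_TX_WAKEUP_THRESH rather than at MAX_SKB_FRAGS + 1 adds
 * hysteresis: the queue stays stopped until a comfortable number of
 * descriptors is free again, which avoids a stop/wake ping-pong with
 * the tx completion path.
 */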
6333 out_unlock:
6334 mmiowb();
6336 return NETDEV_TX_OK;
6338 dma_error:
6339 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6340 dev_kfree_skb(skb);
6341 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6342 return NETDEV_TX_OK;
6345 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6347 if (enable) {
6348 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6349 MAC_MODE_PORT_MODE_MASK);
6351 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6353 if (!tg3_flag(tp, 5705_PLUS))
6354 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6356 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6357 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6358 else
6359 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6360 } else {
6361 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6363 if (tg3_flag(tp, 5705_PLUS) ||
6364 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6365 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6366 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6369 tw32(MAC_MODE, tp->mac_mode);
6370 udelay(40);
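/* With MAC_MODE_PORT_INT_LPBACK set, transmitted frames are reflected
 * back into the rx path inside the MAC itself, so frames should never
 * reach the PHY or the wire; the MII/GMII port mode only has to match
 * what the attached PHY would support.
 */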
6373 static void tg3_phy_lpbk_set(struct tg3 *tp, u32 speed)
6375 u32 val, bmcr, mac_mode;
6377 tg3_phy_toggle_apd(tp, false);
6378 tg3_phy_toggle_automdix(tp, 0);
6380 bmcr = BMCR_LOOPBACK | BMCR_FULLDPLX;
6381 switch (speed) {
6382 case SPEED_10:
6383 break;
6384 case SPEED_100:
6385 bmcr |= BMCR_SPEED100;
6386 break;
6387 case SPEED_1000:
6388 default:
6389 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6390 speed = SPEED_100;
6391 bmcr |= BMCR_SPEED100;
6392 } else {
6393 speed = SPEED_1000;
6394 bmcr |= BMCR_SPEED1000;
6398 tg3_writephy(tp, MII_BMCR, bmcr);
6400 /* The write needs to be flushed for the FETs */
6401 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6402 tg3_readphy(tp, MII_BMCR, &bmcr);
6404 udelay(40);
6406 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6407 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6408 tg3_writephy(tp, MII_TG3_FET_PTEST,
6409 MII_TG3_FET_PTEST_FRC_TX_LINK |
6410 MII_TG3_FET_PTEST_FRC_TX_LOCK);
6412 /* The write needs to be flushed for the AC131 */
6413 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6416 /* Reset to prevent losing 1st rx packet intermittently */
6417 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6418 tg3_flag(tp, 5780_CLASS)) {
6419 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6420 udelay(10);
6421 tw32_f(MAC_RX_MODE, tp->rx_mode);
6424 mac_mode = tp->mac_mode &
6425 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6426 if (speed == SPEED_1000)
6427 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6428 else
6429 mac_mode |= MAC_MODE_PORT_MODE_MII;
6431 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6432 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6434 if (masked_phy_id == TG3_PHY_ID_BCM5401)
6435 mac_mode &= ~MAC_MODE_LINK_POLARITY;
6436 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6437 mac_mode |= MAC_MODE_LINK_POLARITY;
6439 tg3_writephy(tp, MII_TG3_EXT_CTRL,
6440 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6443 tw32(MAC_MODE, mac_mode);
6444 udelay(40);
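/* Unlike the MAC loopback above, BMCR_LOOPBACK loops frames at the PHY,
 * so the MAC-to-PHY interface is exercised as well; the MAC port mode
 * is matched to the speed forced into BMCR above.
 */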
6447 static void tg3_set_loopback(struct net_device *dev, u32 features)
6449 struct tg3 *tp = netdev_priv(dev);
6451 if (features & NETIF_F_LOOPBACK) {
6452 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6453 return;
6455 spin_lock_bh(&tp->lock);
6456 tg3_mac_loopback(tp, true);
6457 netif_carrier_on(tp->dev);
6458 spin_unlock_bh(&tp->lock);
6459 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6460 } else {
6461 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6462 return;
6464 spin_lock_bh(&tp->lock);
6465 tg3_mac_loopback(tp, false);
6466 /* Force link status check */
6467 tg3_setup_phy(tp, 1);
6468 spin_unlock_bh(&tp->lock);
6469 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6473 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6475 struct tg3 *tp = netdev_priv(dev);
6477 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6478 features &= ~NETIF_F_ALL_TSO;
6480 return features;
6483 static int tg3_set_features(struct net_device *dev, u32 features)
6485 u32 changed = dev->features ^ features;
6487 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6488 tg3_set_loopback(dev, features);
6490 return 0;
6493 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6494 int new_mtu)
6496 dev->mtu = new_mtu;
6498 if (new_mtu > ETH_DATA_LEN) {
6499 if (tg3_flag(tp, 5780_CLASS)) {
6500 netdev_update_features(dev);
6501 tg3_flag_clear(tp, TSO_CAPABLE);
6502 } else {
6503 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6505 } else {
6506 if (tg3_flag(tp, 5780_CLASS)) {
6507 tg3_flag_set(tp, TSO_CAPABLE);
6508 netdev_update_features(dev);
6510 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
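/* On 5780-class parts jumbo MTUs and TSO are mutually exclusive (see
 * tg3_fix_features() above, which masks NETIF_F_ALL_TSO once the MTU
 * exceeds ETH_DATA_LEN), so an MTU change toggles TSO_CAPABLE there
 * instead of the dedicated jumbo producer ring.
 */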
6514 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6516 struct tg3 *tp = netdev_priv(dev);
6517 int err;
6519 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6520 return -EINVAL;
6522 if (!netif_running(dev)) {
6523 /* We'll just catch it later when the
6524 * device is brought up.
6526 tg3_set_mtu(dev, tp, new_mtu);
6527 return 0;
6530 tg3_phy_stop(tp);
6532 tg3_netif_stop(tp);
6534 tg3_full_lock(tp, 1);
6536 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6538 tg3_set_mtu(dev, tp, new_mtu);
6540 err = tg3_restart_hw(tp, 0);
6542 if (!err)
6543 tg3_netif_start(tp);
6545 tg3_full_unlock(tp);
6547 if (!err)
6548 tg3_phy_start(tp);
6550 return err;
6553 static void tg3_rx_prodring_free(struct tg3 *tp,
6554 struct tg3_rx_prodring_set *tpr)
6556 int i;
6558 if (tpr != &tp->napi[0].prodring) {
6559 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6560 i = (i + 1) & tp->rx_std_ring_mask)
6561 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6562 tp->rx_pkt_map_sz);
6564 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6565 for (i = tpr->rx_jmb_cons_idx;
6566 i != tpr->rx_jmb_prod_idx;
6567 i = (i + 1) & tp->rx_jmb_ring_mask) {
6568 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6569 TG3_RX_JMB_MAP_SZ);
6573 return;
6576 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6577 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6578 tp->rx_pkt_map_sz);
6580 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6581 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6582 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6583 TG3_RX_JMB_MAP_SZ);
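/* Only the default (vector 0) prodring owns the buffer arrays outright
 * and is swept end to end; the per-vector rings handled above can only
 * hold buffers in their cons..prod window, so just that span is walked.
 */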
6587 /* Initialize rx rings for packet processing.
6589 * The chip has been shut down and the driver detached from
6590 * the networking, so no interrupts or new tx packets will
6591 * end up in the driver. tp->{tx,}lock are held and thus
6592 * we may not sleep.
6594 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6595 struct tg3_rx_prodring_set *tpr)
6597 u32 i, rx_pkt_dma_sz;
6599 tpr->rx_std_cons_idx = 0;
6600 tpr->rx_std_prod_idx = 0;
6601 tpr->rx_jmb_cons_idx = 0;
6602 tpr->rx_jmb_prod_idx = 0;
6604 if (tpr != &tp->napi[0].prodring) {
6605 memset(&tpr->rx_std_buffers[0], 0,
6606 TG3_RX_STD_BUFF_RING_SIZE(tp));
6607 if (tpr->rx_jmb_buffers)
6608 memset(&tpr->rx_jmb_buffers[0], 0,
6609 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6610 goto done;
6613 /* Zero out all descriptors. */
6614 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6616 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6617 if (tg3_flag(tp, 5780_CLASS) &&
6618 tp->dev->mtu > ETH_DATA_LEN)
6619 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6620 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6622 /* Initialize invariants of the rings; we only set this
6623 * stuff once. This works because the card does not
6624 * write into the rx buffer posting rings.
6626 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6627 struct tg3_rx_buffer_desc *rxd;
6629 rxd = &tpr->rx_std[i];
6630 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6631 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6632 rxd->opaque = (RXD_OPAQUE_RING_STD |
6633 (i << RXD_OPAQUE_INDEX_SHIFT));
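/* As the RXD_OPAQUE_* names suggest, the opaque word round-trips
 * through the chip untouched and comes back in the rx return ring, so
 * encoding the ring type and index here lets the completion path find
 * the matching buffer without searching.
 */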
6636 /* Now allocate fresh SKBs for each rx ring. */
6637 for (i = 0; i < tp->rx_pending; i++) {
6638 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6639 netdev_warn(tp->dev,
6640 "Using a smaller RX standard ring. Only "
6641 "%d out of %d buffers were allocated "
6642 "successfully\n", i, tp->rx_pending);
6643 if (i == 0)
6644 goto initfail;
6645 tp->rx_pending = i;
6646 break;
6650 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6651 goto done;
6653 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6655 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6656 goto done;
6658 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6659 struct tg3_rx_buffer_desc *rxd;
6661 rxd = &tpr->rx_jmb[i].std;
6662 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6663 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6664 RXD_FLAG_JUMBO;
6665 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6666 (i << RXD_OPAQUE_INDEX_SHIFT));
6669 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6670 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6671 netdev_warn(tp->dev,
6672 "Using a smaller RX jumbo ring. Only %d "
6673 "out of %d buffers were allocated "
6674 "successfully\n", i, tp->rx_jumbo_pending);
6675 if (i == 0)
6676 goto initfail;
6677 tp->rx_jumbo_pending = i;
6678 break;
6682 done:
6683 return 0;
6685 initfail:
6686 tg3_rx_prodring_free(tp, tpr);
6687 return -ENOMEM;
6690 static void tg3_rx_prodring_fini(struct tg3 *tp,
6691 struct tg3_rx_prodring_set *tpr)
6693 kfree(tpr->rx_std_buffers);
6694 tpr->rx_std_buffers = NULL;
6695 kfree(tpr->rx_jmb_buffers);
6696 tpr->rx_jmb_buffers = NULL;
6697 if (tpr->rx_std) {
6698 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6699 tpr->rx_std, tpr->rx_std_mapping);
6700 tpr->rx_std = NULL;
6702 if (tpr->rx_jmb) {
6703 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6704 tpr->rx_jmb, tpr->rx_jmb_mapping);
6705 tpr->rx_jmb = NULL;
6709 static int tg3_rx_prodring_init(struct tg3 *tp,
6710 struct tg3_rx_prodring_set *tpr)
6712 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6713 GFP_KERNEL);
6714 if (!tpr->rx_std_buffers)
6715 return -ENOMEM;
6717 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6718 TG3_RX_STD_RING_BYTES(tp),
6719 &tpr->rx_std_mapping,
6720 GFP_KERNEL);
6721 if (!tpr->rx_std)
6722 goto err_out;
6724 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6725 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6726 GFP_KERNEL);
6727 if (!tpr->rx_jmb_buffers)
6728 goto err_out;
6730 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6731 TG3_RX_JMB_RING_BYTES(tp),
6732 &tpr->rx_jmb_mapping,
6733 GFP_KERNEL);
6734 if (!tpr->rx_jmb)
6735 goto err_out;
6738 return 0;
6740 err_out:
6741 tg3_rx_prodring_fini(tp, tpr);
6742 return -ENOMEM;
6745 /* Free up pending packets in all rx/tx rings.
6747 * The chip has been shut down and the driver detached from
6748 * the networking, so no interrupts or new tx packets will
6749 * end up in the driver. tp->{tx,}lock is not held and we are not
6750 * in an interrupt context and thus may sleep.
6752 static void tg3_free_rings(struct tg3 *tp)
6754 int i, j;
6756 for (j = 0; j < tp->irq_cnt; j++) {
6757 struct tg3_napi *tnapi = &tp->napi[j];
6759 tg3_rx_prodring_free(tp, &tnapi->prodring);
6761 if (!tnapi->tx_buffers)
6762 continue;
6764 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
6765 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
6767 if (!skb)
6768 continue;
6770 tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
6772 dev_kfree_skb_any(skb);
6777 /* Initialize tx/rx rings for packet processing.
6779 * The chip has been shut down and the driver detached from
6780 * the networking, so no interrupts or new tx packets will
6781 * end up in the driver. tp->{tx,}lock are held and thus
6782 * we may not sleep.
6784 static int tg3_init_rings(struct tg3 *tp)
6786 int i;
6788 /* Free up all the SKBs. */
6789 tg3_free_rings(tp);
6791 for (i = 0; i < tp->irq_cnt; i++) {
6792 struct tg3_napi *tnapi = &tp->napi[i];
6794 tnapi->last_tag = 0;
6795 tnapi->last_irq_tag = 0;
6796 tnapi->hw_status->status = 0;
6797 tnapi->hw_status->status_tag = 0;
6798 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6800 tnapi->tx_prod = 0;
6801 tnapi->tx_cons = 0;
6802 if (tnapi->tx_ring)
6803 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6805 tnapi->rx_rcb_ptr = 0;
6806 if (tnapi->rx_rcb)
6807 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6809 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6810 tg3_free_rings(tp);
6811 return -ENOMEM;
6815 return 0;
6819 * Must not be invoked with interrupt sources disabled and
6820 * the hardware shut down.
6822 static void tg3_free_consistent(struct tg3 *tp)
6824 int i;
6826 for (i = 0; i < tp->irq_cnt; i++) {
6827 struct tg3_napi *tnapi = &tp->napi[i];
6829 if (tnapi->tx_ring) {
6830 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6831 tnapi->tx_ring, tnapi->tx_desc_mapping);
6832 tnapi->tx_ring = NULL;
6835 kfree(tnapi->tx_buffers);
6836 tnapi->tx_buffers = NULL;
6838 if (tnapi->rx_rcb) {
6839 dma_free_coherent(&tp->pdev->dev,
6840 TG3_RX_RCB_RING_BYTES(tp),
6841 tnapi->rx_rcb,
6842 tnapi->rx_rcb_mapping);
6843 tnapi->rx_rcb = NULL;
6846 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6848 if (tnapi->hw_status) {
6849 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6850 tnapi->hw_status,
6851 tnapi->status_mapping);
6852 tnapi->hw_status = NULL;
6856 if (tp->hw_stats) {
6857 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6858 tp->hw_stats, tp->stats_mapping);
6859 tp->hw_stats = NULL;
6864 * Must not be invoked with interrupt sources disabled and
6865 * the hardware shut down. Can sleep.
6867 static int tg3_alloc_consistent(struct tg3 *tp)
6869 int i;
6871 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6872 sizeof(struct tg3_hw_stats),
6873 &tp->stats_mapping,
6874 GFP_KERNEL);
6875 if (!tp->hw_stats)
6876 goto err_out;
6878 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6880 for (i = 0; i < tp->irq_cnt; i++) {
6881 struct tg3_napi *tnapi = &tp->napi[i];
6882 struct tg3_hw_status *sblk;
6884 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6885 TG3_HW_STATUS_SIZE,
6886 &tnapi->status_mapping,
6887 GFP_KERNEL);
6888 if (!tnapi->hw_status)
6889 goto err_out;
6891 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6892 sblk = tnapi->hw_status;
6894 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6895 goto err_out;
6897 /* If multivector TSS is enabled, vector 0 does not handle
6898 * tx interrupts. Don't allocate any resources for it.
6900 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6901 (i && tg3_flag(tp, ENABLE_TSS))) {
6902 tnapi->tx_buffers = kzalloc(
6903 sizeof(struct tg3_tx_ring_info) *
6904 TG3_TX_RING_SIZE, GFP_KERNEL);
6905 if (!tnapi->tx_buffers)
6906 goto err_out;
6908 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6909 TG3_TX_RING_BYTES,
6910 &tnapi->tx_desc_mapping,
6911 GFP_KERNEL);
6912 if (!tnapi->tx_ring)
6913 goto err_out;
6917 * When RSS is enabled, the status block format changes
6918 * slightly. The "rx_jumbo_consumer", "reserved",
6919 * and "rx_mini_consumer" members get mapped to the
6920 * other three rx return ring producer indexes.
6922 switch (i) {
6923 default:
6924 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6925 break;
6926 case 2:
6927 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6928 break;
6929 case 3:
6930 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6931 break;
6932 case 4:
6933 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6934 break;
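/* In other words, vectors 0 and 1 use the regular rx producer index
 * while vectors 2-4 borrow the rx_jumbo_consumer, reserved and
 * rx_mini_consumer fields, giving four rx return ring indexes without
 * growing the status block.
 */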
6938 * If multivector RSS is enabled, vector 0 does not handle
6939 * rx or tx interrupts. Don't allocate any resources for it.
6941 if (!i && tg3_flag(tp, ENABLE_RSS))
6942 continue;
6944 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6945 TG3_RX_RCB_RING_BYTES(tp),
6946 &tnapi->rx_rcb_mapping,
6947 GFP_KERNEL);
6948 if (!tnapi->rx_rcb)
6949 goto err_out;
6951 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6954 return 0;
6956 err_out:
6957 tg3_free_consistent(tp);
6958 return -ENOMEM;
6961 #define MAX_WAIT_CNT 1000
6963 /* To stop a block, clear the enable bit and poll till it
6964 * clears. tp->lock is held.
6966 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6968 unsigned int i;
6969 u32 val;
6971 if (tg3_flag(tp, 5705_PLUS)) {
6972 switch (ofs) {
6973 case RCVLSC_MODE:
6974 case DMAC_MODE:
6975 case MBFREE_MODE:
6976 case BUFMGR_MODE:
6977 case MEMARB_MODE:
6978 /* We can't enable/disable these bits of the
6979 * 5705/5750, so just say success.
6981 return 0;
6983 default:
6984 break;
6988 val = tr32(ofs);
6989 val &= ~enable_bit;
6990 tw32_f(ofs, val);
6992 for (i = 0; i < MAX_WAIT_CNT; i++) {
6993 udelay(100);
6994 val = tr32(ofs);
6995 if ((val & enable_bit) == 0)
6996 break;
6999 if (i == MAX_WAIT_CNT && !silent) {
7000 dev_err(&tp->pdev->dev,
7001 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7002 ofs, enable_bit);
7003 return -ENODEV;
7006 return 0;
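/* Worst case the loop above spins MAX_WAIT_CNT * 100 usec (~100 ms).
 * Note that "silent" swallows the timeout completely: the -ENODEV path
 * is only taken when silent is clear, so a silent caller such as
 * tg3_halt() sees success either way.
 */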
7009 /* tp->lock is held. */
7010 static int tg3_abort_hw(struct tg3 *tp, int silent)
7012 int i, err;
7014 tg3_disable_ints(tp);
7016 tp->rx_mode &= ~RX_MODE_ENABLE;
7017 tw32_f(MAC_RX_MODE, tp->rx_mode);
7018 udelay(10);
7020 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7021 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7022 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7023 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7024 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7025 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7027 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7028 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7029 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7030 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7031 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7032 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7033 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7035 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7036 tw32_f(MAC_MODE, tp->mac_mode);
7037 udelay(40);
7039 tp->tx_mode &= ~TX_MODE_ENABLE;
7040 tw32_f(MAC_TX_MODE, tp->tx_mode);
7042 for (i = 0; i < MAX_WAIT_CNT; i++) {
7043 udelay(100);
7044 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7045 break;
7047 if (i >= MAX_WAIT_CNT) {
7048 dev_err(&tp->pdev->dev,
7049 "%s timed out, TX_MODE_ENABLE will not clear "
7050 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7051 err |= -ENODEV;
7054 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7055 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7056 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7058 tw32(FTQ_RESET, 0xffffffff);
7059 tw32(FTQ_RESET, 0x00000000);
7061 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7062 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7064 for (i = 0; i < tp->irq_cnt; i++) {
7065 struct tg3_napi *tnapi = &tp->napi[i];
7066 if (tnapi->hw_status)
7067 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7069 if (tp->hw_stats)
7070 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7072 return err;
7075 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
7077 int i;
7078 u32 apedata;
7080 /* NCSI does not support APE events */
7081 if (tg3_flag(tp, APE_HAS_NCSI))
7082 return;
7084 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
7085 if (apedata != APE_SEG_SIG_MAGIC)
7086 return;
7088 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
7089 if (!(apedata & APE_FW_STATUS_READY))
7090 return;
7092 /* Wait for up to 1 millisecond for APE to service previous event. */
7093 for (i = 0; i < 10; i++) {
7094 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
7095 return;
7097 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
7099 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
7100 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
7101 event | APE_EVENT_STATUS_EVENT_PENDING);
7103 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
7105 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
7106 break;
7108 udelay(100);
7111 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
7112 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
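/* Each pass takes TG3_APE_LOCK_MEM around the status-word update and
 * only posts the new event once the previous EVENT_PENDING bit is seen
 * clear; the final APE_EVENT_1 write acts as the doorbell that tells
 * the APE to look at the event status word.
 */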
7115 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
7117 u32 event;
7118 u32 apedata;
7120 if (!tg3_flag(tp, ENABLE_APE))
7121 return;
7123 switch (kind) {
7124 case RESET_KIND_INIT:
7125 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
7126 APE_HOST_SEG_SIG_MAGIC);
7127 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
7128 APE_HOST_SEG_LEN_MAGIC);
7129 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
7130 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
7131 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
7132 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
7133 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
7134 APE_HOST_BEHAV_NO_PHYLOCK);
7135 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
7136 TG3_APE_HOST_DRVR_STATE_START);
7138 event = APE_EVENT_STATUS_STATE_START;
7139 break;
7140 case RESET_KIND_SHUTDOWN:
7141 /* With the interface we are currently using,
7142 * APE does not track driver state. Wiping
7143 * out the HOST SEGMENT SIGNATURE forces
7144 * the APE to assume OS absent status.
7146 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
7148 if (device_may_wakeup(&tp->pdev->dev) &&
7149 tg3_flag(tp, WOL_ENABLE)) {
7150 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
7151 TG3_APE_HOST_WOL_SPEED_AUTO);
7152 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
7153 } else
7154 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
7156 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
7158 event = APE_EVENT_STATUS_STATE_UNLOAD;
7159 break;
7160 case RESET_KIND_SUSPEND:
7161 event = APE_EVENT_STATUS_STATE_SUSPEND;
7162 break;
7163 default:
7164 return;
7167 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7169 tg3_ape_send_event(tp, event);
7172 /* tp->lock is held. */
7173 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7175 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7176 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7178 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7179 switch (kind) {
7180 case RESET_KIND_INIT:
7181 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7182 DRV_STATE_START);
7183 break;
7185 case RESET_KIND_SHUTDOWN:
7186 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7187 DRV_STATE_UNLOAD);
7188 break;
7190 case RESET_KIND_SUSPEND:
7191 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7192 DRV_STATE_SUSPEND);
7193 break;
7195 default:
7196 break;
7200 if (kind == RESET_KIND_INIT ||
7201 kind == RESET_KIND_SUSPEND)
7202 tg3_ape_driver_state_change(tp, kind);
7205 /* tp->lock is held. */
7206 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7208 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7209 switch (kind) {
7210 case RESET_KIND_INIT:
7211 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7212 DRV_STATE_START_DONE);
7213 break;
7215 case RESET_KIND_SHUTDOWN:
7216 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7217 DRV_STATE_UNLOAD_DONE);
7218 break;
7220 default:
7221 break;
7225 if (kind == RESET_KIND_SHUTDOWN)
7226 tg3_ape_driver_state_change(tp, kind);
7229 /* tp->lock is held. */
7230 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7232 if (tg3_flag(tp, ENABLE_ASF)) {
7233 switch (kind) {
7234 case RESET_KIND_INIT:
7235 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7236 DRV_STATE_START);
7237 break;
7239 case RESET_KIND_SHUTDOWN:
7240 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7241 DRV_STATE_UNLOAD);
7242 break;
7244 case RESET_KIND_SUSPEND:
7245 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7246 DRV_STATE_SUSPEND);
7247 break;
7249 default:
7250 break;
7255 static int tg3_poll_fw(struct tg3 *tp)
7257 int i;
7258 u32 val;
7260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7261 /* Wait up to 20ms for init done. */
7262 for (i = 0; i < 200; i++) {
7263 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7264 return 0;
7265 udelay(100);
7267 return -ENODEV;
7270 /* Wait for firmware initialization to complete. */
7271 for (i = 0; i < 100000; i++) {
7272 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7273 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7274 break;
7275 udelay(10);
7278 /* Chip might not be fitted with firmware. Some Sun onboard
7279 * parts are configured like that. So don't signal the timeout
7280 * of the above loop as an error, but do report the lack of
7281 * running firmware once.
7283 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7284 tg3_flag_set(tp, NO_FWARE_REPORTED);
7286 netdev_info(tp->dev, "No firmware running\n");
7289 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7290 /* The 57765 A0 needs a little more
7291 * time to do some important work.
7293 mdelay(10);
7296 return 0;
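/* This handshake pairs with tg3_write_sig_pre_reset(): the driver posts
 * MAGIC1 in the firmware mailbox before the reset and bootcode writes
 * back its one's complement when initialization completes, so seeing
 * ~MAGIC1 above means the firmware is up.
 */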
7299 /* Save PCI command register before chip reset */
7300 static void tg3_save_pci_state(struct tg3 *tp)
7302 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7305 /* Restore PCI state after chip reset */
7306 static void tg3_restore_pci_state(struct tg3 *tp)
7308 u32 val;
7310 /* Re-enable indirect register accesses. */
7311 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7312 tp->misc_host_ctrl);
7314 /* Set MAX PCI retry to zero. */
7315 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7316 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7317 tg3_flag(tp, PCIX_MODE))
7318 val |= PCISTATE_RETRY_SAME_DMA;
7319 /* Allow reads and writes to the APE register and memory space. */
7320 if (tg3_flag(tp, ENABLE_APE))
7321 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7322 PCISTATE_ALLOW_APE_SHMEM_WR |
7323 PCISTATE_ALLOW_APE_PSPACE_WR;
7324 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7326 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7328 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7329 if (tg3_flag(tp, PCI_EXPRESS))
7330 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7331 else {
7332 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7333 tp->pci_cacheline_sz);
7334 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7335 tp->pci_lat_timer);
7339 /* Make sure PCI-X relaxed ordering bit is clear. */
7340 if (tg3_flag(tp, PCIX_MODE)) {
7341 u16 pcix_cmd;
7343 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7344 &pcix_cmd);
7345 pcix_cmd &= ~PCI_X_CMD_ERO;
7346 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7347 pcix_cmd);
7350 if (tg3_flag(tp, 5780_CLASS)) {
7352 /* Chip reset on 5780 will reset MSI enable bit,
7353 * so need to restore it.
7355 if (tg3_flag(tp, USING_MSI)) {
7356 u16 ctrl;
7358 pci_read_config_word(tp->pdev,
7359 tp->msi_cap + PCI_MSI_FLAGS,
7360 &ctrl);
7361 pci_write_config_word(tp->pdev,
7362 tp->msi_cap + PCI_MSI_FLAGS,
7363 ctrl | PCI_MSI_FLAGS_ENABLE);
7364 val = tr32(MSGINT_MODE);
7365 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7370 static void tg3_stop_fw(struct tg3 *);
7372 /* tp->lock is held. */
7373 static int tg3_chip_reset(struct tg3 *tp)
7375 u32 val;
7376 void (*write_op)(struct tg3 *, u32, u32);
7377 int i, err;
7379 tg3_nvram_lock(tp);
7381 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7383 /* No matching tg3_nvram_unlock() after this because
7384 * chip reset below will undo the nvram lock.
7386 tp->nvram_lock_cnt = 0;
7388 /* GRC_MISC_CFG core clock reset will clear the memory
7389 * enable bit in PCI register 4 and the MSI enable bit
7390 * on some chips, so we save relevant registers here.
7392 tg3_save_pci_state(tp);
7394 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7395 tg3_flag(tp, 5755_PLUS))
7396 tw32(GRC_FASTBOOT_PC, 0);
7399 * We must avoid the readl() that normally takes place.
7400 * It locks machines, causes machine checks, and other
7401 * fun things. So, temporarily disable the 5701
7402 * hardware workaround while we do the reset.
7404 write_op = tp->write32;
7405 if (write_op == tg3_write_flush_reg32)
7406 tp->write32 = tg3_write32;
7408 /* Prevent the irq handler from reading or writing PCI registers
7409 * during chip reset when the memory enable bit in the PCI command
7410 * register may be cleared. The chip does not generate interrupt
7411 * at this time, but the irq handler may still be called due to irq
7412 * sharing or irqpoll.
7414 tg3_flag_set(tp, CHIP_RESETTING);
7415 for (i = 0; i < tp->irq_cnt; i++) {
7416 struct tg3_napi *tnapi = &tp->napi[i];
7417 if (tnapi->hw_status) {
7418 tnapi->hw_status->status = 0;
7419 tnapi->hw_status->status_tag = 0;
7421 tnapi->last_tag = 0;
7422 tnapi->last_irq_tag = 0;
7424 smp_mb();
7426 for (i = 0; i < tp->irq_cnt; i++)
7427 synchronize_irq(tp->napi[i].irq_vec);
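/* Once synchronize_irq() returns for every vector, no handler can be
 * mid-flight touching MMIO or PCI config space while the memory enable
 * bit is down; the CHIP_RESETTING flag keeps late-arriving shared or
 * polled irqs out of the hardware as well.
 */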
7429 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7430 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7431 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7434 /* do the reset */
7435 val = GRC_MISC_CFG_CORECLK_RESET;
7437 if (tg3_flag(tp, PCI_EXPRESS)) {
7438 /* Force PCIe 1.0a mode */
7439 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7440 !tg3_flag(tp, 57765_PLUS) &&
7441 tr32(TG3_PCIE_PHY_TSTCTL) ==
7442 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7443 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7445 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7446 tw32(GRC_MISC_CFG, (1 << 29));
7447 val |= (1 << 29);
7451 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7452 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7453 tw32(GRC_VCPU_EXT_CTRL,
7454 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7457 /* Manage gphy power for all CPMU absent PCIe devices. */
7458 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7459 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7461 tw32(GRC_MISC_CFG, val);
7463 /* restore 5701 hardware bug workaround write method */
7464 tp->write32 = write_op;
7466 /* Unfortunately, we have to delay before the PCI read back.
7467 * Some 575X chips will not even respond to a PCI cfg access
7468 * when the reset command is given to the chip.
7470 * How do these hardware designers expect things to work
7471 * properly if the PCI write is posted for a long period
7472 * of time? It is always necessary to have some method by
7473 * which a register read back can occur to push the write
7474 * out which does the reset.
7476 * For most tg3 variants the trick below was working.
7477 * Ho hum...
7479 udelay(120);
7481 /* Flush PCI posted writes. The normal MMIO registers
7482 * are inaccessible at this time so this is the only
7483 * way to do this reliably (actually, this is no longer
7484 * the case, see above). I tried to use indirect
7485 * register read/write but this upset some 5701 variants.
7487 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7489 udelay(120);
7491 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7492 u16 val16;
7494 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7495 int i;
7496 u32 cfg_val;
7498 /* Wait for link training to complete. */
7499 for (i = 0; i < 5000; i++)
7500 udelay(100);
7502 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7503 pci_write_config_dword(tp->pdev, 0xc4,
7504 cfg_val | (1 << 15));
7507 /* Clear the "no snoop" and "relaxed ordering" bits. */
7508 pci_read_config_word(tp->pdev,
7509 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7510 &val16);
7511 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7512 PCI_EXP_DEVCTL_NOSNOOP_EN);
7514 * Older PCIe devices only support the 128 byte
7515 * MPS setting. Enforce the restriction.
7517 if (!tg3_flag(tp, CPMU_PRESENT))
7518 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7519 pci_write_config_word(tp->pdev,
7520 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7521 val16);
7523 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7525 /* Clear error status */
7526 pci_write_config_word(tp->pdev,
7527 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7528 PCI_EXP_DEVSTA_CED |
7529 PCI_EXP_DEVSTA_NFED |
7530 PCI_EXP_DEVSTA_FED |
7531 PCI_EXP_DEVSTA_URD);
7534 tg3_restore_pci_state(tp);
7536 tg3_flag_clear(tp, CHIP_RESETTING);
7537 tg3_flag_clear(tp, ERROR_PROCESSED);
7539 val = 0;
7540 if (tg3_flag(tp, 5780_CLASS))
7541 val = tr32(MEMARB_MODE);
7542 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7544 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7545 tg3_stop_fw(tp);
7546 tw32(0x5000, 0x400);
7549 tw32(GRC_MODE, tp->grc_mode);
7551 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7552 val = tr32(0xc4);
7554 tw32(0xc4, val | (1 << 15));
7557 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7558 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7559 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7560 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7561 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7562 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7565 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7566 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7567 val = tp->mac_mode;
7568 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7569 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7570 val = tp->mac_mode;
7571 } else
7572 val = 0;
7574 tw32_f(MAC_MODE, val);
7575 udelay(40);
7577 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7579 err = tg3_poll_fw(tp);
7580 if (err)
7581 return err;
7583 tg3_mdio_start(tp);
7585 if (tg3_flag(tp, PCI_EXPRESS) &&
7586 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7587 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7588 !tg3_flag(tp, 57765_PLUS)) {
7589 val = tr32(0x7c00);
7591 tw32(0x7c00, val | (1 << 25));
7594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7595 val = tr32(TG3_CPMU_CLCK_ORIDE);
7596 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7599 /* Reprobe ASF enable state. */
7600 tg3_flag_clear(tp, ENABLE_ASF);
7601 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7602 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7603 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7604 u32 nic_cfg;
7606 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7607 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7608 tg3_flag_set(tp, ENABLE_ASF);
7609 tp->last_event_jiffies = jiffies;
7610 if (tg3_flag(tp, 5750_PLUS))
7611 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7615 return 0;
7618 /* tp->lock is held. */
7619 static void tg3_stop_fw(struct tg3 *tp)
7621 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7622 /* Wait for RX cpu to ACK the previous event. */
7623 tg3_wait_for_event_ack(tp);
7625 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7627 tg3_generate_fw_event(tp);
7629 /* Wait for RX cpu to ACK this event. */
7630 tg3_wait_for_event_ack(tp);
7634 /* tp->lock is held. */
7635 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7637 int err;
7639 tg3_stop_fw(tp);
7641 tg3_write_sig_pre_reset(tp, kind);
7643 tg3_abort_hw(tp, silent);
7644 err = tg3_chip_reset(tp);
7646 __tg3_set_mac_addr(tp, 0);
7648 tg3_write_sig_legacy(tp, kind);
7649 tg3_write_sig_post_reset(tp, kind);
7651 if (err)
7652 return err;
7654 return 0;
7657 #define RX_CPU_SCRATCH_BASE 0x30000
7658 #define RX_CPU_SCRATCH_SIZE 0x04000
7659 #define TX_CPU_SCRATCH_BASE 0x34000
7660 #define TX_CPU_SCRATCH_SIZE 0x04000
7662 /* tp->lock is held. */
7663 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7665 int i;
7667 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7669 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7670 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7672 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7673 return 0;
7675 if (offset == RX_CPU_BASE) {
7676 for (i = 0; i < 10000; i++) {
7677 tw32(offset + CPU_STATE, 0xffffffff);
7678 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7679 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7680 break;
7683 tw32(offset + CPU_STATE, 0xffffffff);
7684 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7685 udelay(10);
7686 } else {
7687 for (i = 0; i < 10000; i++) {
7688 tw32(offset + CPU_STATE, 0xffffffff);
7689 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7690 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7691 break;
7695 if (i >= 10000) {
7696 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7697 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7698 return -ENODEV;
7701 /* Clear firmware's nvram arbitration. */
7702 if (tg3_flag(tp, NVRAM))
7703 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7704 return 0;
7707 struct fw_info {
7708 unsigned int fw_base;
7709 unsigned int fw_len;
7710 const __be32 *fw_data;
7713 /* tp->lock is held. */
7714 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7715 int cpu_scratch_size, struct fw_info *info)
7717 int err, lock_err, i;
7718 void (*write_op)(struct tg3 *, u32, u32);
7720 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7721 netdev_err(tp->dev,
7722 "%s: Trying to load TX cpu firmware which is 5705\n",
7723 __func__);
7724 return -EINVAL;
7727 if (tg3_flag(tp, 5705_PLUS))
7728 write_op = tg3_write_mem;
7729 else
7730 write_op = tg3_write_indirect_reg32;
7732 /* It is possible that bootcode is still loading at this point.
7733 * Get the nvram lock first before halting the cpu.
7735 lock_err = tg3_nvram_lock(tp);
7736 err = tg3_halt_cpu(tp, cpu_base);
7737 if (!lock_err)
7738 tg3_nvram_unlock(tp);
7739 if (err)
7740 goto out;
7742 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7743 write_op(tp, cpu_scratch_base + i, 0);
7744 tw32(cpu_base + CPU_STATE, 0xffffffff);
7745 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7746 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7747 write_op(tp, (cpu_scratch_base +
7748 (info->fw_base & 0xffff) +
7749 (i * sizeof(u32))),
7750 be32_to_cpu(info->fw_data[i]));
7752 err = 0;
7754 out:
7755 return err;
7758 /* tp->lock is held. */
7759 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7761 struct fw_info info;
7762 const __be32 *fw_data;
7763 int err, i;
7765 fw_data = (void *)tp->fw->data;
7767 /* Firmware blob starts with version numbers, followed by
7768 * the start address and length. We set the complete length:
7769 * length = end_address_of_bss - start_address_of_text.
7770 * The remainder is the blob to be loaded contiguously
7771 * from the start address. */
7773 info.fw_base = be32_to_cpu(fw_data[1]);
7774 info.fw_len = tp->fw->size - 12;
7775 info.fw_data = &fw_data[3];
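/* i.e. a three-word header, matching the arithmetic above:
 *   fw_data[0]  version
 *   fw_data[1]  load/start address (fw_base)
 *   fw_data[2]  presumably the length (unused; fw_len is derived from
 *               the file size instead)
 *   fw_data[3+] image copied contiguously to fw_base
 */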
7777 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7778 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7779 &info);
7780 if (err)
7781 return err;
7783 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7784 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7785 &info);
7786 if (err)
7787 return err;
7789 /* Now startup only the RX cpu. */
7790 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7791 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7793 for (i = 0; i < 5; i++) {
7794 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7795 break;
7796 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7797 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7798 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7799 udelay(1000);
7801 if (i >= 5) {
7802 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7803 "should be %08x\n", __func__,
7804 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7805 return -ENODEV;
7807 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7808 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7810 return 0;
7813 /* tp->lock is held. */
7814 static int tg3_load_tso_firmware(struct tg3 *tp)
7816 struct fw_info info;
7817 const __be32 *fw_data;
7818 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7819 int err, i;
7821 if (tg3_flag(tp, HW_TSO_1) ||
7822 tg3_flag(tp, HW_TSO_2) ||
7823 tg3_flag(tp, HW_TSO_3))
7824 return 0;
7826 fw_data = (void *)tp->fw->data;
7828 /* Firmware blob starts with version numbers, followed by
7829 * the start address and length. We set the complete length:
7830 * length = end_address_of_bss - start_address_of_text.
7831 * The remainder is the blob to be loaded contiguously
7832 * from the start address. */
7834 info.fw_base = be32_to_cpu(fw_data[1]);
7835 cpu_scratch_size = tp->fw_len;
7836 info.fw_len = tp->fw->size - 12;
7837 info.fw_data = &fw_data[3];
7839 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7840 cpu_base = RX_CPU_BASE;
7841 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7842 } else {
7843 cpu_base = TX_CPU_BASE;
7844 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7845 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
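/* On the 5705 the TSO firmware runs on the RX CPU and is staged in the
 * mbuf pool SRAM, since those parts have no TX CPU (see the BUG_ON in
 * tg3_halt_cpu()); everything else loads it into the TX CPU's dedicated
 * scratch area.
 */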
7848 err = tg3_load_firmware_cpu(tp, cpu_base,
7849 cpu_scratch_base, cpu_scratch_size,
7850 &info);
7851 if (err)
7852 return err;
7854 /* Now startup the cpu. */
7855 tw32(cpu_base + CPU_STATE, 0xffffffff);
7856 tw32_f(cpu_base + CPU_PC, info.fw_base);
7858 for (i = 0; i < 5; i++) {
7859 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7860 break;
7861 tw32(cpu_base + CPU_STATE, 0xffffffff);
7862 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7863 tw32_f(cpu_base + CPU_PC, info.fw_base);
7864 udelay(1000);
7866 if (i >= 5) {
7867 netdev_err(tp->dev,
7868 "%s fails to set CPU PC, is %08x should be %08x\n",
7869 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7870 return -ENODEV;
7872 tw32(cpu_base + CPU_STATE, 0xffffffff);
7873 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7874 return 0;
7878 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7880 struct tg3 *tp = netdev_priv(dev);
7881 struct sockaddr *addr = p;
7882 int err = 0, skip_mac_1 = 0;
7884 if (!is_valid_ether_addr(addr->sa_data))
7885 return -EINVAL;
7887 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7889 if (!netif_running(dev))
7890 return 0;
7892 if (tg3_flag(tp, ENABLE_ASF)) {
7893 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7895 addr0_high = tr32(MAC_ADDR_0_HIGH);
7896 addr0_low = tr32(MAC_ADDR_0_LOW);
7897 addr1_high = tr32(MAC_ADDR_1_HIGH);
7898 addr1_low = tr32(MAC_ADDR_1_LOW);
7900 /* Skip MAC addr 1 if ASF is using it. */
7901 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7902 !(addr1_high == 0 && addr1_low == 0))
7903 skip_mac_1 = 1;
7905 spin_lock_bh(&tp->lock);
7906 __tg3_set_mac_addr(tp, skip_mac_1);
7907 spin_unlock_bh(&tp->lock);
7909 return err;
7912 /* tp->lock is held. */
7913 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7914 dma_addr_t mapping, u32 maxlen_flags,
7915 u32 nic_addr)
7917 tg3_write_mem(tp,
7918 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7919 ((u64) mapping >> 32));
7920 tg3_write_mem(tp,
7921 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7922 ((u64) mapping & 0xffffffff));
7923 tg3_write_mem(tp,
7924 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7925 maxlen_flags);
7927 if (!tg3_flag(tp, 5705_PLUS))
7928 tg3_write_mem(tp,
7929 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7930 nic_addr);
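/* A BDINFO block in NIC SRAM is four 32-bit words: host DMA address
 * high and low, maxlen/flags, and a NIC-address word that only
 * pre-5705 parts implement, hence the conditional above.
 */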
7933 static void __tg3_set_rx_mode(struct net_device *);
7934 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7936 int i;
7938 if (!tg3_flag(tp, ENABLE_TSS)) {
7939 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7940 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7941 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7942 } else {
7943 tw32(HOSTCC_TXCOL_TICKS, 0);
7944 tw32(HOSTCC_TXMAX_FRAMES, 0);
7945 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7948 if (!tg3_flag(tp, ENABLE_RSS)) {
7949 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7950 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7951 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7952 } else {
7953 tw32(HOSTCC_RXCOL_TICKS, 0);
7954 tw32(HOSTCC_RXMAX_FRAMES, 0);
7955 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7958 if (!tg3_flag(tp, 5705_PLUS)) {
7959 u32 val = ec->stats_block_coalesce_usecs;
7961 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7962 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7964 if (!netif_carrier_ok(tp->dev))
7965 val = 0;
7967 tw32(HOSTCC_STAT_COAL_TICKS, val);
7970 for (i = 0; i < tp->irq_cnt - 1; i++) {
7971 u32 reg;
7973 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7974 tw32(reg, ec->rx_coalesce_usecs);
7975 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7976 tw32(reg, ec->rx_max_coalesced_frames);
7977 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7978 tw32(reg, ec->rx_max_coalesced_frames_irq);
7980 if (tg3_flag(tp, ENABLE_TSS)) {
7981 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7982 tw32(reg, ec->tx_coalesce_usecs);
7983 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7984 tw32(reg, ec->tx_max_coalesced_frames);
7985 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7986 tw32(reg, ec->tx_max_coalesced_frames_irq);
7990 for (; i < tp->irq_max - 1; i++) {
7991 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7992 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7993 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7995 if (tg3_flag(tp, ENABLE_TSS)) {
7996 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7997 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7998 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
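/* Vectors between irq_cnt and irq_max have their per-vector coalescing
 * registers cleared here so that nothing stale remains programmed if
 * those vectors are enabled later.
 */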
8003 /* tp->lock is held. */
8004 static void tg3_rings_reset(struct tg3 *tp)
8006 int i;
8007 u32 stblk, txrcb, rxrcb, limit;
8008 struct tg3_napi *tnapi = &tp->napi[0];
8010 /* Disable all transmit rings but the first. */
8011 if (!tg3_flag(tp, 5705_PLUS))
8012 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8013 else if (tg3_flag(tp, 5717_PLUS))
8014 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8015 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8016 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8017 else
8018 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8020 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8021 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8022 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8023 BDINFO_FLAGS_DISABLED);
8026 /* Disable all receive return rings but the first. */
8027 if (tg3_flag(tp, 5717_PLUS))
8028 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8029 else if (!tg3_flag(tp, 5705_PLUS))
8030 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8031 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8032 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8033 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8034 else
8035 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8037 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8038 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8039 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8040 BDINFO_FLAGS_DISABLED);
8042 /* Disable interrupts */
8043 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8044 tp->napi[0].chk_msi_cnt = 0;
8045 tp->napi[0].last_rx_cons = 0;
8046 tp->napi[0].last_tx_cons = 0;
8048 /* Zero mailbox registers. */
8049 if (tg3_flag(tp, SUPPORT_MSIX)) {
8050 for (i = 1; i < tp->irq_max; i++) {
8051 tp->napi[i].tx_prod = 0;
8052 tp->napi[i].tx_cons = 0;
8053 if (tg3_flag(tp, ENABLE_TSS))
8054 tw32_mailbox(tp->napi[i].prodmbox, 0);
8055 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8056 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8057 tp->napi[0].chk_msi_cnt = 0;
8058 tp->napi[i].last_rx_cons = 0;
8059 tp->napi[i].last_tx_cons = 0;
8061 if (!tg3_flag(tp, ENABLE_TSS))
8062 tw32_mailbox(tp->napi[0].prodmbox, 0);
8063 } else {
8064 tp->napi[0].tx_prod = 0;
8065 tp->napi[0].tx_cons = 0;
8066 tw32_mailbox(tp->napi[0].prodmbox, 0);
8067 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8070 /* Make sure the NIC-based send BD rings are disabled. */
8071 if (!tg3_flag(tp, 5705_PLUS)) {
8072 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8073 for (i = 0; i < 16; i++)
8074 tw32_tx_mbox(mbox + i * 8, 0);
8077 txrcb = NIC_SRAM_SEND_RCB;
8078 rxrcb = NIC_SRAM_RCV_RET_RCB;
8080 /* Clear status block in ram. */
8081 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8083 /* Set status block DMA address */
8084 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8085 ((u64) tnapi->status_mapping >> 32));
8086 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8087 ((u64) tnapi->status_mapping & 0xffffffff));
8089 if (tnapi->tx_ring) {
8090 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8091 (TG3_TX_RING_SIZE <<
8092 BDINFO_FLAGS_MAXLEN_SHIFT),
8093 NIC_SRAM_TX_BUFFER_DESC);
8094 txrcb += TG3_BDINFO_SIZE;
8097 if (tnapi->rx_rcb) {
8098 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8099 (tp->rx_ret_ring_mask + 1) <<
8100 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8101 rxrcb += TG3_BDINFO_SIZE;
8104 stblk = HOSTCC_STATBLCK_RING1;
8106 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8107 u64 mapping = (u64)tnapi->status_mapping;
8108 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8109 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8111 /* Clear status block in ram. */
8112 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8114 if (tnapi->tx_ring) {
8115 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8116 (TG3_TX_RING_SIZE <<
8117 BDINFO_FLAGS_MAXLEN_SHIFT),
8118 NIC_SRAM_TX_BUFFER_DESC);
8119 txrcb += TG3_BDINFO_SIZE;
8122 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8123 ((tp->rx_ret_ring_mask + 1) <<
8124 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8126 stblk += 8;
8127 rxrcb += TG3_BDINFO_SIZE;
8131 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8133 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8135 if (!tg3_flag(tp, 5750_PLUS) ||
8136 tg3_flag(tp, 5780_CLASS) ||
8137 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8138 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8139 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8140 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8141 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8142 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8143 else
8144 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8146 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8147 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8149 val = min(nic_rep_thresh, host_rep_thresh);
8150 tw32(RCVBDI_STD_THRESH, val);
8152 if (tg3_flag(tp, 57765_PLUS))
8153 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8155 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8156 return;
8158 if (!tg3_flag(tp, 5705_PLUS))
8159 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8160 else
8161 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8163 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8165 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8166 tw32(RCVBDI_JUMBO_THRESH, val);
8168 if (tg3_flag(tp, 57765_PLUS))
8169 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
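/* The standard-ring threshold ends up as the tightest of half the
 * on-chip BD cache, the rx_std_max_post limit and one eighth of the
 * configured ring depth (the jumbo ring does the same minus the
 * max-post term), so replenishment starts early without letting the
 * chip over-fetch.
 */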
8172 /* tp->lock is held. */
8173 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8175 u32 val, rdmac_mode;
8176 int i, err, limit;
8177 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8179 tg3_disable_ints(tp);
8181 tg3_stop_fw(tp);
8183 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8185 if (tg3_flag(tp, INIT_COMPLETE))
8186 tg3_abort_hw(tp, 1);
8188 /* Enable MAC control of LPI */
8189 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8190 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8191 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8192 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8194 tw32_f(TG3_CPMU_EEE_CTRL,
8195 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8197 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8198 TG3_CPMU_EEEMD_LPI_IN_TX |
8199 TG3_CPMU_EEEMD_LPI_IN_RX |
8200 TG3_CPMU_EEEMD_EEE_ENABLE;
8202 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8203 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8205 if (tg3_flag(tp, ENABLE_APE))
8206 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8208 tw32_f(TG3_CPMU_EEE_MODE, val);
8210 tw32_f(TG3_CPMU_EEE_DBTMR1,
8211 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8212 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8214 tw32_f(TG3_CPMU_EEE_DBTMR2,
8215 TG3_CPMU_DBTMR2_APE_TX_2047US |
8216 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8219 if (reset_phy)
8220 tg3_phy_reset(tp);
8222 err = tg3_chip_reset(tp);
8223 if (err)
8224 return err;
8226 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8228 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8229 val = tr32(TG3_CPMU_CTRL);
8230 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8231 tw32(TG3_CPMU_CTRL, val);
8233 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8234 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8235 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8236 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8238 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8239 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8240 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8241 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8243 val = tr32(TG3_CPMU_HST_ACC);
8244 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8245 val |= CPMU_HST_ACC_MACCLK_6_25;
8246 tw32(TG3_CPMU_HST_ACC, val);
8249 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8250 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8251 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8252 PCIE_PWR_MGMT_L1_THRESH_4MS;
8253 tw32(PCIE_PWR_MGMT_THRESH, val);
8255 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8256 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8258 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8260 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8261 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8264 if (tg3_flag(tp, L1PLLPD_EN)) {
8265 u32 grc_mode = tr32(GRC_MODE);
8267 /* Access the lower 1K of PL PCIE block registers. */
8268 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8269 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8271 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8272 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8273 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8275 tw32(GRC_MODE, grc_mode);
8278 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8279 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8280 u32 grc_mode = tr32(GRC_MODE);
8282 /* Access the lower 1K of PL PCIE block registers. */
8283 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8284 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8286 val = tr32(TG3_PCIE_TLDLPL_PORT +
8287 TG3_PCIE_PL_LO_PHYCTL5);
8288 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8289 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8291 tw32(GRC_MODE, grc_mode);
8294 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8295 u32 grc_mode = tr32(GRC_MODE);
8297 /* Access the lower 1K of DL PCIE block registers. */
8298 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8299 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8301 val = tr32(TG3_PCIE_TLDLPL_PORT +
8302 TG3_PCIE_DL_LO_FTSMAX);
8303 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8304 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8305 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8307 tw32(GRC_MODE, grc_mode);
8310 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8311 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8312 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8313 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8316 /* This works around an issue with Athlon chipsets on
8317 * B3 tigon3 silicon. This bit has no effect on any
8318 * other revision. But do not set this on PCI Express
8319 * chips and don't even touch the clocks if the CPMU is present.
8320 */
8321 if (!tg3_flag(tp, CPMU_PRESENT)) {
8322 if (!tg3_flag(tp, PCI_EXPRESS))
8323 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8324 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8327 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8328 tg3_flag(tp, PCIX_MODE)) {
8329 val = tr32(TG3PCI_PCISTATE);
8330 val |= PCISTATE_RETRY_SAME_DMA;
8331 tw32(TG3PCI_PCISTATE, val);
8334 if (tg3_flag(tp, ENABLE_APE)) {
8335 /* Allow reads and writes to the
8336 * APE register and memory space.
8337 */
8338 val = tr32(TG3PCI_PCISTATE);
8339 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8340 PCISTATE_ALLOW_APE_SHMEM_WR |
8341 PCISTATE_ALLOW_APE_PSPACE_WR;
8342 tw32(TG3PCI_PCISTATE, val);
8345 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8346 /* Enable some hw fixes. */
8347 val = tr32(TG3PCI_MSI_DATA);
8348 val |= (1 << 26) | (1 << 28) | (1 << 29);
8349 tw32(TG3PCI_MSI_DATA, val);
8352 /* Descriptor ring init may make accesses to the
8353 * NIC SRAM area to setup the TX descriptors, so we
8354 * can only do this after the hardware has been
8355 * successfully reset.
8356 */
8357 err = tg3_init_rings(tp);
8358 if (err)
8359 return err;
8361 if (tg3_flag(tp, 57765_PLUS)) {
8362 val = tr32(TG3PCI_DMA_RW_CTRL) &
8363 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8364 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8365 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8366 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8367 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8368 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8369 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8370 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8371 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8372 /* This value is determined during the probe time DMA
8373 * engine test, tg3_test_dma.
8374 */
8375 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8378 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8379 GRC_MODE_4X_NIC_SEND_RINGS |
8380 GRC_MODE_NO_TX_PHDR_CSUM |
8381 GRC_MODE_NO_RX_PHDR_CSUM);
8382 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8384 /* Pseudo-header checksum is done by hardware logic and not
8385 * the offload processors, so make the chip do the pseudo-
8386 * header checksums on receive. For transmit it is more
8387 * convenient to do the pseudo-header checksum in software,
8388 * as Linux does that on transmit for us in all cases.
8389 */
8390 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8392 tw32(GRC_MODE,
8393 tp->grc_mode |
8394 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8396 /* Set up the timer prescaler register. The clock is always 66 MHz. */
8397 val = tr32(GRC_MISC_CFG);
8398 val &= ~0xff;
8399 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8400 tw32(GRC_MISC_CFG, val);
8402 /* Initialize MBUF/DESC pool. */
8403 if (tg3_flag(tp, 5750_PLUS)) {
8404 /* Do nothing. */
8405 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8406 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8407 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8408 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8409 else
8410 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8411 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8412 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8413 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8414 int fw_len;
8416 fw_len = tp->fw_len;
8417 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8418 tw32(BUFMGR_MB_POOL_ADDR,
8419 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8420 tw32(BUFMGR_MB_POOL_SIZE,
8421 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8424 if (tp->dev->mtu <= ETH_DATA_LEN) {
8425 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8426 tp->bufmgr_config.mbuf_read_dma_low_water);
8427 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8428 tp->bufmgr_config.mbuf_mac_rx_low_water);
8429 tw32(BUFMGR_MB_HIGH_WATER,
8430 tp->bufmgr_config.mbuf_high_water);
8431 } else {
8432 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8433 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8434 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8435 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8436 tw32(BUFMGR_MB_HIGH_WATER,
8437 tp->bufmgr_config.mbuf_high_water_jumbo);
8439 tw32(BUFMGR_DMA_LOW_WATER,
8440 tp->bufmgr_config.dma_low_water);
8441 tw32(BUFMGR_DMA_HIGH_WATER,
8442 tp->bufmgr_config.dma_high_water);
8444 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8445 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8446 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8447 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8448 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8449 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8450 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8451 tw32(BUFMGR_MODE, val);
8452 for (i = 0; i < 2000; i++) {
8453 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8454 break;
8455 udelay(10);
8457 if (i >= 2000) {
8458 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8459 return -ENODEV;
8462 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8463 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8465 tg3_setup_rxbd_thresholds(tp);
8467 /* Initialize TG3_BDINFO's at:
8468 * RCVDBDI_STD_BD: standard eth size rx ring
8469 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8470 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8472 * like so:
8473 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8474 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8475 * ring attribute flags
8476 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8478 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8479 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8481 * The size of each ring is fixed in the firmware, but the location is
8482 * configurable.
8483 */
8484 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8485 ((u64) tpr->rx_std_mapping >> 32));
8486 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8487 ((u64) tpr->rx_std_mapping & 0xffffffff));
8488 if (!tg3_flag(tp, 5717_PLUS))
8489 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8490 NIC_SRAM_RX_BUFFER_DESC);
8492 /* Disable the mini ring */
8493 if (!tg3_flag(tp, 5705_PLUS))
8494 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8495 BDINFO_FLAGS_DISABLED);
8497 /* Program the jumbo buffer descriptor ring control
8498 * blocks on those devices that have them.
8499 */
8500 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8501 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8503 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8504 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8505 ((u64) tpr->rx_jmb_mapping >> 32));
8506 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8507 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8508 val = TG3_RX_JMB_RING_SIZE(tp) <<
8509 BDINFO_FLAGS_MAXLEN_SHIFT;
8510 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8511 val | BDINFO_FLAGS_USE_EXT_RECV);
8512 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8513 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8514 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8515 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8516 } else {
8517 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8518 BDINFO_FLAGS_DISABLED);
8521 if (tg3_flag(tp, 57765_PLUS)) {
8522 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8523 val = TG3_RX_STD_MAX_SIZE_5700;
8524 else
8525 val = TG3_RX_STD_MAX_SIZE_5717;
8526 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8527 val |= (TG3_RX_STD_DMA_SZ << 2);
8528 } else
8529 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8530 } else
8531 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8533 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8535 tpr->rx_std_prod_idx = tp->rx_pending;
8536 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8538 tpr->rx_jmb_prod_idx =
8539 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8540 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8542 tg3_rings_reset(tp);
8544 /* Initialize MAC address and backoff seed. */
8545 __tg3_set_mac_addr(tp, 0);
8547 /* MTU + ethernet header + FCS + optional VLAN tag */
8548 tw32(MAC_RX_MTU_SIZE,
8549 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8551 /* The slot time is changed by tg3_setup_phy if we
8552 * run at gigabit with half duplex.
8553 */
8554 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8555 (6 << TX_LENGTHS_IPG_SHIFT) |
8556 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8558 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8559 val |= tr32(MAC_TX_LENGTHS) &
8560 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8561 TX_LENGTHS_CNT_DWN_VAL_MSK);
8563 tw32(MAC_TX_LENGTHS, val);
8565 /* Receive rules. */
8566 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8567 tw32(RCVLPC_CONFIG, 0x0181);
8569 /* Calculate the RDMAC_MODE setting early, since we need it to
8570 * determine the RCVLPC_STATE_ENABLE mask.
8571 */
8572 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8573 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8574 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8575 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8576 RDMAC_MODE_LNGREAD_ENAB);
8578 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8579 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8581 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8582 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8583 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8584 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8585 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8586 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8588 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8589 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8590 if (tg3_flag(tp, TSO_CAPABLE) &&
8591 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8592 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8593 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8594 !tg3_flag(tp, IS_5788)) {
8595 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8599 if (tg3_flag(tp, PCI_EXPRESS))
8600 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8602 if (tg3_flag(tp, HW_TSO_1) ||
8603 tg3_flag(tp, HW_TSO_2) ||
8604 tg3_flag(tp, HW_TSO_3))
8605 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8607 if (tg3_flag(tp, 57765_PLUS) ||
8608 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8609 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8610 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8612 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8613 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8615 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8616 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8617 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8618 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8619 tg3_flag(tp, 57765_PLUS)) {
8620 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8622 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8623 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8624 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8625 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8626 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8627 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8628 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8630 tw32(TG3_RDMA_RSRVCTRL_REG,
8631 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8634 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8635 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8636 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8637 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8638 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8639 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8642 /* Receive/send statistics. */
8643 if (tg3_flag(tp, 5750_PLUS)) {
8644 val = tr32(RCVLPC_STATS_ENABLE);
8645 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8646 tw32(RCVLPC_STATS_ENABLE, val);
8647 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8648 tg3_flag(tp, TSO_CAPABLE)) {
8649 val = tr32(RCVLPC_STATS_ENABLE);
8650 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8651 tw32(RCVLPC_STATS_ENABLE, val);
8652 } else {
8653 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8655 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8656 tw32(SNDDATAI_STATSENAB, 0xffffff);
8657 tw32(SNDDATAI_STATSCTRL,
8658 (SNDDATAI_SCTRL_ENABLE |
8659 SNDDATAI_SCTRL_FASTUPD));
8661 /* Setup host coalescing engine. */
8662 tw32(HOSTCC_MODE, 0);
8663 for (i = 0; i < 2000; i++) {
8664 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8665 break;
8666 udelay(10);
8669 __tg3_set_coalesce(tp, &tp->coal);
8671 if (!tg3_flag(tp, 5705_PLUS)) {
8672 /* Status/statistics block address. See tg3_timer,
8673 * the tg3_periodic_fetch_stats call there, and
8674 * tg3_get_stats to see how this works for 5705/5750 chips.
8675 */
8676 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8677 ((u64) tp->stats_mapping >> 32));
8678 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8679 ((u64) tp->stats_mapping & 0xffffffff));
8680 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8682 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8684 /* Clear statistics and status block memory areas */
8685 for (i = NIC_SRAM_STATS_BLK;
8686 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8687 i += sizeof(u32)) {
8688 tg3_write_mem(tp, i, 0);
8689 udelay(40);
8693 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8695 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8696 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8697 if (!tg3_flag(tp, 5705_PLUS))
8698 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8700 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8701 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8702 /* reset to prevent losing 1st rx packet intermittently */
8703 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8704 udelay(10);
8707 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8708 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8709 MAC_MODE_FHDE_ENABLE;
8710 if (tg3_flag(tp, ENABLE_APE))
8711 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8712 if (!tg3_flag(tp, 5705_PLUS) &&
8713 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8714 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8715 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8716 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8717 udelay(40);
8719 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8720 * If TG3_FLAG_IS_NIC is zero, we should read the
8721 * register to preserve the GPIO settings for LOMs. The GPIOs,
8722 * whether used as inputs or outputs, are set by boot code after
8723 * reset.
8724 */
8725 if (!tg3_flag(tp, IS_NIC)) {
8726 u32 gpio_mask;
8728 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8729 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8730 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8732 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8733 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8734 GRC_LCLCTRL_GPIO_OUTPUT3;
8736 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8737 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8739 tp->grc_local_ctrl &= ~gpio_mask;
8740 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8742 /* GPIO1 must be driven high for eeprom write protect */
8743 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8744 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8745 GRC_LCLCTRL_GPIO_OUTPUT1);
8747 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8748 udelay(100);
8750 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8751 val = tr32(MSGINT_MODE);
8752 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8753 tw32(MSGINT_MODE, val);
8756 if (!tg3_flag(tp, 5705_PLUS)) {
8757 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8758 udelay(40);
8761 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8762 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8763 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8764 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8765 WDMAC_MODE_LNGREAD_ENAB);
8767 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8768 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8769 if (tg3_flag(tp, TSO_CAPABLE) &&
8770 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8771 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8772 /* nothing */
8773 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8774 !tg3_flag(tp, IS_5788)) {
8775 val |= WDMAC_MODE_RX_ACCEL;
8779 /* Enable host coalescing bug fix */
8780 if (tg3_flag(tp, 5755_PLUS))
8781 val |= WDMAC_MODE_STATUS_TAG_FIX;
8783 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8784 val |= WDMAC_MODE_BURST_ALL_DATA;
8786 tw32_f(WDMAC_MODE, val);
8787 udelay(40);
8789 if (tg3_flag(tp, PCIX_MODE)) {
8790 u16 pcix_cmd;
8792 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8793 &pcix_cmd);
8794 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8795 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8796 pcix_cmd |= PCI_X_CMD_READ_2K;
8797 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8798 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8799 pcix_cmd |= PCI_X_CMD_READ_2K;
8801 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8802 pcix_cmd);
8805 tw32_f(RDMAC_MODE, rdmac_mode);
8806 udelay(40);
8808 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8809 if (!tg3_flag(tp, 5705_PLUS))
8810 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8812 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8813 tw32(SNDDATAC_MODE,
8814 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8815 else
8816 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8818 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8819 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8820 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8821 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8822 val |= RCVDBDI_MODE_LRG_RING_SZ;
8823 tw32(RCVDBDI_MODE, val);
8824 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8825 if (tg3_flag(tp, HW_TSO_1) ||
8826 tg3_flag(tp, HW_TSO_2) ||
8827 tg3_flag(tp, HW_TSO_3))
8828 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8829 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8830 if (tg3_flag(tp, ENABLE_TSS))
8831 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8832 tw32(SNDBDI_MODE, val);
8833 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8835 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8836 err = tg3_load_5701_a0_firmware_fix(tp);
8837 if (err)
8838 return err;
8841 if (tg3_flag(tp, TSO_CAPABLE)) {
8842 err = tg3_load_tso_firmware(tp);
8843 if (err)
8844 return err;
8847 tp->tx_mode = TX_MODE_ENABLE;
8849 if (tg3_flag(tp, 5755_PLUS) ||
8850 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8851 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8853 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8854 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8855 tp->tx_mode &= ~val;
8856 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8859 tw32_f(MAC_TX_MODE, tp->tx_mode);
8860 udelay(100);
8862 if (tg3_flag(tp, ENABLE_RSS)) {
8863 int i = 0;
8864 u32 reg = MAC_RSS_INDIR_TBL_0;
8866 if (tp->irq_cnt == 2) {
8867 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8868 tw32(reg, 0x0);
8869 reg += 4;
8870 }
8871 } else {
8872 u32 val;
8874 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8875 val = i % (tp->irq_cnt - 1);
8876 i++;
8877 for (; i % 8; i++) {
8878 val <<= 4;
8879 val |= (i % (tp->irq_cnt - 1));
8880 }
8881 tw32(reg, val);
8882 reg += 4;
8883 }
8884 }
8886 /* Setup the "secret" hash key. */
8887 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8888 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8889 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8890 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8891 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8892 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8893 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8894 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8895 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8896 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8899 tp->rx_mode = RX_MODE_ENABLE;
8900 if (tg3_flag(tp, 5755_PLUS))
8901 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8903 if (tg3_flag(tp, ENABLE_RSS))
8904 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8905 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8906 RX_MODE_RSS_IPV6_HASH_EN |
8907 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8908 RX_MODE_RSS_IPV4_HASH_EN |
8909 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8911 tw32_f(MAC_RX_MODE, tp->rx_mode);
8912 udelay(10);
8914 tw32(MAC_LED_CTRL, tp->led_ctrl);
8916 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8917 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8918 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8919 udelay(10);
8921 tw32_f(MAC_RX_MODE, tp->rx_mode);
8922 udelay(10);
8924 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8925 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8926 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8927 /* Set drive transmission level to 1.2V */
8928 /* only if the signal pre-emphasis bit is not set */
8929 val = tr32(MAC_SERDES_CFG);
8930 val &= 0xfffff000;
8931 val |= 0x880;
8932 tw32(MAC_SERDES_CFG, val);
8934 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8935 tw32(MAC_SERDES_CFG, 0x616000);
8938 /* Prevent chip from dropping frames when flow control
8939 * is enabled.
8941 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8942 val = 1;
8943 else
8944 val = 2;
8945 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8947 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8948 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8949 /* Use hardware link auto-negotiation */
8950 tg3_flag_set(tp, HW_AUTONEG);
8953 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8954 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8955 u32 tmp;
8957 tmp = tr32(SERDES_RX_CTRL);
8958 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8959 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8960 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8961 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8964 if (!tg3_flag(tp, USE_PHYLIB)) {
8965 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8966 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8967 tp->link_config.speed = tp->link_config.orig_speed;
8968 tp->link_config.duplex = tp->link_config.orig_duplex;
8969 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8972 err = tg3_setup_phy(tp, 0);
8973 if (err)
8974 return err;
8976 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8977 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8978 u32 tmp;
8980 /* Clear CRC stats. */
8981 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8982 tg3_writephy(tp, MII_TG3_TEST1,
8983 tmp | MII_TG3_TEST1_CRC_EN);
8984 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8989 __tg3_set_rx_mode(tp->dev);
8991 /* Initialize receive rules. */
8992 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8993 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8994 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8995 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8997 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8998 limit = 8;
8999 else
9000 limit = 16;
9001 if (tg3_flag(tp, ENABLE_ASF))
9002 limit -= 4;
9003 switch (limit) {
9004 case 16:
9005 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9006 case 15:
9007 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9008 case 14:
9009 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9010 case 13:
9011 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9012 case 12:
9013 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9014 case 11:
9015 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9016 case 10:
9017 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9018 case 9:
9019 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9020 case 8:
9021 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9022 case 7:
9023 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9024 case 6:
9025 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9026 case 5:
9027 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9028 case 4:
9029 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9030 case 3:
9031 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9032 case 2:
9033 case 1:
9035 default:
9036 break;
9037 }
9039 if (tg3_flag(tp, ENABLE_APE))
9040 /* Write our heartbeat update interval to APE. */
9041 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9042 APE_HOST_HEARTBEAT_INT_DISABLE);
9044 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9046 return 0;
9047 }
9049 /* Called at device open time to get the chip ready for
9050 * packet processing. Invoked with tp->lock held.
9051 */
9052 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9053 {
9054 tg3_switch_clocks(tp);
9056 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9058 return tg3_reset_hw(tp, reset_phy);
9059 }
9061 #define TG3_STAT_ADD32(PSTAT, REG) \
9062 do { u32 __val = tr32(REG); \
9063 (PSTAT)->low += __val; \
9064 if ((PSTAT)->low < __val) \
9065 (PSTAT)->high += 1; \
9066 } while (0)
9068 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9069 {
9070 struct tg3_hw_stats *sp = tp->hw_stats;
9072 if (!netif_carrier_ok(tp->dev))
9073 return;
9075 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9076 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9077 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9078 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9079 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9080 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9081 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9082 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9083 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9084 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9085 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9086 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9087 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9089 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9090 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9091 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9092 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9093 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9094 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9095 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9096 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9097 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9098 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9099 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9100 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9101 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9102 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9104 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9105 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9106 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9107 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9108 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9109 } else {
9110 u32 val = tr32(HOSTCC_FLOW_ATTN);
9111 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9112 if (val) {
9113 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9114 sp->rx_discards.low += val;
9115 if (sp->rx_discards.low < val)
9116 sp->rx_discards.high += 1;
9117 }
9118 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9119 }
9120 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9121 }
9123 static void tg3_chk_missed_msi(struct tg3 *tp)
9124 {
9125 u32 i;
9127 for (i = 0; i < tp->irq_cnt; i++) {
9128 struct tg3_napi *tnapi = &tp->napi[i];
9130 if (tg3_has_work(tnapi)) {
9131 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9132 tnapi->last_tx_cons == tnapi->tx_cons) {
9133 if (tnapi->chk_msi_cnt < 1) {
9134 tnapi->chk_msi_cnt++;
9135 return;
9136 }
9137 tw32_mailbox(tnapi->int_mbox,
9138 tnapi->last_tag << 24);
9139 }
9140 }
9141 tnapi->chk_msi_cnt = 0;
9142 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9143 tnapi->last_tx_cons = tnapi->tx_cons;
9144 }
9145 }
9147 static void tg3_timer(unsigned long __opaque)
9148 {
9149 struct tg3 *tp = (struct tg3 *) __opaque;
9151 if (tp->irq_sync)
9152 goto restart_timer;
9154 spin_lock(&tp->lock);
9156 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9157 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9158 tg3_chk_missed_msi(tp);
9160 if (!tg3_flag(tp, TAGGED_STATUS)) {
9161 /* All of this garbage is because when using non-tagged
9162 * IRQ status the mailbox/status_block protocol the chip
9163 * uses with the cpu is race prone.
9164 */
9165 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9166 tw32(GRC_LOCAL_CTRL,
9167 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9168 } else {
9169 tw32(HOSTCC_MODE, tp->coalesce_mode |
9170 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9173 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9174 tg3_flag_set(tp, RESTART_TIMER);
9175 spin_unlock(&tp->lock);
9176 schedule_work(&tp->reset_task);
9177 return;
9181 /* This part only runs once per second. */
9182 if (!--tp->timer_counter) {
9183 if (tg3_flag(tp, 5705_PLUS))
9184 tg3_periodic_fetch_stats(tp);
9186 if (tp->setlpicnt && !--tp->setlpicnt)
9187 tg3_phy_eee_enable(tp);
9189 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9190 u32 mac_stat;
9191 int phy_event;
9193 mac_stat = tr32(MAC_STATUS);
9195 phy_event = 0;
9196 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9197 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9198 phy_event = 1;
9199 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9200 phy_event = 1;
9202 if (phy_event)
9203 tg3_setup_phy(tp, 0);
9204 } else if (tg3_flag(tp, POLL_SERDES)) {
9205 u32 mac_stat = tr32(MAC_STATUS);
9206 int need_setup = 0;
9208 if (netif_carrier_ok(tp->dev) &&
9209 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9210 need_setup = 1;
9212 if (!netif_carrier_ok(tp->dev) &&
9213 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9214 MAC_STATUS_SIGNAL_DET))) {
9215 need_setup = 1;
9217 if (need_setup) {
9218 if (!tp->serdes_counter) {
9219 tw32_f(MAC_MODE,
9220 (tp->mac_mode &
9221 ~MAC_MODE_PORT_MODE_MASK));
9222 udelay(40);
9223 tw32_f(MAC_MODE, tp->mac_mode);
9224 udelay(40);
9226 tg3_setup_phy(tp, 0);
9228 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9229 tg3_flag(tp, 5780_CLASS)) {
9230 tg3_serdes_parallel_detect(tp);
9233 tp->timer_counter = tp->timer_multiplier;
9236 /* Heartbeat is only sent once every 2 seconds.
9238 * The heartbeat is to tell the ASF firmware that the host
9239 * driver is still alive. In the event that the OS crashes,
9240 * ASF needs to reset the hardware to free up the FIFO space
9241 * that may be filled with rx packets destined for the host.
9242 * If the FIFO is full, ASF will no longer function properly.
9244 * Unintended resets have been reported on real time kernels
9245 * where the timer doesn't run on time. Netpoll will also have
9246 * same problem.
9248 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9249 * to check the ring condition when the heartbeat is expiring
9250 * before doing the reset. This will prevent most unintended
9251 * resets.
9252 */
9253 if (!--tp->asf_counter) {
9254 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9255 tg3_wait_for_event_ack(tp);
9257 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9258 FWCMD_NICDRV_ALIVE3);
9259 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9260 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9261 TG3_FW_UPDATE_TIMEOUT_SEC);
9263 tg3_generate_fw_event(tp);
9265 tp->asf_counter = tp->asf_multiplier;
9268 spin_unlock(&tp->lock);
9270 restart_timer:
9271 tp->timer.expires = jiffies + tp->timer_offset;
9272 add_timer(&tp->timer);
9273 }
9275 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9276 {
9277 irq_handler_t fn;
9278 unsigned long flags;
9279 char *name;
9280 struct tg3_napi *tnapi = &tp->napi[irq_num];
9282 if (tp->irq_cnt == 1)
9283 name = tp->dev->name;
9284 else {
9285 name = &tnapi->irq_lbl[0];
9286 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9287 name[IFNAMSIZ-1] = 0;
9290 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9291 fn = tg3_msi;
9292 if (tg3_flag(tp, 1SHOT_MSI))
9293 fn = tg3_msi_1shot;
9294 flags = 0;
9295 } else {
9296 fn = tg3_interrupt;
9297 if (tg3_flag(tp, TAGGED_STATUS))
9298 fn = tg3_interrupt_tagged;
9299 flags = IRQF_SHARED;
9302 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9303 }
9305 static int tg3_test_interrupt(struct tg3 *tp)
9306 {
9307 struct tg3_napi *tnapi = &tp->napi[0];
9308 struct net_device *dev = tp->dev;
9309 int err, i, intr_ok = 0;
9310 u32 val;
9312 if (!netif_running(dev))
9313 return -ENODEV;
9315 tg3_disable_ints(tp);
9317 free_irq(tnapi->irq_vec, tnapi);
9319 /*
9320 * Turn off MSI one shot mode. Otherwise this test has no
9321 * observable way to know whether the interrupt was delivered.
9322 */
9323 if (tg3_flag(tp, 57765_PLUS)) {
9324 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9325 tw32(MSGINT_MODE, val);
9328 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9329 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9330 if (err)
9331 return err;
9333 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9334 tg3_enable_ints(tp);
9336 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9337 tnapi->coal_now);
9339 for (i = 0; i < 5; i++) {
9340 u32 int_mbox, misc_host_ctrl;
9342 int_mbox = tr32_mailbox(tnapi->int_mbox);
9343 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9345 if ((int_mbox != 0) ||
9346 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9347 intr_ok = 1;
9348 break;
9351 if (tg3_flag(tp, 57765_PLUS) &&
9352 tnapi->hw_status->status_tag != tnapi->last_tag)
9353 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9355 msleep(10);
9358 tg3_disable_ints(tp);
9360 free_irq(tnapi->irq_vec, tnapi);
9362 err = tg3_request_irq(tp, 0);
9364 if (err)
9365 return err;
9367 if (intr_ok) {
9368 /* Reenable MSI one shot mode. */
9369 if (tg3_flag(tp, 57765_PLUS)) {
9370 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9371 tw32(MSGINT_MODE, val);
9373 return 0;
9374 }
9376 return -EIO;
9377 }
9379 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
9380 * is successfully restored.
9381 */
9382 static int tg3_test_msi(struct tg3 *tp)
9383 {
9384 int err;
9385 u16 pci_cmd;
9387 if (!tg3_flag(tp, USING_MSI))
9388 return 0;
9390 /* Turn off SERR reporting in case MSI terminates with Master
9391 * Abort.
9392 */
9393 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9394 pci_write_config_word(tp->pdev, PCI_COMMAND,
9395 pci_cmd & ~PCI_COMMAND_SERR);
9397 err = tg3_test_interrupt(tp);
9399 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9401 if (!err)
9402 return 0;
9404 /* other failures */
9405 if (err != -EIO)
9406 return err;
9408 /* MSI test failed, go back to INTx mode */
9409 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9410 "to INTx mode. Please report this failure to the PCI "
9411 "maintainer and include system chipset information\n");
9413 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9415 pci_disable_msi(tp->pdev);
9417 tg3_flag_clear(tp, USING_MSI);
9418 tp->napi[0].irq_vec = tp->pdev->irq;
9420 err = tg3_request_irq(tp, 0);
9421 if (err)
9422 return err;
9424 /* Need to reset the chip because the MSI cycle may have terminated
9425 * with Master Abort.
9426 */
9427 tg3_full_lock(tp, 1);
9429 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9430 err = tg3_init_hw(tp, 1);
9432 tg3_full_unlock(tp);
9434 if (err)
9435 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9437 return err;
9438 }
9440 static int tg3_request_firmware(struct tg3 *tp)
9441 {
9442 const __be32 *fw_data;
9444 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9445 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9446 tp->fw_needed);
9447 return -ENOENT;
9450 fw_data = (void *)tp->fw->data;
9452 /* Firmware blob starts with version numbers, followed by
9453 * the start address and the _full_ length including BSS sections
9454 * (which must be no shorter than the actual data, of course).
9455 */
9457 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9458 if (tp->fw_len < (tp->fw->size - 12)) {
9459 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9460 tp->fw_len, tp->fw_needed);
9461 release_firmware(tp->fw);
9462 tp->fw = NULL;
9463 return -EINVAL;
9466 /* We no longer need firmware; we have it. */
9467 tp->fw_needed = NULL;
9468 return 0;
9469 }
9471 static bool tg3_enable_msix(struct tg3 *tp)
9472 {
9473 int i, rc, cpus = num_online_cpus();
9474 struct msix_entry msix_ent[tp->irq_max];
9476 if (cpus == 1)
9477 /* Just fall back to the simpler MSI mode. */
9478 return false;
9480 /*
9481 * We want as many rx rings enabled as there are cpus.
9482 * The first MSIX vector only deals with link interrupts, etc,
9483 * so we add one to the number of vectors we are requesting.
9484 */
9485 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9487 for (i = 0; i < tp->irq_max; i++) {
9488 msix_ent[i].entry = i;
9489 msix_ent[i].vector = 0;
9492 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9493 if (rc < 0) {
9494 return false;
9495 } else if (rc != 0) {
9496 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9497 return false;
9498 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9499 tp->irq_cnt, rc);
9500 tp->irq_cnt = rc;
9501 }
9503 for (i = 0; i < tp->irq_max; i++)
9504 tp->napi[i].irq_vec = msix_ent[i].vector;
9506 netif_set_real_num_tx_queues(tp->dev, 1);
9507 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9508 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9509 pci_disable_msix(tp->pdev);
9510 return false;
9513 if (tp->irq_cnt > 1) {
9514 tg3_flag_set(tp, ENABLE_RSS);
9516 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9517 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9518 tg3_flag_set(tp, ENABLE_TSS);
9519 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9520 }
9521 }
9523 return true;
9524 }
9526 static void tg3_ints_init(struct tg3 *tp)
9527 {
9528 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9529 !tg3_flag(tp, TAGGED_STATUS)) {
9530 /* All MSI supporting chips should support tagged
9531 * status. Assert that this is the case.
9532 */
9533 netdev_warn(tp->dev,
9534 "MSI without TAGGED_STATUS? Not using MSI\n");
9535 goto defcfg;
9538 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9539 tg3_flag_set(tp, USING_MSIX);
9540 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9541 tg3_flag_set(tp, USING_MSI);
9543 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9544 u32 msi_mode = tr32(MSGINT_MODE);
9545 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9546 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9547 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9549 defcfg:
9550 if (!tg3_flag(tp, USING_MSIX)) {
9551 tp->irq_cnt = 1;
9552 tp->napi[0].irq_vec = tp->pdev->irq;
9553 netif_set_real_num_tx_queues(tp->dev, 1);
9554 netif_set_real_num_rx_queues(tp->dev, 1);
9555 }
9556 }
9558 static void tg3_ints_fini(struct tg3 *tp)
9559 {
9560 if (tg3_flag(tp, USING_MSIX))
9561 pci_disable_msix(tp->pdev);
9562 else if (tg3_flag(tp, USING_MSI))
9563 pci_disable_msi(tp->pdev);
9564 tg3_flag_clear(tp, USING_MSI);
9565 tg3_flag_clear(tp, USING_MSIX);
9566 tg3_flag_clear(tp, ENABLE_RSS);
9567 tg3_flag_clear(tp, ENABLE_TSS);
9568 }
9570 static int tg3_open(struct net_device *dev)
9571 {
9572 struct tg3 *tp = netdev_priv(dev);
9573 int i, err;
9575 if (tp->fw_needed) {
9576 err = tg3_request_firmware(tp);
9577 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9578 if (err)
9579 return err;
9580 } else if (err) {
9581 netdev_warn(tp->dev, "TSO capability disabled\n");
9582 tg3_flag_clear(tp, TSO_CAPABLE);
9583 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9584 netdev_notice(tp->dev, "TSO capability restored\n");
9585 tg3_flag_set(tp, TSO_CAPABLE);
9589 netif_carrier_off(tp->dev);
9591 err = tg3_power_up(tp);
9592 if (err)
9593 return err;
9595 tg3_full_lock(tp, 0);
9597 tg3_disable_ints(tp);
9598 tg3_flag_clear(tp, INIT_COMPLETE);
9600 tg3_full_unlock(tp);
9602 /*
9603 * Setup interrupts first so we know how
9604 * many NAPI resources to allocate.
9605 */
9606 tg3_ints_init(tp);
9608 /* The placement of this call is tied
9609 * to the setup and use of Host TX descriptors.
9610 */
9611 err = tg3_alloc_consistent(tp);
9612 if (err)
9613 goto err_out1;
9615 tg3_napi_init(tp);
9617 tg3_napi_enable(tp);
9619 for (i = 0; i < tp->irq_cnt; i++) {
9620 struct tg3_napi *tnapi = &tp->napi[i];
9621 err = tg3_request_irq(tp, i);
9622 if (err) {
9623 for (i--; i >= 0; i--)
9624 free_irq(tp->napi[i].irq_vec, &tp->napi[i]);
9625 break;
9629 if (err)
9630 goto err_out2;
9632 tg3_full_lock(tp, 0);
9634 err = tg3_init_hw(tp, 1);
9635 if (err) {
9636 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9637 tg3_free_rings(tp);
9638 } else {
9639 if (tg3_flag(tp, TAGGED_STATUS) &&
9640 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9641 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9642 tp->timer_offset = HZ;
9643 else
9644 tp->timer_offset = HZ / 10;
9646 BUG_ON(tp->timer_offset > HZ);
9647 tp->timer_counter = tp->timer_multiplier =
9648 (HZ / tp->timer_offset);
9649 tp->asf_counter = tp->asf_multiplier =
9650 ((HZ / tp->timer_offset) * 2);
9652 init_timer(&tp->timer);
9653 tp->timer.expires = jiffies + tp->timer_offset;
9654 tp->timer.data = (unsigned long) tp;
9655 tp->timer.function = tg3_timer;
9658 tg3_full_unlock(tp);
9660 if (err)
9661 goto err_out3;
9663 if (tg3_flag(tp, USING_MSI)) {
9664 err = tg3_test_msi(tp);
9666 if (err) {
9667 tg3_full_lock(tp, 0);
9668 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9669 tg3_free_rings(tp);
9670 tg3_full_unlock(tp);
9672 goto err_out2;
9675 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9676 u32 val = tr32(PCIE_TRANSACTION_CFG);
9678 tw32(PCIE_TRANSACTION_CFG,
9679 val | PCIE_TRANS_CFG_1SHOT_MSI);
9683 tg3_phy_start(tp);
9685 tg3_full_lock(tp, 0);
9687 add_timer(&tp->timer);
9688 tg3_flag_set(tp, INIT_COMPLETE);
9689 tg3_enable_ints(tp);
9691 tg3_full_unlock(tp);
9693 netif_tx_start_all_queues(dev);
9695 /*
9696 * Reset the loopback feature if it was turned on while the device
9697 * was down; make sure that it's installed properly now.
9698 */
9699 if (dev->features & NETIF_F_LOOPBACK)
9700 tg3_set_loopback(dev, dev->features);
9702 return 0;
9704 err_out3:
9705 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9706 struct tg3_napi *tnapi = &tp->napi[i];
9707 free_irq(tnapi->irq_vec, tnapi);
9710 err_out2:
9711 tg3_napi_disable(tp);
9712 tg3_napi_fini(tp);
9713 tg3_free_consistent(tp);
9715 err_out1:
9716 tg3_ints_fini(tp);
9717 tg3_frob_aux_power(tp, false);
9718 pci_set_power_state(tp->pdev, PCI_D3hot);
9719 return err;
9722 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9723 struct rtnl_link_stats64 *);
9724 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9726 static int tg3_close(struct net_device *dev)
9727 {
9728 int i;
9729 struct tg3 *tp = netdev_priv(dev);
9731 tg3_napi_disable(tp);
9732 cancel_work_sync(&tp->reset_task);
9734 netif_tx_stop_all_queues(dev);
9736 del_timer_sync(&tp->timer);
9738 tg3_phy_stop(tp);
9740 tg3_full_lock(tp, 1);
9742 tg3_disable_ints(tp);
9744 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9745 tg3_free_rings(tp);
9746 tg3_flag_clear(tp, INIT_COMPLETE);
9748 tg3_full_unlock(tp);
9750 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9751 struct tg3_napi *tnapi = &tp->napi[i];
9752 free_irq(tnapi->irq_vec, tnapi);
9755 tg3_ints_fini(tp);
9757 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9759 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9760 sizeof(tp->estats_prev));
9762 tg3_napi_fini(tp);
9764 tg3_free_consistent(tp);
9766 tg3_power_down(tp);
9768 netif_carrier_off(tp->dev);
9770 return 0;
9771 }
9773 static inline u64 get_stat64(tg3_stat64_t *val)
9774 {
9775 return ((u64)val->high << 32) | ((u64)val->low);
9776 }
9778 static u64 calc_crc_errors(struct tg3 *tp)
9779 {
9780 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9782 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9783 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9784 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9785 u32 val;
9787 spin_lock_bh(&tp->lock);
9788 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9789 tg3_writephy(tp, MII_TG3_TEST1,
9790 val | MII_TG3_TEST1_CRC_EN);
9791 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9792 } else
9793 val = 0;
9794 spin_unlock_bh(&tp->lock);
9796 tp->phy_crc_errors += val;
9798 return tp->phy_crc_errors;
9799 }
9801 return get_stat64(&hw_stats->rx_fcs_errors);
9802 }
9804 #define ESTAT_ADD(member) \
9805 estats->member = old_estats->member + \
9806 get_stat64(&hw_stats->member)
9808 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9809 {
9810 struct tg3_ethtool_stats *estats = &tp->estats;
9811 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9812 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9814 if (!hw_stats)
9815 return old_estats;
9817 ESTAT_ADD(rx_octets);
9818 ESTAT_ADD(rx_fragments);
9819 ESTAT_ADD(rx_ucast_packets);
9820 ESTAT_ADD(rx_mcast_packets);
9821 ESTAT_ADD(rx_bcast_packets);
9822 ESTAT_ADD(rx_fcs_errors);
9823 ESTAT_ADD(rx_align_errors);
9824 ESTAT_ADD(rx_xon_pause_rcvd);
9825 ESTAT_ADD(rx_xoff_pause_rcvd);
9826 ESTAT_ADD(rx_mac_ctrl_rcvd);
9827 ESTAT_ADD(rx_xoff_entered);
9828 ESTAT_ADD(rx_frame_too_long_errors);
9829 ESTAT_ADD(rx_jabbers);
9830 ESTAT_ADD(rx_undersize_packets);
9831 ESTAT_ADD(rx_in_length_errors);
9832 ESTAT_ADD(rx_out_length_errors);
9833 ESTAT_ADD(rx_64_or_less_octet_packets);
9834 ESTAT_ADD(rx_65_to_127_octet_packets);
9835 ESTAT_ADD(rx_128_to_255_octet_packets);
9836 ESTAT_ADD(rx_256_to_511_octet_packets);
9837 ESTAT_ADD(rx_512_to_1023_octet_packets);
9838 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9839 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9840 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9841 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9842 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9844 ESTAT_ADD(tx_octets);
9845 ESTAT_ADD(tx_collisions);
9846 ESTAT_ADD(tx_xon_sent);
9847 ESTAT_ADD(tx_xoff_sent);
9848 ESTAT_ADD(tx_flow_control);
9849 ESTAT_ADD(tx_mac_errors);
9850 ESTAT_ADD(tx_single_collisions);
9851 ESTAT_ADD(tx_mult_collisions);
9852 ESTAT_ADD(tx_deferred);
9853 ESTAT_ADD(tx_excessive_collisions);
9854 ESTAT_ADD(tx_late_collisions);
9855 ESTAT_ADD(tx_collide_2times);
9856 ESTAT_ADD(tx_collide_3times);
9857 ESTAT_ADD(tx_collide_4times);
9858 ESTAT_ADD(tx_collide_5times);
9859 ESTAT_ADD(tx_collide_6times);
9860 ESTAT_ADD(tx_collide_7times);
9861 ESTAT_ADD(tx_collide_8times);
9862 ESTAT_ADD(tx_collide_9times);
9863 ESTAT_ADD(tx_collide_10times);
9864 ESTAT_ADD(tx_collide_11times);
9865 ESTAT_ADD(tx_collide_12times);
9866 ESTAT_ADD(tx_collide_13times);
9867 ESTAT_ADD(tx_collide_14times);
9868 ESTAT_ADD(tx_collide_15times);
9869 ESTAT_ADD(tx_ucast_packets);
9870 ESTAT_ADD(tx_mcast_packets);
9871 ESTAT_ADD(tx_bcast_packets);
9872 ESTAT_ADD(tx_carrier_sense_errors);
9873 ESTAT_ADD(tx_discards);
9874 ESTAT_ADD(tx_errors);
9876 ESTAT_ADD(dma_writeq_full);
9877 ESTAT_ADD(dma_write_prioq_full);
9878 ESTAT_ADD(rxbds_empty);
9879 ESTAT_ADD(rx_discards);
9880 ESTAT_ADD(rx_errors);
9881 ESTAT_ADD(rx_threshold_hit);
9883 ESTAT_ADD(dma_readq_full);
9884 ESTAT_ADD(dma_read_prioq_full);
9885 ESTAT_ADD(tx_comp_queue_full);
9887 ESTAT_ADD(ring_set_send_prod_index);
9888 ESTAT_ADD(ring_status_update);
9889 ESTAT_ADD(nic_irqs);
9890 ESTAT_ADD(nic_avoided_irqs);
9891 ESTAT_ADD(nic_tx_threshold_hit);
9893 ESTAT_ADD(mbuf_lwm_thresh_hit);
9895 return estats;
9896 }
9898 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9899 struct rtnl_link_stats64 *stats)
9900 {
9901 struct tg3 *tp = netdev_priv(dev);
9902 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9903 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9905 if (!hw_stats)
9906 return old_stats;
9908 stats->rx_packets = old_stats->rx_packets +
9909 get_stat64(&hw_stats->rx_ucast_packets) +
9910 get_stat64(&hw_stats->rx_mcast_packets) +
9911 get_stat64(&hw_stats->rx_bcast_packets);
9913 stats->tx_packets = old_stats->tx_packets +
9914 get_stat64(&hw_stats->tx_ucast_packets) +
9915 get_stat64(&hw_stats->tx_mcast_packets) +
9916 get_stat64(&hw_stats->tx_bcast_packets);
9918 stats->rx_bytes = old_stats->rx_bytes +
9919 get_stat64(&hw_stats->rx_octets);
9920 stats->tx_bytes = old_stats->tx_bytes +
9921 get_stat64(&hw_stats->tx_octets);
9923 stats->rx_errors = old_stats->rx_errors +
9924 get_stat64(&hw_stats->rx_errors);
9925 stats->tx_errors = old_stats->tx_errors +
9926 get_stat64(&hw_stats->tx_errors) +
9927 get_stat64(&hw_stats->tx_mac_errors) +
9928 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9929 get_stat64(&hw_stats->tx_discards);
9931 stats->multicast = old_stats->multicast +
9932 get_stat64(&hw_stats->rx_mcast_packets);
9933 stats->collisions = old_stats->collisions +
9934 get_stat64(&hw_stats->tx_collisions);
9936 stats->rx_length_errors = old_stats->rx_length_errors +
9937 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9938 get_stat64(&hw_stats->rx_undersize_packets);
9940 stats->rx_over_errors = old_stats->rx_over_errors +
9941 get_stat64(&hw_stats->rxbds_empty);
9942 stats->rx_frame_errors = old_stats->rx_frame_errors +
9943 get_stat64(&hw_stats->rx_align_errors);
9944 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9945 get_stat64(&hw_stats->tx_discards);
9946 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9947 get_stat64(&hw_stats->tx_carrier_sense_errors);
9949 stats->rx_crc_errors = old_stats->rx_crc_errors +
9950 calc_crc_errors(tp);
9952 stats->rx_missed_errors = old_stats->rx_missed_errors +
9953 get_stat64(&hw_stats->rx_discards);
9955 stats->rx_dropped = tp->rx_dropped;
9957 return stats;
9958 }
9960 static inline u32 calc_crc(unsigned char *buf, int len)
9961 {
9962 u32 reg;
9963 u32 tmp;
9964 int j, k;
9966 reg = 0xffffffff;
9968 for (j = 0; j < len; j++) {
9969 reg ^= buf[j];
9971 for (k = 0; k < 8; k++) {
9972 tmp = reg & 0x01;
9974 reg >>= 1;
9976 if (tmp)
9977 reg ^= 0xedb88320;
9978 }
9979 }
9981 return ~reg;
9982 }
9984 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9985 {
9986 /* accept or reject all multicast frames */
9987 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9988 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9989 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9990 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9991 }
9993 static void __tg3_set_rx_mode(struct net_device *dev)
9994 {
9995 struct tg3 *tp = netdev_priv(dev);
9996 u32 rx_mode;
9998 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9999 RX_MODE_KEEP_VLAN_TAG);
10001 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10002 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10003 * flag clear.
10004 */
10005 if (!tg3_flag(tp, ENABLE_ASF))
10006 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10007 #endif
10009 if (dev->flags & IFF_PROMISC) {
10010 /* Promiscuous mode. */
10011 rx_mode |= RX_MODE_PROMISC;
10012 } else if (dev->flags & IFF_ALLMULTI) {
10013 /* Accept all multicast. */
10014 tg3_set_multi(tp, 1);
10015 } else if (netdev_mc_empty(dev)) {
10016 /* Reject all multicast. */
10017 tg3_set_multi(tp, 0);
10018 } else {
10019 /* Accept one or more multicast(s). */
10020 struct netdev_hw_addr *ha;
10021 u32 mc_filter[4] = { 0, };
10022 u32 regidx;
10023 u32 bit;
10024 u32 crc;
10026 netdev_for_each_mc_addr(ha, dev) {
10027 crc = calc_crc(ha->addr, ETH_ALEN);
10028 bit = ~crc & 0x7f;
10029 regidx = (bit & 0x60) >> 5;
10030 bit &= 0x1f;
10031 mc_filter[regidx] |= (1 << bit);
10034 tw32(MAC_HASH_REG_0, mc_filter[0]);
10035 tw32(MAC_HASH_REG_1, mc_filter[1]);
10036 tw32(MAC_HASH_REG_2, mc_filter[2]);
10037 tw32(MAC_HASH_REG_3, mc_filter[3]);
10040 if (rx_mode != tp->rx_mode) {
10041 tp->rx_mode = rx_mode;
10042 tw32_f(MAC_RX_MODE, rx_mode);
10043 udelay(10);
10044 }
10045 }
10047 static void tg3_set_rx_mode(struct net_device *dev)
10048 {
10049 struct tg3 *tp = netdev_priv(dev);
10051 if (!netif_running(dev))
10052 return;
10054 tg3_full_lock(tp, 0);
10055 __tg3_set_rx_mode(dev);
10056 tg3_full_unlock(tp);
10057 }
10059 static int tg3_get_regs_len(struct net_device *dev)
10060 {
10061 return TG3_REG_BLK_SIZE;
10062 }
10064 static void tg3_get_regs(struct net_device *dev,
10065 struct ethtool_regs *regs, void *_p)
10066 {
10067 struct tg3 *tp = netdev_priv(dev);
10069 regs->version = 0;
10071 memset(_p, 0, TG3_REG_BLK_SIZE);
10073 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10074 return;
10076 tg3_full_lock(tp, 0);
10078 tg3_dump_legacy_regs(tp, (u32 *)_p);
10080 tg3_full_unlock(tp);
10081 }
10083 static int tg3_get_eeprom_len(struct net_device *dev)
10084 {
10085 struct tg3 *tp = netdev_priv(dev);
10087 return tp->nvram_size;
10088 }
10090 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10091 {
10092 struct tg3 *tp = netdev_priv(dev);
10093 int ret;
10094 u8 *pd;
10095 u32 i, offset, len, b_offset, b_count;
10096 __be32 val;
10098 if (tg3_flag(tp, NO_NVRAM))
10099 return -EINVAL;
10101 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10102 return -EAGAIN;
10104 offset = eeprom->offset;
10105 len = eeprom->len;
10106 eeprom->len = 0;
10108 eeprom->magic = TG3_EEPROM_MAGIC;
10110 if (offset & 3) {
10111 /* adjustments to start on required 4 byte boundary */
10112 b_offset = offset & 3;
10113 b_count = 4 - b_offset;
10114 if (b_count > len) {
10115 /* i.e. offset=1 len=2 */
10116 b_count = len;
10118 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10119 if (ret)
10120 return ret;
10121 memcpy(data, ((char *)&val) + b_offset, b_count);
10122 len -= b_count;
10123 offset += b_count;
10124 eeprom->len += b_count;
10127 /* read bytes up to the last 4 byte boundary */
10128 pd = &data[eeprom->len];
10129 for (i = 0; i < (len - (len & 3)); i += 4) {
10130 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10131 if (ret) {
10132 eeprom->len += i;
10133 return ret;
10135 memcpy(pd + i, &val, 4);
10137 eeprom->len += i;
10139 if (len & 3) {
10140 /* read last bytes not ending on 4 byte boundary */
10141 pd = &data[eeprom->len];
10142 b_count = len & 3;
10143 b_offset = offset + len - b_count;
10144 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10145 if (ret)
10146 return ret;
10147 memcpy(pd, &val, b_count);
10148 eeprom->len += b_count;
10150 return 0;
10151 }
10153 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10155 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10156 {
10157 struct tg3 *tp = netdev_priv(dev);
10158 int ret;
10159 u32 offset, len, b_offset, odd_len;
10160 u8 *buf;
10161 __be32 start, end;
10163 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10164 return -EAGAIN;
10166 if (tg3_flag(tp, NO_NVRAM) ||
10167 eeprom->magic != TG3_EEPROM_MAGIC)
10168 return -EINVAL;
10170 offset = eeprom->offset;
10171 len = eeprom->len;
10173 if ((b_offset = (offset & 3))) {
10174 /* adjustments to start on required 4 byte boundary */
10175 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10176 if (ret)
10177 return ret;
10178 len += b_offset;
10179 offset &= ~3;
10180 if (len < 4)
10181 len = 4;
10184 odd_len = 0;
10185 if (len & 3) {
10186 /* adjustments to end on required 4 byte boundary */
10187 odd_len = 1;
10188 len = (len + 3) & ~3;
10189 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10190 if (ret)
10191 return ret;
10194 buf = data;
10195 if (b_offset || odd_len) {
10196 buf = kmalloc(len, GFP_KERNEL);
10197 if (!buf)
10198 return -ENOMEM;
10199 if (b_offset)
10200 memcpy(buf, &start, 4);
10201 if (odd_len)
10202 memcpy(buf+len-4, &end, 4);
10203 memcpy(buf + b_offset, data, eeprom->len);
10206 ret = tg3_nvram_write_block(tp, offset, len, buf);
10208 if (buf != data)
10209 kfree(buf);
10211 return ret;
10212 }
10214 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10215 {
10216 struct tg3 *tp = netdev_priv(dev);
10218 if (tg3_flag(tp, USE_PHYLIB)) {
10219 struct phy_device *phydev;
10220 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10221 return -EAGAIN;
10222 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10223 return phy_ethtool_gset(phydev, cmd);
10226 cmd->supported = (SUPPORTED_Autoneg);
10228 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10229 cmd->supported |= (SUPPORTED_1000baseT_Half |
10230 SUPPORTED_1000baseT_Full);
10232 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10233 cmd->supported |= (SUPPORTED_100baseT_Half |
10234 SUPPORTED_100baseT_Full |
10235 SUPPORTED_10baseT_Half |
10236 SUPPORTED_10baseT_Full |
10237 SUPPORTED_TP);
10238 cmd->port = PORT_TP;
10239 } else {
10240 cmd->supported |= SUPPORTED_FIBRE;
10241 cmd->port = PORT_FIBRE;
10244 cmd->advertising = tp->link_config.advertising;
10245 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10246 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10247 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10248 cmd->advertising |= ADVERTISED_Pause;
10249 } else {
10250 cmd->advertising |= ADVERTISED_Pause |
10251 ADVERTISED_Asym_Pause;
10253 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10254 cmd->advertising |= ADVERTISED_Asym_Pause;
10257 if (netif_running(dev)) {
10258 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10259 cmd->duplex = tp->link_config.active_duplex;
10260 } else {
10261 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10262 cmd->duplex = DUPLEX_INVALID;
10264 cmd->phy_address = tp->phy_addr;
10265 cmd->transceiver = XCVR_INTERNAL;
10266 cmd->autoneg = tp->link_config.autoneg;
10267 cmd->maxtxpkt = 0;
10268 cmd->maxrxpkt = 0;
10269 return 0;
10272 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10274 struct tg3 *tp = netdev_priv(dev);
10275 u32 speed = ethtool_cmd_speed(cmd);
10277 if (tg3_flag(tp, USE_PHYLIB)) {
10278 struct phy_device *phydev;
10279 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10280 return -EAGAIN;
10281 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10282 return phy_ethtool_sset(phydev, cmd);
10285 if (cmd->autoneg != AUTONEG_ENABLE &&
10286 cmd->autoneg != AUTONEG_DISABLE)
10287 return -EINVAL;
10289 if (cmd->autoneg == AUTONEG_DISABLE &&
10290 cmd->duplex != DUPLEX_FULL &&
10291 cmd->duplex != DUPLEX_HALF)
10292 return -EINVAL;
10294 if (cmd->autoneg == AUTONEG_ENABLE) {
10295 u32 mask = ADVERTISED_Autoneg |
10296 ADVERTISED_Pause |
10297 ADVERTISED_Asym_Pause;
10299 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10300 mask |= ADVERTISED_1000baseT_Half |
10301 ADVERTISED_1000baseT_Full;
10303 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10304 mask |= ADVERTISED_100baseT_Half |
10305 ADVERTISED_100baseT_Full |
10306 ADVERTISED_10baseT_Half |
10307 ADVERTISED_10baseT_Full |
10308 ADVERTISED_TP;
10309 else
10310 mask |= ADVERTISED_FIBRE;
10312 if (cmd->advertising & ~mask)
10313 return -EINVAL;
10315 mask &= (ADVERTISED_1000baseT_Half |
10316 ADVERTISED_1000baseT_Full |
10317 ADVERTISED_100baseT_Half |
10318 ADVERTISED_100baseT_Full |
10319 ADVERTISED_10baseT_Half |
10320 ADVERTISED_10baseT_Full);
10322 cmd->advertising &= mask;
10323 } else {
10324 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10325 if (speed != SPEED_1000)
10326 return -EINVAL;
10328 if (cmd->duplex != DUPLEX_FULL)
10329 return -EINVAL;
10330 } else {
10331 if (speed != SPEED_100 &&
10332 speed != SPEED_10)
10333 return -EINVAL;
10337 tg3_full_lock(tp, 0);
10339 tp->link_config.autoneg = cmd->autoneg;
10340 if (cmd->autoneg == AUTONEG_ENABLE) {
10341 tp->link_config.advertising = (cmd->advertising |
10342 ADVERTISED_Autoneg);
10343 tp->link_config.speed = SPEED_INVALID;
10344 tp->link_config.duplex = DUPLEX_INVALID;
10345 } else {
10346 tp->link_config.advertising = 0;
10347 tp->link_config.speed = speed;
10348 tp->link_config.duplex = cmd->duplex;
10351 tp->link_config.orig_speed = tp->link_config.speed;
10352 tp->link_config.orig_duplex = tp->link_config.duplex;
10353 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10355 if (netif_running(dev))
10356 tg3_setup_phy(tp, 1);
10358 tg3_full_unlock(tp);
10360 return 0;
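/* Validation summary for the set_settings handler above: with autoneg
 * enabled, the advertised modes must be a subset of the capability
 * mask built from phy_flags, after which only the speed/duplex bits
 * are kept and ADVERTISED_Autoneg is re-added. With autoneg disabled,
 * SERDES parts accept only 1000/full, while copper parts accept 10 or
 * 100 at either duplex. The orig_* fields snapshot the new settings so
 * later resets and power transitions can restore them.
 */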
10363 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10365 struct tg3 *tp = netdev_priv(dev);
10367 strcpy(info->driver, DRV_MODULE_NAME);
10368 strcpy(info->version, DRV_MODULE_VERSION);
10369 strcpy(info->fw_version, tp->fw_ver);
10370 strcpy(info->bus_info, pci_name(tp->pdev));
10373 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10375 struct tg3 *tp = netdev_priv(dev);
10377 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10378 wol->supported = WAKE_MAGIC;
10379 else
10380 wol->supported = 0;
10381 wol->wolopts = 0;
10382 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10383 wol->wolopts = WAKE_MAGIC;
10384 memset(&wol->sopass, 0, sizeof(wol->sopass));
10387 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10389 struct tg3 *tp = netdev_priv(dev);
10390 struct device *dp = &tp->pdev->dev;
10392 if (wol->wolopts & ~WAKE_MAGIC)
10393 return -EINVAL;
10394 if ((wol->wolopts & WAKE_MAGIC) &&
10395 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10396 return -EINVAL;
10398 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10400 spin_lock_bh(&tp->lock);
10401 if (device_may_wakeup(dp))
10402 tg3_flag_set(tp, WOL_ENABLE);
10403 else
10404 tg3_flag_clear(tp, WOL_ENABLE);
10405 spin_unlock_bh(&tp->lock);
10407 return 0;
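/* Note the division of labor in the WoL setter: device_set_wakeup_enable()
 * records the wake preference with the PM core, and the driver's own
 * WOL_ENABLE flag is then re-derived from device_may_wakeup() under
 * tp->lock, so the two views can never disagree.
 */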
10410 static u32 tg3_get_msglevel(struct net_device *dev)
10412 struct tg3 *tp = netdev_priv(dev);
10413 return tp->msg_enable;
10416 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10418 struct tg3 *tp = netdev_priv(dev);
10419 tp->msg_enable = value;
10422 static int tg3_nway_reset(struct net_device *dev)
10424 struct tg3 *tp = netdev_priv(dev);
10425 int r;
10427 if (!netif_running(dev))
10428 return -EAGAIN;
10430 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10431 return -EINVAL;
10433 if (tg3_flag(tp, USE_PHYLIB)) {
10434 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10435 return -EAGAIN;
10436 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10437 } else {
10438 u32 bmcr;
10440 spin_lock_bh(&tp->lock);
10441 r = -EINVAL;
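/* BMCR is read twice below; the first, unchecked read presumably
 * flushes a stale value so that only the second, error-checked
 * result is trusted.
 */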
10442 tg3_readphy(tp, MII_BMCR, &bmcr);
10443 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10444 ((bmcr & BMCR_ANENABLE) ||
10445 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10446 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10447 BMCR_ANENABLE);
10448 r = 0;
10450 spin_unlock_bh(&tp->lock);
10453 return r;
10456 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10458 struct tg3 *tp = netdev_priv(dev);
10460 ering->rx_max_pending = tp->rx_std_ring_mask;
10461 ering->rx_mini_max_pending = 0;
10462 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10463 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10464 else
10465 ering->rx_jumbo_max_pending = 0;
10467 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10469 ering->rx_pending = tp->rx_pending;
10470 ering->rx_mini_pending = 0;
10471 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10472 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10473 else
10474 ering->rx_jumbo_pending = 0;
10476 ering->tx_pending = tp->napi[0].tx_pending;
10479 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10481 struct tg3 *tp = netdev_priv(dev);
10482 int i, irq_sync = 0, err = 0;
10484 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10485 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10486 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10487 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10488 (tg3_flag(tp, TSO_BUG) &&
10489 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10490 return -EINVAL;
10492 if (netif_running(dev)) {
10493 tg3_phy_stop(tp);
10494 tg3_netif_stop(tp);
10495 irq_sync = 1;
10498 tg3_full_lock(tp, irq_sync);
10500 tp->rx_pending = ering->rx_pending;
10502 if (tg3_flag(tp, MAX_RXPEND_64) &&
10503 tp->rx_pending > 63)
10504 tp->rx_pending = 63;
10505 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10507 for (i = 0; i < tp->irq_max; i++)
10508 tp->napi[i].tx_pending = ering->tx_pending;
10510 if (netif_running(dev)) {
10511 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10512 err = tg3_restart_hw(tp, 1);
10513 if (!err)
10514 tg3_netif_start(tp);
10517 tg3_full_unlock(tp);
10519 if (irq_sync && !err)
10520 tg3_phy_start(tp);
10522 return err;
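/* tg3_set_ringparam() shows the reconfiguration pattern used by several
 * ethtool handlers here: quiesce (tg3_phy_stop + tg3_netif_stop with
 * irq_sync), mutate the configuration under tg3_full_lock(), then halt
 * and restart the hardware, restarting the PHY only if the restart
 * succeeded. Note the lower bound on tx_pending: it must exceed
 * MAX_SKB_FRAGS (tripled on TSO_BUG parts) so a single worst-case skb
 * can always be queued.
 */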
10525 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10527 struct tg3 *tp = netdev_priv(dev);
10529 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10531 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10532 epause->rx_pause = 1;
10533 else
10534 epause->rx_pause = 0;
10536 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10537 epause->tx_pause = 1;
10538 else
10539 epause->tx_pause = 0;
10542 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10544 struct tg3 *tp = netdev_priv(dev);
10545 int err = 0;
10547 if (tg3_flag(tp, USE_PHYLIB)) {
10548 u32 newadv;
10549 struct phy_device *phydev;
10551 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10553 if (!(phydev->supported & SUPPORTED_Pause) ||
10554 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10555 (epause->rx_pause != epause->tx_pause)))
10556 return -EINVAL;
10558 tp->link_config.flowctrl = 0;
10559 if (epause->rx_pause) {
10560 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10562 if (epause->tx_pause) {
10563 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10564 newadv = ADVERTISED_Pause;
10565 } else
10566 newadv = ADVERTISED_Pause |
10567 ADVERTISED_Asym_Pause;
10568 } else if (epause->tx_pause) {
10569 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10570 newadv = ADVERTISED_Asym_Pause;
10571 } else
10572 newadv = 0;
10574 if (epause->autoneg)
10575 tg3_flag_set(tp, PAUSE_AUTONEG);
10576 else
10577 tg3_flag_clear(tp, PAUSE_AUTONEG);
10579 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10580 u32 oldadv = phydev->advertising &
10581 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10582 if (oldadv != newadv) {
10583 phydev->advertising &=
10584 ~(ADVERTISED_Pause |
10585 ADVERTISED_Asym_Pause);
10586 phydev->advertising |= newadv;
10587 if (phydev->autoneg) {
10588 /*
10589 * Always renegotiate the link to
10590 * inform our link partner of our
10591 * flow control settings, even if the
10592 * flow control is forced. Let
10593 * tg3_adjust_link() do the final
10594 * flow control setup.
10595 */
10596 return phy_start_aneg(phydev);
10600 if (!epause->autoneg)
10601 tg3_setup_flow_control(tp, 0, 0);
10602 } else {
10603 tp->link_config.orig_advertising &=
10604 ~(ADVERTISED_Pause |
10605 ADVERTISED_Asym_Pause);
10606 tp->link_config.orig_advertising |= newadv;
10608 } else {
10609 int irq_sync = 0;
10611 if (netif_running(dev)) {
10612 tg3_netif_stop(tp);
10613 irq_sync = 1;
10616 tg3_full_lock(tp, irq_sync);
10618 if (epause->autoneg)
10619 tg3_flag_set(tp, PAUSE_AUTONEG);
10620 else
10621 tg3_flag_clear(tp, PAUSE_AUTONEG);
10622 if (epause->rx_pause)
10623 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10624 else
10625 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10626 if (epause->tx_pause)
10627 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10628 else
10629 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10631 if (netif_running(dev)) {
10632 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10633 err = tg3_restart_hw(tp, 1);
10634 if (!err)
10635 tg3_netif_start(tp);
10638 tg3_full_unlock(tp);
10641 return err;
10644 static int tg3_get_sset_count(struct net_device *dev, int sset)
10646 switch (sset) {
10647 case ETH_SS_TEST:
10648 return TG3_NUM_TEST;
10649 case ETH_SS_STATS:
10650 return TG3_NUM_STATS;
10651 default:
10652 return -EOPNOTSUPP;
10656 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10658 switch (stringset) {
10659 case ETH_SS_STATS:
10660 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10661 break;
10662 case ETH_SS_TEST:
10663 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10664 break;
10665 default:
10666 WARN_ON(1); /* we need a WARN() */
10667 break;
10671 static int tg3_set_phys_id(struct net_device *dev,
10672 enum ethtool_phys_id_state state)
10674 struct tg3 *tp = netdev_priv(dev);
10676 if (!netif_running(tp->dev))
10677 return -EAGAIN;
10679 switch (state) {
10680 case ETHTOOL_ID_ACTIVE:
10681 return 1; /* cycle on/off once per second */
10683 case ETHTOOL_ID_ON:
10684 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10685 LED_CTRL_1000MBPS_ON |
10686 LED_CTRL_100MBPS_ON |
10687 LED_CTRL_10MBPS_ON |
10688 LED_CTRL_TRAFFIC_OVERRIDE |
10689 LED_CTRL_TRAFFIC_BLINK |
10690 LED_CTRL_TRAFFIC_LED);
10691 break;
10693 case ETHTOOL_ID_OFF:
10694 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10695 LED_CTRL_TRAFFIC_OVERRIDE);
10696 break;
10698 case ETHTOOL_ID_INACTIVE:
10699 tw32(MAC_LED_CTRL, tp->led_ctrl);
10700 break;
10703 return 0;
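/* Port-identify protocol: returning 1 from ETHTOOL_ID_ACTIVE asks the
 * ethtool core to drive the blinking itself, calling back with
 * ETHTOOL_ID_ON/ETHTOOL_ID_OFF once per second, and ETHTOOL_ID_INACTIVE
 * restores the saved tp->led_ctrl. From user space this is reached
 * with, e.g., "ethtool -p eth0 5" (interface name illustrative).
 */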
10706 static void tg3_get_ethtool_stats(struct net_device *dev,
10707 struct ethtool_stats *estats, u64 *tmp_stats)
10709 struct tg3 *tp = netdev_priv(dev);
10710 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10713 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10715 int i;
10716 __be32 *buf;
10717 u32 offset = 0, len = 0;
10718 u32 magic, val;
10720 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10721 return NULL;
10723 if (magic == TG3_EEPROM_MAGIC) {
10724 for (offset = TG3_NVM_DIR_START;
10725 offset < TG3_NVM_DIR_END;
10726 offset += TG3_NVM_DIRENT_SIZE) {
10727 if (tg3_nvram_read(tp, offset, &val))
10728 return NULL;
10730 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10731 TG3_NVM_DIRTYPE_EXTVPD)
10732 break;
10735 if (offset != TG3_NVM_DIR_END) {
10736 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10737 if (tg3_nvram_read(tp, offset + 4, &offset))
10738 return NULL;
10740 offset = tg3_nvram_logical_addr(tp, offset);
10744 if (!offset || !len) {
10745 offset = TG3_NVM_VPD_OFF;
10746 len = TG3_NVM_VPD_LEN;
10749 buf = kmalloc(len, GFP_KERNEL);
10750 if (buf == NULL)
10751 return NULL;
10753 if (magic == TG3_EEPROM_MAGIC) {
10754 for (i = 0; i < len; i += 4) {
10755 /* The data is in little-endian format in NVRAM.
10756 * Use the big-endian read routines to preserve
10757 * the byte order as it exists in NVRAM.
10758 */
10759 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10760 goto error;
10762 } else {
10763 u8 *ptr;
10764 ssize_t cnt;
10765 unsigned int pos = 0;
10767 ptr = (u8 *)&buf[0];
10768 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10769 cnt = pci_read_vpd(tp->pdev, pos,
10770 len - pos, ptr);
10771 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10772 cnt = 0;
10773 else if (cnt < 0)
10774 goto error;
10776 if (pos != len)
10777 goto error;
10780 *vpdlen = len;
10782 return buf;
10784 error:
10785 kfree(buf);
10786 return NULL;
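/* tg3_vpd_readblock() locates VPD in one of two places: on parts with
 * the standard NVRAM magic it walks the NVRAM directory for an EXTVPD
 * entry (falling back to the fixed TG3_NVM_VPD_OFF window), and on
 * everything else it pulls the data through the PCI VPD capability via
 * pci_read_vpd(), retrying up to three times after -ETIMEDOUT or
 * -EINTR before giving up.
 */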
10789 #define NVRAM_TEST_SIZE 0x100
10790 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10791 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10792 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10793 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
10794 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
10795 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
10796 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10797 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10799 static int tg3_test_nvram(struct tg3 *tp)
10801 u32 csum, magic, len;
10802 __be32 *buf;
10803 int i, j, k, err = 0, size;
10805 if (tg3_flag(tp, NO_NVRAM))
10806 return 0;
10808 if (tg3_nvram_read(tp, 0, &magic) != 0)
10809 return -EIO;
10811 if (magic == TG3_EEPROM_MAGIC)
10812 size = NVRAM_TEST_SIZE;
10813 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10814 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10815 TG3_EEPROM_SB_FORMAT_1) {
10816 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10817 case TG3_EEPROM_SB_REVISION_0:
10818 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10819 break;
10820 case TG3_EEPROM_SB_REVISION_2:
10821 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10822 break;
10823 case TG3_EEPROM_SB_REVISION_3:
10824 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10825 break;
10826 case TG3_EEPROM_SB_REVISION_4:
10827 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10828 break;
10829 case TG3_EEPROM_SB_REVISION_5:
10830 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10831 break;
10832 case TG3_EEPROM_SB_REVISION_6:
10833 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10834 break;
10835 default:
10836 return -EIO;
10838 } else
10839 return 0;
10840 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10841 size = NVRAM_SELFBOOT_HW_SIZE;
10842 else
10843 return -EIO;
10845 buf = kmalloc(size, GFP_KERNEL);
10846 if (buf == NULL)
10847 return -ENOMEM;
10849 err = -EIO;
10850 for (i = 0, j = 0; i < size; i += 4, j++) {
10851 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10852 if (err)
10853 break;
10855 if (i < size)
10856 goto out;
10858 /* Selfboot format */
10859 magic = be32_to_cpu(buf[0]);
10860 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10861 TG3_EEPROM_MAGIC_FW) {
10862 u8 *buf8 = (u8 *) buf, csum8 = 0;
10864 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10865 TG3_EEPROM_SB_REVISION_2) {
10866 /* For rev 2, the csum doesn't include the MBA. */
10867 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10868 csum8 += buf8[i];
10869 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10870 csum8 += buf8[i];
10871 } else {
10872 for (i = 0; i < size; i++)
10873 csum8 += buf8[i];
10876 if (csum8 == 0) {
10877 err = 0;
10878 goto out;
10881 err = -EIO;
10882 goto out;
10885 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10886 TG3_EEPROM_MAGIC_HW) {
10887 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10888 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10889 u8 *buf8 = (u8 *) buf;
10891 /* Separate the parity bits and the data bytes. */
10892 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10893 if ((i == 0) || (i == 8)) {
10894 int l;
10895 u8 msk;
10897 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10898 parity[k++] = buf8[i] & msk;
10899 i++;
10900 } else if (i == 16) {
10901 int l;
10902 u8 msk;
10904 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10905 parity[k++] = buf8[i] & msk;
10906 i++;
10908 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10909 parity[k++] = buf8[i] & msk;
10910 i++;
10912 data[j++] = buf8[i];
10915 err = -EIO;
10916 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10917 u8 hw8 = hweight8(data[i]);
10919 if ((hw8 & 0x1) && parity[i])
10920 goto out;
10921 else if (!(hw8 & 0x1) && !parity[i])
10922 goto out;
10924 err = 0;
10925 goto out;
10928 err = -EIO;
10930 /* Bootstrap checksum at offset 0x10 */
10931 csum = calc_crc((unsigned char *) buf, 0x10);
10932 if (csum != le32_to_cpu(buf[0x10/4]))
10933 goto out;
10935 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10936 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10937 if (csum != le32_to_cpu(buf[0xfc/4]))
10938 goto out;
10940 kfree(buf);
10942 buf = tg3_vpd_readblock(tp, &len);
10943 if (!buf)
10944 return -ENOMEM;
10946 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
10947 if (i > 0) {
10948 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10949 if (j < 0)
10950 goto out;
10952 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
10953 goto out;
10955 i += PCI_VPD_LRDT_TAG_SIZE;
10956 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10957 PCI_VPD_RO_KEYWORD_CHKSUM);
10958 if (j > 0) {
10959 u8 csum8 = 0;
10961 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10963 for (i = 0; i <= j; i++)
10964 csum8 += ((u8 *)buf)[i];
10966 if (csum8)
10967 goto out;
10971 err = 0;
10973 out:
10974 kfree(buf);
10975 return err;
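/* Three integrity schemes meet in tg3_test_nvram():
 * - selfboot firmware images: an 8-bit sum over the image must be zero
 *   (rev 2 skips the 4-byte MBA field at TG3_EEPROM_SB_F1R2_MBA_OFF);
 * - selfboot hardware format: per-byte parity bits are unpacked, and
 *   each data byte plus its parity bit must have odd overall weight;
 * - legacy images: a CRC over the bootstrap block (checked at 0x10) and
 *   the manufacturing block (0x74..0xfb, checked at 0xfc), followed by
 *   the VPD read-only section, whose CHKSUM keyword must sum to zero.
 */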
10978 #define TG3_SERDES_TIMEOUT_SEC 2
10979 #define TG3_COPPER_TIMEOUT_SEC 6
10981 static int tg3_test_link(struct tg3 *tp)
10983 int i, max;
10985 if (!netif_running(tp->dev))
10986 return -ENODEV;
10988 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10989 max = TG3_SERDES_TIMEOUT_SEC;
10990 else
10991 max = TG3_COPPER_TIMEOUT_SEC;
10993 for (i = 0; i < max; i++) {
10994 if (netif_carrier_ok(tp->dev))
10995 return 0;
10997 if (msleep_interruptible(1000))
10998 break;
11001 return -EIO;
11004 /* Only test the commonly used registers */
11005 static int tg3_test_registers(struct tg3 *tp)
11007 int i, is_5705, is_5750;
11008 u32 offset, read_mask, write_mask, val, save_val, read_val;
11009 static struct {
11010 u16 offset;
11011 u16 flags;
11012 #define TG3_FL_5705 0x1
11013 #define TG3_FL_NOT_5705 0x2
11014 #define TG3_FL_NOT_5788 0x4
11015 #define TG3_FL_NOT_5750 0x8
11016 u32 read_mask;
11017 u32 write_mask;
11018 } reg_tbl[] = {
11019 /* MAC Control Registers */
11020 { MAC_MODE, TG3_FL_NOT_5705,
11021 0x00000000, 0x00ef6f8c },
11022 { MAC_MODE, TG3_FL_5705,
11023 0x00000000, 0x01ef6b8c },
11024 { MAC_STATUS, TG3_FL_NOT_5705,
11025 0x03800107, 0x00000000 },
11026 { MAC_STATUS, TG3_FL_5705,
11027 0x03800100, 0x00000000 },
11028 { MAC_ADDR_0_HIGH, 0x0000,
11029 0x00000000, 0x0000ffff },
11030 { MAC_ADDR_0_LOW, 0x0000,
11031 0x00000000, 0xffffffff },
11032 { MAC_RX_MTU_SIZE, 0x0000,
11033 0x00000000, 0x0000ffff },
11034 { MAC_TX_MODE, 0x0000,
11035 0x00000000, 0x00000070 },
11036 { MAC_TX_LENGTHS, 0x0000,
11037 0x00000000, 0x00003fff },
11038 { MAC_RX_MODE, TG3_FL_NOT_5705,
11039 0x00000000, 0x000007fc },
11040 { MAC_RX_MODE, TG3_FL_5705,
11041 0x00000000, 0x000007dc },
11042 { MAC_HASH_REG_0, 0x0000,
11043 0x00000000, 0xffffffff },
11044 { MAC_HASH_REG_1, 0x0000,
11045 0x00000000, 0xffffffff },
11046 { MAC_HASH_REG_2, 0x0000,
11047 0x00000000, 0xffffffff },
11048 { MAC_HASH_REG_3, 0x0000,
11049 0x00000000, 0xffffffff },
11051 /* Receive Data and Receive BD Initiator Control Registers. */
11052 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11053 0x00000000, 0xffffffff },
11054 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11055 0x00000000, 0xffffffff },
11056 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11057 0x00000000, 0x00000003 },
11058 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11059 0x00000000, 0xffffffff },
11060 { RCVDBDI_STD_BD+0, 0x0000,
11061 0x00000000, 0xffffffff },
11062 { RCVDBDI_STD_BD+4, 0x0000,
11063 0x00000000, 0xffffffff },
11064 { RCVDBDI_STD_BD+8, 0x0000,
11065 0x00000000, 0xffff0002 },
11066 { RCVDBDI_STD_BD+0xc, 0x0000,
11067 0x00000000, 0xffffffff },
11069 /* Receive BD Initiator Control Registers. */
11070 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11071 0x00000000, 0xffffffff },
11072 { RCVBDI_STD_THRESH, TG3_FL_5705,
11073 0x00000000, 0x000003ff },
11074 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11075 0x00000000, 0xffffffff },
11077 /* Host Coalescing Control Registers. */
11078 { HOSTCC_MODE, TG3_FL_NOT_5705,
11079 0x00000000, 0x00000004 },
11080 { HOSTCC_MODE, TG3_FL_5705,
11081 0x00000000, 0x000000f6 },
11082 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11083 0x00000000, 0xffffffff },
11084 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11085 0x00000000, 0x000003ff },
11086 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11087 0x00000000, 0xffffffff },
11088 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11089 0x00000000, 0x000003ff },
11090 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11091 0x00000000, 0xffffffff },
11092 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11093 0x00000000, 0x000000ff },
11094 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11095 0x00000000, 0xffffffff },
11096 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11097 0x00000000, 0x000000ff },
11098 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11099 0x00000000, 0xffffffff },
11100 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11101 0x00000000, 0xffffffff },
11102 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11103 0x00000000, 0xffffffff },
11104 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11105 0x00000000, 0x000000ff },
11106 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11107 0x00000000, 0xffffffff },
11108 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11109 0x00000000, 0x000000ff },
11110 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11111 0x00000000, 0xffffffff },
11112 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11113 0x00000000, 0xffffffff },
11114 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11115 0x00000000, 0xffffffff },
11116 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11117 0x00000000, 0xffffffff },
11118 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11119 0x00000000, 0xffffffff },
11120 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11121 0xffffffff, 0x00000000 },
11122 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11123 0xffffffff, 0x00000000 },
11125 /* Buffer Manager Control Registers. */
11126 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11127 0x00000000, 0x007fff80 },
11128 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11129 0x00000000, 0x007fffff },
11130 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11131 0x00000000, 0x0000003f },
11132 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11133 0x00000000, 0x000001ff },
11134 { BUFMGR_MB_HIGH_WATER, 0x0000,
11135 0x00000000, 0x000001ff },
11136 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11137 0xffffffff, 0x00000000 },
11138 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11139 0xffffffff, 0x00000000 },
11141 /* Mailbox Registers */
11142 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11143 0x00000000, 0x000001ff },
11144 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11145 0x00000000, 0x000001ff },
11146 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11147 0x00000000, 0x000007ff },
11148 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11149 0x00000000, 0x000001ff },
11151 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11154 is_5705 = is_5750 = 0;
11155 if (tg3_flag(tp, 5705_PLUS)) {
11156 is_5705 = 1;
11157 if (tg3_flag(tp, 5750_PLUS))
11158 is_5750 = 1;
11161 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11162 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11163 continue;
11165 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11166 continue;
11168 if (tg3_flag(tp, IS_5788) &&
11169 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11170 continue;
11172 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11173 continue;
11175 offset = (u32) reg_tbl[i].offset;
11176 read_mask = reg_tbl[i].read_mask;
11177 write_mask = reg_tbl[i].write_mask;
11179 /* Save the original register content */
11180 save_val = tr32(offset);
11182 /* Determine the read-only value. */
11183 read_val = save_val & read_mask;
11185 /* Write zero to the register, then make sure the read-only bits
11186 * are not changed and the read/write bits are all zeros.
11187 */
11188 tw32(offset, 0);
11190 val = tr32(offset);
11192 /* Test the read-only and read/write bits. */
11193 if (((val & read_mask) != read_val) || (val & write_mask))
11194 goto out;
11196 /* Write ones to all the bits defined by RdMask and WrMask, then
11197 * make sure the read-only bits are not changed and the
11198 * read/write bits are all ones.
11199 */
11200 tw32(offset, read_mask | write_mask);
11202 val = tr32(offset);
11204 /* Test the read-only bits. */
11205 if ((val & read_mask) != read_val)
11206 goto out;
11208 /* Test the read/write bits. */
11209 if ((val & write_mask) != write_mask)
11210 goto out;
11212 tw32(offset, save_val);
11215 return 0;
11217 out:
11218 if (netif_msg_hw(tp))
11219 netdev_err(tp->dev,
11220 "Register test failed at offset %x\n", offset);
11221 tw32(offset, save_val);
11222 return -EIO;
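/* Each reg_tbl[] entry carries a read-only mask and a read/write mask.
 * The test saves the register, writes all-zeros and then all-ones
 * through the combined mask, and after each write requires that the
 * read-only bits still hold their saved value while the read/write
 * bits track what was written; the original value is restored whether
 * the test passes or fails.
 */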
11225 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11227 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11228 int i;
11229 u32 j;
11231 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11232 for (j = 0; j < len; j += 4) {
11233 u32 val;
11235 tg3_write_mem(tp, offset + j, test_pattern[i]);
11236 tg3_read_mem(tp, offset + j, &val);
11237 if (val != test_pattern[i])
11238 return -EIO;
11241 return 0;
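/* The three patterns target the classic failure modes: all-zeros and
 * all-ones catch stuck-at bits, while the alternating 0xaa55a55a
 * pattern catches shorted or coupled data lines that the uniform
 * patterns would miss.
 */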
11244 static int tg3_test_memory(struct tg3 *tp)
11246 static struct mem_entry {
11247 u32 offset;
11248 u32 len;
11249 } mem_tbl_570x[] = {
11250 { 0x00000000, 0x00b50},
11251 { 0x00002000, 0x1c000},
11252 { 0xffffffff, 0x00000}
11253 }, mem_tbl_5705[] = {
11254 { 0x00000100, 0x0000c},
11255 { 0x00000200, 0x00008},
11256 { 0x00004000, 0x00800},
11257 { 0x00006000, 0x01000},
11258 { 0x00008000, 0x02000},
11259 { 0x00010000, 0x0e000},
11260 { 0xffffffff, 0x00000}
11261 }, mem_tbl_5755[] = {
11262 { 0x00000200, 0x00008},
11263 { 0x00004000, 0x00800},
11264 { 0x00006000, 0x00800},
11265 { 0x00008000, 0x02000},
11266 { 0x00010000, 0x0c000},
11267 { 0xffffffff, 0x00000}
11268 }, mem_tbl_5906[] = {
11269 { 0x00000200, 0x00008},
11270 { 0x00004000, 0x00400},
11271 { 0x00006000, 0x00400},
11272 { 0x00008000, 0x01000},
11273 { 0x00010000, 0x01000},
11274 { 0xffffffff, 0x00000}
11275 }, mem_tbl_5717[] = {
11276 { 0x00000200, 0x00008},
11277 { 0x00010000, 0x0a000},
11278 { 0x00020000, 0x13c00},
11279 { 0xffffffff, 0x00000}
11280 }, mem_tbl_57765[] = {
11281 { 0x00000200, 0x00008},
11282 { 0x00004000, 0x00800},
11283 { 0x00006000, 0x09800},
11284 { 0x00010000, 0x0a000},
11285 { 0xffffffff, 0x00000}
11287 struct mem_entry *mem_tbl;
11288 int err = 0;
11289 int i;
11291 if (tg3_flag(tp, 5717_PLUS))
11292 mem_tbl = mem_tbl_5717;
11293 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11294 mem_tbl = mem_tbl_57765;
11295 else if (tg3_flag(tp, 5755_PLUS))
11296 mem_tbl = mem_tbl_5755;
11297 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11298 mem_tbl = mem_tbl_5906;
11299 else if (tg3_flag(tp, 5705_PLUS))
11300 mem_tbl = mem_tbl_5705;
11301 else
11302 mem_tbl = mem_tbl_570x;
11304 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11305 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11306 if (err)
11307 break;
11310 return err;
11313 #define TG3_MAC_LOOPBACK 0
11314 #define TG3_PHY_LOOPBACK 1
11315 #define TG3_TSO_LOOPBACK 2
11317 #define TG3_TSO_MSS 500
11319 #define TG3_TSO_IP_HDR_LEN 20
11320 #define TG3_TSO_TCP_HDR_LEN 20
11321 #define TG3_TSO_TCP_OPT_LEN 12
11323 static const u8 tg3_tso_header[] = {
11324 0x08, 0x00,
11325 0x45, 0x00, 0x00, 0x00,
11326 0x00, 0x00, 0x40, 0x00,
11327 0x40, 0x06, 0x00, 0x00,
11328 0x0a, 0x00, 0x00, 0x01,
11329 0x0a, 0x00, 0x00, 0x02,
11330 0x0d, 0x00, 0xe0, 0x00,
11331 0x00, 0x00, 0x01, 0x00,
11332 0x00, 0x00, 0x02, 0x00,
11333 0x80, 0x10, 0x10, 0x00,
11334 0x14, 0x09, 0x00, 0x00,
11335 0x01, 0x01, 0x08, 0x0a,
11336 0x11, 0x11, 0x11, 0x11,
11337 0x11, 0x11, 0x11, 0x11,
11340 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11342 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11343 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11344 u32 budget;
11345 struct sk_buff *skb, *rx_skb;
11346 u8 *tx_data;
11347 dma_addr_t map;
11348 int num_pkts, tx_len, rx_len, i, err;
11349 struct tg3_rx_buffer_desc *desc;
11350 struct tg3_napi *tnapi, *rnapi;
11351 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11353 tnapi = &tp->napi[0];
11354 rnapi = &tp->napi[0];
11355 if (tp->irq_cnt > 1) {
11356 if (tg3_flag(tp, ENABLE_RSS))
11357 rnapi = &tp->napi[1];
11358 if (tg3_flag(tp, ENABLE_TSS))
11359 tnapi = &tp->napi[1];
11361 coal_now = tnapi->coal_now | rnapi->coal_now;
11363 err = -EIO;
11365 tx_len = pktsz;
11366 skb = netdev_alloc_skb(tp->dev, tx_len);
11367 if (!skb)
11368 return -ENOMEM;
11370 tx_data = skb_put(skb, tx_len);
11371 memcpy(tx_data, tp->dev->dev_addr, 6);
11372 memset(tx_data + 6, 0x0, 8);
11374 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11376 if (loopback_mode == TG3_TSO_LOOPBACK) {
11377 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11379 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11380 TG3_TSO_TCP_OPT_LEN;
11382 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11383 sizeof(tg3_tso_header));
11384 mss = TG3_TSO_MSS;
11386 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11387 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11389 /* Set the total length field in the IP header */
11390 iph->tot_len = htons((u16)(mss + hdr_len));
11392 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11393 TXD_FLAG_CPU_POST_DMA);
11395 if (tg3_flag(tp, HW_TSO_1) ||
11396 tg3_flag(tp, HW_TSO_2) ||
11397 tg3_flag(tp, HW_TSO_3)) {
11398 struct tcphdr *th;
11399 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11400 th = (struct tcphdr *)&tx_data[val];
11401 th->check = 0;
11402 } else
11403 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11405 if (tg3_flag(tp, HW_TSO_3)) {
11406 mss |= (hdr_len & 0xc) << 12;
11407 if (hdr_len & 0x10)
11408 base_flags |= 0x00000010;
11409 base_flags |= (hdr_len & 0x3e0) << 5;
11410 } else if (tg3_flag(tp, HW_TSO_2))
11411 mss |= hdr_len << 9;
11412 else if (tg3_flag(tp, HW_TSO_1) ||
11413 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11414 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11415 } else {
11416 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11419 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11420 } else {
11421 num_pkts = 1;
11422 data_off = ETH_HLEN;
11425 for (i = data_off; i < tx_len; i++)
11426 tx_data[i] = (u8) (i & 0xff);
11428 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11429 if (pci_dma_mapping_error(tp->pdev, map)) {
11430 dev_kfree_skb(skb);
11431 return -EIO;
11434 val = tnapi->tx_prod;
11435 tnapi->tx_buffers[val].skb = skb;
11436 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11438 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11439 rnapi->coal_now);
11441 udelay(10);
11443 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11445 budget = tg3_tx_avail(tnapi);
11446 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11447 base_flags | TXD_FLAG_END, mss, 0)) {
11448 tnapi->tx_buffers[val].skb = NULL;
11449 dev_kfree_skb(skb);
11450 return -EIO;
11453 tnapi->tx_prod++;
11455 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11456 tr32_mailbox(tnapi->prodmbox);
11458 udelay(10);
11460 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11461 for (i = 0; i < 35; i++) {
11462 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11463 coal_now);
11465 udelay(10);
11467 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11468 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11469 if ((tx_idx == tnapi->tx_prod) &&
11470 (rx_idx == (rx_start_idx + num_pkts)))
11471 break;
11474 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
11475 dev_kfree_skb(skb);
11477 if (tx_idx != tnapi->tx_prod)
11478 goto out;
11480 if (rx_idx != rx_start_idx + num_pkts)
11481 goto out;
11483 val = data_off;
11484 while (rx_idx != rx_start_idx) {
11485 desc = &rnapi->rx_rcb[rx_start_idx++];
11486 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11487 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11489 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11490 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11491 goto out;
11493 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11494 - ETH_FCS_LEN;
11496 if (loopback_mode != TG3_TSO_LOOPBACK) {
11497 if (rx_len != tx_len)
11498 goto out;
11500 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11501 if (opaque_key != RXD_OPAQUE_RING_STD)
11502 goto out;
11503 } else {
11504 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11505 goto out;
11507 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11508 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11509 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11510 goto out;
11513 if (opaque_key == RXD_OPAQUE_RING_STD) {
11514 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11515 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11516 mapping);
11517 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11518 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11519 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11520 mapping);
11521 } else
11522 goto out;
11524 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11525 PCI_DMA_FROMDEVICE);
11527 for (i = data_off; i < rx_len; i++, val++) {
11528 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11529 goto out;
11533 err = 0;
11535 /* tg3_free_rings will unmap and free the rx_skb */
11536 out:
11537 return err;
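/* One loopback iteration, end to end: build a test frame (prefixed
 * with tg3_tso_header and segmented by the hardware in the TSO case),
 * DMA-map it, post it to the tx ring, and force an interrupt window by
 * poking HOSTCC_MODE. The completion poll gives slower 10/100 links up
 * to ~350 usec; success requires the tx consumer to catch up, the rx
 * producer to advance by num_pkts, and every returned payload byte to
 * match the (i & 0xff) fill pattern written at transmit time.
 */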
11540 #define TG3_STD_LOOPBACK_FAILED 1
11541 #define TG3_JMB_LOOPBACK_FAILED 2
11542 #define TG3_TSO_LOOPBACK_FAILED 4
11544 #define TG3_MAC_LOOPBACK_SHIFT 0
11545 #define TG3_PHY_LOOPBACK_SHIFT 4
11546 #define TG3_LOOPBACK_FAILED 0x00000077
11548 static int tg3_test_loopback(struct tg3 *tp)
11550 int err = 0;
11551 u32 eee_cap;
11553 if (!netif_running(tp->dev))
11554 return TG3_LOOPBACK_FAILED;
11556 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11557 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11559 err = tg3_reset_hw(tp, 1);
11560 if (err) {
11561 err = TG3_LOOPBACK_FAILED;
11562 goto done;
11565 if (tg3_flag(tp, ENABLE_RSS)) {
11566 int i;
11568 /* Reroute all rx packets to the 1st queue */
11569 for (i = MAC_RSS_INDIR_TBL_0;
11570 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11571 tw32(i, 0x0);
11574 /* HW errata - mac loopback fails in some cases on 5780.
11575 * Normal traffic and PHY loopback are not affected by
11576 * errata. Also, the MAC loopback test is deprecated for
11577 * all newer ASIC revisions.
11578 */
11579 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11580 !tg3_flag(tp, CPMU_PRESENT)) {
11581 tg3_mac_loopback(tp, true);
11583 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11584 err |= TG3_STD_LOOPBACK_FAILED <<
11585 TG3_MAC_LOOPBACK_SHIFT;
11587 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11588 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11589 err |= TG3_JMB_LOOPBACK_FAILED <<
11590 TG3_MAC_LOOPBACK_SHIFT;
11592 tg3_mac_loopback(tp, false);
11595 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11596 !tg3_flag(tp, USE_PHYLIB)) {
11597 int i;
11599 tg3_phy_lpbk_set(tp, 0);
11601 /* Wait for link */
11602 for (i = 0; i < 100; i++) {
11603 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11604 break;
11605 mdelay(1);
11608 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11609 err |= TG3_STD_LOOPBACK_FAILED <<
11610 TG3_PHY_LOOPBACK_SHIFT;
11611 if (tg3_flag(tp, TSO_CAPABLE) &&
11612 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11613 err |= TG3_TSO_LOOPBACK_FAILED <<
11614 TG3_PHY_LOOPBACK_SHIFT;
11615 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11616 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11617 err |= TG3_JMB_LOOPBACK_FAILED <<
11618 TG3_PHY_LOOPBACK_SHIFT;
11620 /* Re-enable gphy autopowerdown. */
11621 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11622 tg3_phy_toggle_apd(tp, true);
11625 done:
11626 tp->phy_flags |= eee_cap;
11628 return err;
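/* The return value packs per-mode failure bits: STD=1, JMB=2 and TSO=4
 * within a nibble, shifted by TG3_MAC_LOOPBACK_SHIFT (0) or
 * TG3_PHY_LOOPBACK_SHIFT (4), so 0x77 means everything failed. MAC
 * loopback is skipped on 5780 (hardware erratum) and on CPMU parts,
 * where it is deprecated; with RSS enabled, the indirection table is
 * zeroed first so every looped packet lands on the queue being polled.
 */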
11631 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11632 u64 *data)
11634 struct tg3 *tp = netdev_priv(dev);
11636 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11637 tg3_power_up(tp)) {
11638 etest->flags |= ETH_TEST_FL_FAILED;
11639 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11640 return;
11643 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11645 if (tg3_test_nvram(tp) != 0) {
11646 etest->flags |= ETH_TEST_FL_FAILED;
11647 data[0] = 1;
11649 if (tg3_test_link(tp) != 0) {
11650 etest->flags |= ETH_TEST_FL_FAILED;
11651 data[1] = 1;
11653 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11654 int err, err2 = 0, irq_sync = 0;
11656 if (netif_running(dev)) {
11657 tg3_phy_stop(tp);
11658 tg3_netif_stop(tp);
11659 irq_sync = 1;
11662 tg3_full_lock(tp, irq_sync);
11664 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11665 err = tg3_nvram_lock(tp);
11666 tg3_halt_cpu(tp, RX_CPU_BASE);
11667 if (!tg3_flag(tp, 5705_PLUS))
11668 tg3_halt_cpu(tp, TX_CPU_BASE);
11669 if (!err)
11670 tg3_nvram_unlock(tp);
11672 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11673 tg3_phy_reset(tp);
11675 if (tg3_test_registers(tp) != 0) {
11676 etest->flags |= ETH_TEST_FL_FAILED;
11677 data[2] = 1;
11679 if (tg3_test_memory(tp) != 0) {
11680 etest->flags |= ETH_TEST_FL_FAILED;
11681 data[3] = 1;
11683 if ((data[4] = tg3_test_loopback(tp)) != 0)
11684 etest->flags |= ETH_TEST_FL_FAILED;
11686 tg3_full_unlock(tp);
11688 if (tg3_test_interrupt(tp) != 0) {
11689 etest->flags |= ETH_TEST_FL_FAILED;
11690 data[5] = 1;
11693 tg3_full_lock(tp, 0);
11695 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11696 if (netif_running(dev)) {
11697 tg3_flag_set(tp, INIT_COMPLETE);
11698 err2 = tg3_restart_hw(tp, 1);
11699 if (!err2)
11700 tg3_netif_start(tp);
11703 tg3_full_unlock(tp);
11705 if (irq_sync && !err2)
11706 tg3_phy_start(tp);
11708 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11709 tg3_power_down(tp);
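/* Result layout handed back to ethtool (one u64 per TG3_NUM_TEST
 * entry): data[0] nvram, data[1] link, data[2] registers, data[3]
 * memory, data[4] the packed loopback mask from tg3_test_loopback(),
 * data[5] interrupt. The register/memory/loopback tests are
 * destructive and therefore only run for "ethtool -t <if> offline",
 * which quiesces the device and fully restarts it afterwards.
 */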
11713 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11715 struct mii_ioctl_data *data = if_mii(ifr);
11716 struct tg3 *tp = netdev_priv(dev);
11717 int err;
11719 if (tg3_flag(tp, USE_PHYLIB)) {
11720 struct phy_device *phydev;
11721 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11722 return -EAGAIN;
11723 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11724 return phy_mii_ioctl(phydev, ifr, cmd);
11727 switch (cmd) {
11728 case SIOCGMIIPHY:
11729 data->phy_id = tp->phy_addr;
11731 /* fallthru */
11732 case SIOCGMIIREG: {
11733 u32 mii_regval;
11735 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11736 break; /* We have no PHY */
11738 if (!netif_running(dev))
11739 return -EAGAIN;
11741 spin_lock_bh(&tp->lock);
11742 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11743 spin_unlock_bh(&tp->lock);
11745 data->val_out = mii_regval;
11747 return err;
11750 case SIOCSMIIREG:
11751 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11752 break; /* We have no PHY */
11754 if (!netif_running(dev))
11755 return -EAGAIN;
11757 spin_lock_bh(&tp->lock);
11758 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11759 spin_unlock_bh(&tp->lock);
11761 return err;
11763 default:
11764 /* do nothing */
11765 break;
11767 return -EOPNOTSUPP;
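/* The MII ioctls above (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) only make
 * sense when the MAC itself owns the PHY; with phylib attached they
 * are forwarded wholesale to phy_mii_ioctl(), and on SERDES parts they
 * fall through to -EOPNOTSUPP since there is no MII PHY to talk to.
 */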
11770 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11772 struct tg3 *tp = netdev_priv(dev);
11774 memcpy(ec, &tp->coal, sizeof(*ec));
11775 return 0;
11778 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11780 struct tg3 *tp = netdev_priv(dev);
11781 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11782 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11784 if (!tg3_flag(tp, 5705_PLUS)) {
11785 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11786 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11787 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11788 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11791 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11792 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11793 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11794 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11795 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11796 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11797 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11798 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11799 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11800 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11801 return -EINVAL;
11803 /* No rx interrupts will be generated if both are zero */
11804 if ((ec->rx_coalesce_usecs == 0) &&
11805 (ec->rx_max_coalesced_frames == 0))
11806 return -EINVAL;
11808 /* No tx interrupts will be generated if both are zero */
11809 if ((ec->tx_coalesce_usecs == 0) &&
11810 (ec->tx_max_coalesced_frames == 0))
11811 return -EINVAL;
11813 /* Only copy relevant parameters, ignore all others. */
11814 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11815 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11816 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11817 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11818 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11819 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11820 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11821 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11822 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11824 if (netif_running(dev)) {
11825 tg3_full_lock(tp, 0);
11826 __tg3_set_coalesce(tp, &tp->coal);
11827 tg3_full_unlock(tp);
11829 return 0;
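/* On 5705-and-newer parts the irq-level and statistics coalescing
 * limits are left at zero, so those ec fields must be zero as well;
 * older parts accept values up to the MAX_* bounds. Independently, the
 * handler rejects any setup where both the usecs and max-frames
 * triggers of one direction are zero, since that direction would then
 * never raise an interrupt.
 */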
11832 static const struct ethtool_ops tg3_ethtool_ops = {
11833 .get_settings = tg3_get_settings,
11834 .set_settings = tg3_set_settings,
11835 .get_drvinfo = tg3_get_drvinfo,
11836 .get_regs_len = tg3_get_regs_len,
11837 .get_regs = tg3_get_regs,
11838 .get_wol = tg3_get_wol,
11839 .set_wol = tg3_set_wol,
11840 .get_msglevel = tg3_get_msglevel,
11841 .set_msglevel = tg3_set_msglevel,
11842 .nway_reset = tg3_nway_reset,
11843 .get_link = ethtool_op_get_link,
11844 .get_eeprom_len = tg3_get_eeprom_len,
11845 .get_eeprom = tg3_get_eeprom,
11846 .set_eeprom = tg3_set_eeprom,
11847 .get_ringparam = tg3_get_ringparam,
11848 .set_ringparam = tg3_set_ringparam,
11849 .get_pauseparam = tg3_get_pauseparam,
11850 .set_pauseparam = tg3_set_pauseparam,
11851 .self_test = tg3_self_test,
11852 .get_strings = tg3_get_strings,
11853 .set_phys_id = tg3_set_phys_id,
11854 .get_ethtool_stats = tg3_get_ethtool_stats,
11855 .get_coalesce = tg3_get_coalesce,
11856 .set_coalesce = tg3_set_coalesce,
11857 .get_sset_count = tg3_get_sset_count,
11860 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11862 u32 cursize, val, magic;
11864 tp->nvram_size = EEPROM_CHIP_SIZE;
11866 if (tg3_nvram_read(tp, 0, &magic) != 0)
11867 return;
11869 if ((magic != TG3_EEPROM_MAGIC) &&
11870 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11871 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11872 return;
11874 /*
11875 * Size the chip by reading offsets at increasing powers of two.
11876 * When we encounter our validation signature, we know the addressing
11877 * has wrapped around, and thus have our chip size.
11878 */
11879 cursize = 0x10;
11881 while (cursize < tp->nvram_size) {
11882 if (tg3_nvram_read(tp, cursize, &val) != 0)
11883 return;
11885 if (val == magic)
11886 break;
11888 cursize <<= 1;
11891 tp->nvram_size = cursize;
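/* Worked example of the wraparound probe above: on a 1 KiB EEPROM the
 * reads at 0x10, 0x20, ... return ordinary data until cursize reaches
 * 0x400, where the address wraps back to offset 0 and the magic
 * signature reappears, so nvram_size ends up as 0x400.
 */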
11894 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11896 u32 val;
11898 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11899 return;
11901 /* Selfboot format */
11902 if (val != TG3_EEPROM_MAGIC) {
11903 tg3_get_eeprom_size(tp);
11904 return;
11907 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11908 if (val != 0) {
11909 /* This is confusing. We want to operate on the
11910 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11911 * call will read from NVRAM and byteswap the data
11912 * according to the byteswapping settings for all
11913 * other register accesses. This ensures the data we
11914 * want will always reside in the lower 16-bits.
11915 * However, the data in NVRAM is in LE format, which
11916 * means the data from the NVRAM read will always be
11917 * opposite the endianness of the CPU. The 16-bit
11918 * byteswap then brings the data to CPU endianness.
11919 */
11920 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11921 return;
11924 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11927 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11929 u32 nvcfg1;
11931 nvcfg1 = tr32(NVRAM_CFG1);
11932 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11933 tg3_flag_set(tp, FLASH);
11934 } else {
11935 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11936 tw32(NVRAM_CFG1, nvcfg1);
11939 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11940 tg3_flag(tp, 5780_CLASS)) {
11941 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11942 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11943 tp->nvram_jedecnum = JEDEC_ATMEL;
11944 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11945 tg3_flag_set(tp, NVRAM_BUFFERED);
11946 break;
11947 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11948 tp->nvram_jedecnum = JEDEC_ATMEL;
11949 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11950 break;
11951 case FLASH_VENDOR_ATMEL_EEPROM:
11952 tp->nvram_jedecnum = JEDEC_ATMEL;
11953 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11954 tg3_flag_set(tp, NVRAM_BUFFERED);
11955 break;
11956 case FLASH_VENDOR_ST:
11957 tp->nvram_jedecnum = JEDEC_ST;
11958 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11959 tg3_flag_set(tp, NVRAM_BUFFERED);
11960 break;
11961 case FLASH_VENDOR_SAIFUN:
11962 tp->nvram_jedecnum = JEDEC_SAIFUN;
11963 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11964 break;
11965 case FLASH_VENDOR_SST_SMALL:
11966 case FLASH_VENDOR_SST_LARGE:
11967 tp->nvram_jedecnum = JEDEC_SST;
11968 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11969 break;
11971 } else {
11972 tp->nvram_jedecnum = JEDEC_ATMEL;
11973 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11974 tg3_flag_set(tp, NVRAM_BUFFERED);
11978 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11980 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11981 case FLASH_5752PAGE_SIZE_256:
11982 tp->nvram_pagesize = 256;
11983 break;
11984 case FLASH_5752PAGE_SIZE_512:
11985 tp->nvram_pagesize = 512;
11986 break;
11987 case FLASH_5752PAGE_SIZE_1K:
11988 tp->nvram_pagesize = 1024;
11989 break;
11990 case FLASH_5752PAGE_SIZE_2K:
11991 tp->nvram_pagesize = 2048;
11992 break;
11993 case FLASH_5752PAGE_SIZE_4K:
11994 tp->nvram_pagesize = 4096;
11995 break;
11996 case FLASH_5752PAGE_SIZE_264:
11997 tp->nvram_pagesize = 264;
11998 break;
11999 case FLASH_5752PAGE_SIZE_528:
12000 tp->nvram_pagesize = 528;
12001 break;
12005 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12007 u32 nvcfg1;
12009 nvcfg1 = tr32(NVRAM_CFG1);
12011 /* NVRAM protection for TPM */
12012 if (nvcfg1 & (1 << 27))
12013 tg3_flag_set(tp, PROTECTED_NVRAM);
12015 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12016 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12017 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12018 tp->nvram_jedecnum = JEDEC_ATMEL;
12019 tg3_flag_set(tp, NVRAM_BUFFERED);
12020 break;
12021 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12022 tp->nvram_jedecnum = JEDEC_ATMEL;
12023 tg3_flag_set(tp, NVRAM_BUFFERED);
12024 tg3_flag_set(tp, FLASH);
12025 break;
12026 case FLASH_5752VENDOR_ST_M45PE10:
12027 case FLASH_5752VENDOR_ST_M45PE20:
12028 case FLASH_5752VENDOR_ST_M45PE40:
12029 tp->nvram_jedecnum = JEDEC_ST;
12030 tg3_flag_set(tp, NVRAM_BUFFERED);
12031 tg3_flag_set(tp, FLASH);
12032 break;
12035 if (tg3_flag(tp, FLASH)) {
12036 tg3_nvram_get_pagesize(tp, nvcfg1);
12037 } else {
12038 /* For eeprom, set pagesize to maximum eeprom size */
12039 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12041 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12042 tw32(NVRAM_CFG1, nvcfg1);
12046 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12048 u32 nvcfg1, protect = 0;
12050 nvcfg1 = tr32(NVRAM_CFG1);
12052 /* NVRAM protection for TPM */
12053 if (nvcfg1 & (1 << 27)) {
12054 tg3_flag_set(tp, PROTECTED_NVRAM);
12055 protect = 1;
12058 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12059 switch (nvcfg1) {
12060 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12061 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12062 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12063 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12064 tp->nvram_jedecnum = JEDEC_ATMEL;
12065 tg3_flag_set(tp, NVRAM_BUFFERED);
12066 tg3_flag_set(tp, FLASH);
12067 tp->nvram_pagesize = 264;
12068 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12069 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12070 tp->nvram_size = (protect ? 0x3e200 :
12071 TG3_NVRAM_SIZE_512KB);
12072 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12073 tp->nvram_size = (protect ? 0x1f200 :
12074 TG3_NVRAM_SIZE_256KB);
12075 else
12076 tp->nvram_size = (protect ? 0x1f200 :
12077 TG3_NVRAM_SIZE_128KB);
12078 break;
12079 case FLASH_5752VENDOR_ST_M45PE10:
12080 case FLASH_5752VENDOR_ST_M45PE20:
12081 case FLASH_5752VENDOR_ST_M45PE40:
12082 tp->nvram_jedecnum = JEDEC_ST;
12083 tg3_flag_set(tp, NVRAM_BUFFERED);
12084 tg3_flag_set(tp, FLASH);
12085 tp->nvram_pagesize = 256;
12086 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12087 tp->nvram_size = (protect ?
12088 TG3_NVRAM_SIZE_64KB :
12089 TG3_NVRAM_SIZE_128KB);
12090 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12091 tp->nvram_size = (protect ?
12092 TG3_NVRAM_SIZE_64KB :
12093 TG3_NVRAM_SIZE_256KB);
12094 else
12095 tp->nvram_size = (protect ?
12096 TG3_NVRAM_SIZE_128KB :
12097 TG3_NVRAM_SIZE_512KB);
12098 break;
12102 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12104 u32 nvcfg1;
12106 nvcfg1 = tr32(NVRAM_CFG1);
12108 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12109 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12110 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12111 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12112 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12113 tp->nvram_jedecnum = JEDEC_ATMEL;
12114 tg3_flag_set(tp, NVRAM_BUFFERED);
12115 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12117 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12118 tw32(NVRAM_CFG1, nvcfg1);
12119 break;
12120 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12121 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12122 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12123 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12124 tp->nvram_jedecnum = JEDEC_ATMEL;
12125 tg3_flag_set(tp, NVRAM_BUFFERED);
12126 tg3_flag_set(tp, FLASH);
12127 tp->nvram_pagesize = 264;
12128 break;
12129 case FLASH_5752VENDOR_ST_M45PE10:
12130 case FLASH_5752VENDOR_ST_M45PE20:
12131 case FLASH_5752VENDOR_ST_M45PE40:
12132 tp->nvram_jedecnum = JEDEC_ST;
12133 tg3_flag_set(tp, NVRAM_BUFFERED);
12134 tg3_flag_set(tp, FLASH);
12135 tp->nvram_pagesize = 256;
12136 break;
12140 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12142 u32 nvcfg1, protect = 0;
12144 nvcfg1 = tr32(NVRAM_CFG1);
12146 /* NVRAM protection for TPM */
12147 if (nvcfg1 & (1 << 27)) {
12148 tg3_flag_set(tp, PROTECTED_NVRAM);
12149 protect = 1;
12152 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12153 switch (nvcfg1) {
12154 case FLASH_5761VENDOR_ATMEL_ADB021D:
12155 case FLASH_5761VENDOR_ATMEL_ADB041D:
12156 case FLASH_5761VENDOR_ATMEL_ADB081D:
12157 case FLASH_5761VENDOR_ATMEL_ADB161D:
12158 case FLASH_5761VENDOR_ATMEL_MDB021D:
12159 case FLASH_5761VENDOR_ATMEL_MDB041D:
12160 case FLASH_5761VENDOR_ATMEL_MDB081D:
12161 case FLASH_5761VENDOR_ATMEL_MDB161D:
12162 tp->nvram_jedecnum = JEDEC_ATMEL;
12163 tg3_flag_set(tp, NVRAM_BUFFERED);
12164 tg3_flag_set(tp, FLASH);
12165 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12166 tp->nvram_pagesize = 256;
12167 break;
12168 case FLASH_5761VENDOR_ST_A_M45PE20:
12169 case FLASH_5761VENDOR_ST_A_M45PE40:
12170 case FLASH_5761VENDOR_ST_A_M45PE80:
12171 case FLASH_5761VENDOR_ST_A_M45PE16:
12172 case FLASH_5761VENDOR_ST_M_M45PE20:
12173 case FLASH_5761VENDOR_ST_M_M45PE40:
12174 case FLASH_5761VENDOR_ST_M_M45PE80:
12175 case FLASH_5761VENDOR_ST_M_M45PE16:
12176 tp->nvram_jedecnum = JEDEC_ST;
12177 tg3_flag_set(tp, NVRAM_BUFFERED);
12178 tg3_flag_set(tp, FLASH);
12179 tp->nvram_pagesize = 256;
12180 break;
12183 if (protect) {
12184 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12185 } else {
12186 switch (nvcfg1) {
12187 case FLASH_5761VENDOR_ATMEL_ADB161D:
12188 case FLASH_5761VENDOR_ATMEL_MDB161D:
12189 case FLASH_5761VENDOR_ST_A_M45PE16:
12190 case FLASH_5761VENDOR_ST_M_M45PE16:
12191 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12192 break;
12193 case FLASH_5761VENDOR_ATMEL_ADB081D:
12194 case FLASH_5761VENDOR_ATMEL_MDB081D:
12195 case FLASH_5761VENDOR_ST_A_M45PE80:
12196 case FLASH_5761VENDOR_ST_M_M45PE80:
12197 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12198 break;
12199 case FLASH_5761VENDOR_ATMEL_ADB041D:
12200 case FLASH_5761VENDOR_ATMEL_MDB041D:
12201 case FLASH_5761VENDOR_ST_A_M45PE40:
12202 case FLASH_5761VENDOR_ST_M_M45PE40:
12203 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12204 break;
12205 case FLASH_5761VENDOR_ATMEL_ADB021D:
12206 case FLASH_5761VENDOR_ATMEL_MDB021D:
12207 case FLASH_5761VENDOR_ST_A_M45PE20:
12208 case FLASH_5761VENDOR_ST_M_M45PE20:
12209 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12210 break;
12215 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12217 tp->nvram_jedecnum = JEDEC_ATMEL;
12218 tg3_flag_set(tp, NVRAM_BUFFERED);
12219 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12222 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12224 u32 nvcfg1;
12226 nvcfg1 = tr32(NVRAM_CFG1);
12228 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12229 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12230 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12231 tp->nvram_jedecnum = JEDEC_ATMEL;
12232 tg3_flag_set(tp, NVRAM_BUFFERED);
12233 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12235 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12236 tw32(NVRAM_CFG1, nvcfg1);
12237 return;
12238 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12239 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12240 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12241 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12242 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12243 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12244 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12245 tp->nvram_jedecnum = JEDEC_ATMEL;
12246 tg3_flag_set(tp, NVRAM_BUFFERED);
12247 tg3_flag_set(tp, FLASH);
12249 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12250 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12251 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12252 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12253 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12254 break;
12255 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12256 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12257 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12258 break;
12259 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12260 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12261 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12262 break;
12264 break;
12265 case FLASH_5752VENDOR_ST_M45PE10:
12266 case FLASH_5752VENDOR_ST_M45PE20:
12267 case FLASH_5752VENDOR_ST_M45PE40:
12268 tp->nvram_jedecnum = JEDEC_ST;
12269 tg3_flag_set(tp, NVRAM_BUFFERED);
12270 tg3_flag_set(tp, FLASH);
12272 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12273 case FLASH_5752VENDOR_ST_M45PE10:
12274 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12275 break;
12276 case FLASH_5752VENDOR_ST_M45PE20:
12277 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12278 break;
12279 case FLASH_5752VENDOR_ST_M45PE40:
12280 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12281 break;
12283 break;
12284 default:
12285 tg3_flag_set(tp, NO_NVRAM);
12286 return;
12289 tg3_nvram_get_pagesize(tp, nvcfg1);
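/* Only the 264- and 528-byte Atmel DataFlash page geometries appear
 * to need the controller's NVRAM address translation; for any other
 * page size, addresses map straight through.
 */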
12290 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12291 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12295 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12297 u32 nvcfg1;
12299 nvcfg1 = tr32(NVRAM_CFG1);
12301 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12302 case FLASH_5717VENDOR_ATMEL_EEPROM:
12303 case FLASH_5717VENDOR_MICRO_EEPROM:
12304 tp->nvram_jedecnum = JEDEC_ATMEL;
12305 tg3_flag_set(tp, NVRAM_BUFFERED);
12306 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12308 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12309 tw32(NVRAM_CFG1, nvcfg1);
12310 return;
12311 case FLASH_5717VENDOR_ATMEL_MDB011D:
12312 case FLASH_5717VENDOR_ATMEL_ADB011B:
12313 case FLASH_5717VENDOR_ATMEL_ADB011D:
12314 case FLASH_5717VENDOR_ATMEL_MDB021D:
12315 case FLASH_5717VENDOR_ATMEL_ADB021B:
12316 case FLASH_5717VENDOR_ATMEL_ADB021D:
12317 case FLASH_5717VENDOR_ATMEL_45USPT:
12318 tp->nvram_jedecnum = JEDEC_ATMEL;
12319 tg3_flag_set(tp, NVRAM_BUFFERED);
12320 tg3_flag_set(tp, FLASH);
12322 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12323 case FLASH_5717VENDOR_ATMEL_MDB021D:
12324 /* Detect size with tg3_nvram_get_size() */
12325 break;
12326 case FLASH_5717VENDOR_ATMEL_ADB021B:
12327 case FLASH_5717VENDOR_ATMEL_ADB021D:
12328 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12329 break;
12330 default:
12331 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12332 break;
12334 break;
12335 case FLASH_5717VENDOR_ST_M_M25PE10:
12336 case FLASH_5717VENDOR_ST_A_M25PE10:
12337 case FLASH_5717VENDOR_ST_M_M45PE10:
12338 case FLASH_5717VENDOR_ST_A_M45PE10:
12339 case FLASH_5717VENDOR_ST_M_M25PE20:
12340 case FLASH_5717VENDOR_ST_A_M25PE20:
12341 case FLASH_5717VENDOR_ST_M_M45PE20:
12342 case FLASH_5717VENDOR_ST_A_M45PE20:
12343 case FLASH_5717VENDOR_ST_25USPT:
12344 case FLASH_5717VENDOR_ST_45USPT:
12345 tp->nvram_jedecnum = JEDEC_ST;
12346 tg3_flag_set(tp, NVRAM_BUFFERED);
12347 tg3_flag_set(tp, FLASH);
12349 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12350 case FLASH_5717VENDOR_ST_M_M25PE20:
12351 case FLASH_5717VENDOR_ST_M_M45PE20:
12352 /* Detect size with tg3_nvram_get_size() */
12353 break;
12354 case FLASH_5717VENDOR_ST_A_M25PE20:
12355 case FLASH_5717VENDOR_ST_A_M45PE20:
12356 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12357 break;
12358 default:
12359 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12360 break;
12362 break;
12363 default:
12364 tg3_flag_set(tp, NO_NVRAM);
12365 return;
12368 tg3_nvram_get_pagesize(tp, nvcfg1);
12369 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12370 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12373 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12375 u32 nvcfg1, nvmpinstrp;
12377 nvcfg1 = tr32(NVRAM_CFG1);
12378 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12380 switch (nvmpinstrp) {
12381 case FLASH_5720_EEPROM_HD:
12382 case FLASH_5720_EEPROM_LD:
12383 tp->nvram_jedecnum = JEDEC_ATMEL;
12384 tg3_flag_set(tp, NVRAM_BUFFERED);
12386 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12387 tw32(NVRAM_CFG1, nvcfg1);
12388 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12389 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12390 else
12391 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12392 return;
12393 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12394 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12395 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12396 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12397 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12398 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12399 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12400 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12401 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12402 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12403 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12404 case FLASH_5720VENDOR_ATMEL_45USPT:
12405 tp->nvram_jedecnum = JEDEC_ATMEL;
12406 tg3_flag_set(tp, NVRAM_BUFFERED);
12407 tg3_flag_set(tp, FLASH);
12409 switch (nvmpinstrp) {
12410 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12411 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12412 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12413 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12414 break;
12415 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12416 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12417 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12418 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12419 break;
12420 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12421 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12422 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12423 break;
12424 default:
12425 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12426 break;
12428 break;
12429 case FLASH_5720VENDOR_M_ST_M25PE10:
12430 case FLASH_5720VENDOR_M_ST_M45PE10:
12431 case FLASH_5720VENDOR_A_ST_M25PE10:
12432 case FLASH_5720VENDOR_A_ST_M45PE10:
12433 case FLASH_5720VENDOR_M_ST_M25PE20:
12434 case FLASH_5720VENDOR_M_ST_M45PE20:
12435 case FLASH_5720VENDOR_A_ST_M25PE20:
12436 case FLASH_5720VENDOR_A_ST_M45PE20:
12437 case FLASH_5720VENDOR_M_ST_M25PE40:
12438 case FLASH_5720VENDOR_M_ST_M45PE40:
12439 case FLASH_5720VENDOR_A_ST_M25PE40:
12440 case FLASH_5720VENDOR_A_ST_M45PE40:
12441 case FLASH_5720VENDOR_M_ST_M25PE80:
12442 case FLASH_5720VENDOR_M_ST_M45PE80:
12443 case FLASH_5720VENDOR_A_ST_M25PE80:
12444 case FLASH_5720VENDOR_A_ST_M45PE80:
12445 case FLASH_5720VENDOR_ST_25USPT:
12446 case FLASH_5720VENDOR_ST_45USPT:
12447 tp->nvram_jedecnum = JEDEC_ST;
12448 tg3_flag_set(tp, NVRAM_BUFFERED);
12449 tg3_flag_set(tp, FLASH);
12451 switch (nvmpinstrp) {
12452 case FLASH_5720VENDOR_M_ST_M25PE20:
12453 case FLASH_5720VENDOR_M_ST_M45PE20:
12454 case FLASH_5720VENDOR_A_ST_M25PE20:
12455 case FLASH_5720VENDOR_A_ST_M45PE20:
12456 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12457 break;
12458 case FLASH_5720VENDOR_M_ST_M25PE40:
12459 case FLASH_5720VENDOR_M_ST_M45PE40:
12460 case FLASH_5720VENDOR_A_ST_M25PE40:
12461 case FLASH_5720VENDOR_A_ST_M45PE40:
12462 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12463 break;
12464 case FLASH_5720VENDOR_M_ST_M25PE80:
12465 case FLASH_5720VENDOR_M_ST_M45PE80:
12466 case FLASH_5720VENDOR_A_ST_M25PE80:
12467 case FLASH_5720VENDOR_A_ST_M45PE80:
12468 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12469 break;
12470 default:
12471 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12472 break;
12474 break;
12475 default:
12476 tg3_flag_set(tp, NO_NVRAM);
12477 return;
12480 tg3_nvram_get_pagesize(tp, nvcfg1);
12481 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12482 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12485 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12486 static void __devinit tg3_nvram_init(struct tg3 *tp)
12488 tw32_f(GRC_EEPROM_ADDR,
12489 (EEPROM_ADDR_FSM_RESET |
12490 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12491 EEPROM_ADDR_CLKPERD_SHIFT)));
12493 msleep(1);
12495 /* Enable seeprom accesses. */
12496 tw32_f(GRC_LOCAL_CTRL,
12497 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12498 udelay(100);
12500 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12501 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12502 tg3_flag_set(tp, NVRAM);
12504 if (tg3_nvram_lock(tp)) {
12505 netdev_warn(tp->dev,
12506 "Cannot get nvram lock, %s failed\n",
12507 __func__);
12508 return;
12510 tg3_enable_nvram_access(tp);
12512 tp->nvram_size = 0;
12514 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12515 tg3_get_5752_nvram_info(tp);
12516 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12517 tg3_get_5755_nvram_info(tp);
12518 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12519 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12520 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12521 tg3_get_5787_nvram_info(tp);
12522 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12523 tg3_get_5761_nvram_info(tp);
12524 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12525 tg3_get_5906_nvram_info(tp);
12526 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12527 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12528 tg3_get_57780_nvram_info(tp);
12529 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12530 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12531 tg3_get_5717_nvram_info(tp);
12532 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12533 tg3_get_5720_nvram_info(tp);
12534 else
12535 tg3_get_nvram_info(tp);
12537 if (tp->nvram_size == 0)
12538 tg3_get_nvram_size(tp);
12540 tg3_disable_nvram_access(tp);
12541 tg3_nvram_unlock(tp);
12543 } else {
12544 tg3_flag_clear(tp, NVRAM);
12545 tg3_flag_clear(tp, NVRAM_BUFFERED);
12547 tg3_get_eeprom_size(tp);
12551 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12552 u32 offset, u32 len, u8 *buf)
12554 int i, j, rc = 0;
12555 u32 val;
12557 for (i = 0; i < len; i += 4) {
12558 u32 addr;
12559 __be32 data;
12561 addr = offset + i;
12563 memcpy(&data, buf + i, 4);
12566 /* The SEEPROM interface expects the data to always be opposite
12567 * the native endian format. We accomplish this by reversing
12568 * all the operations that would have been performed on the
12569 * data from a call to tg3_nvram_read_be32(). */
12571 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12573 val = tr32(GRC_EEPROM_ADDR);
12574 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12576 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12577 EEPROM_ADDR_READ);
12578 tw32(GRC_EEPROM_ADDR, val |
12579 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12580 (addr & EEPROM_ADDR_ADDR_MASK) |
12581 EEPROM_ADDR_START |
12582 EEPROM_ADDR_WRITE);
12584 for (j = 0; j < 1000; j++) {
12585 val = tr32(GRC_EEPROM_ADDR);
12587 if (val & EEPROM_ADDR_COMPLETE)
12588 break;
12589 msleep(1);
12591 if (!(val & EEPROM_ADDR_COMPLETE)) {
12592 rc = -EBUSY;
12593 break;
12597 return rc;
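/* Unbuffered flash can only be erased a full page at a time, so a
 * partial-page write has to be done as read-modify-write: fetch the
 * page, merge in the new data, erase, and reprogram.
 */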
12600 /* offset and length are dword aligned */
12601 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12602 u8 *buf)
12604 int ret = 0;
12605 u32 pagesize = tp->nvram_pagesize;
12606 u32 pagemask = pagesize - 1;
12607 u32 nvram_cmd;
12608 u8 *tmp;
12610 tmp = kmalloc(pagesize, GFP_KERNEL);
12611 if (tmp == NULL)
12612 return -ENOMEM;
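/* Process one flash page per iteration: read the page into the
 * scratch buffer, splice in the caller's data, erase the page, then
 * program it back one 32-bit word at a time.
 */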
12614 while (len) {
12615 int j;
12616 u32 phy_addr, page_off, size;
12618 phy_addr = offset & ~pagemask;
12620 for (j = 0; j < pagesize; j += 4) {
12621 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12622 (__be32 *) (tmp + j));
12623 if (ret)
12624 break;
12626 if (ret)
12627 break;
12629 page_off = offset & pagemask;
12630 size = pagesize;
12631 if (len < size)
12632 size = len;
12634 len -= size;
12636 memcpy(tmp + page_off, buf, size);
12638 offset = offset + (pagesize - page_off);
12640 tg3_enable_nvram_access(tp);
12643 /* Before we can erase the flash page, we need
12644 * to issue a special "write enable" command. */
12646 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12648 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12649 break;
12651 /* Erase the target page */
12652 tw32(NVRAM_ADDR, phy_addr);
12654 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12655 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12657 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12658 break;
12660 /* Issue another write enable to start the write. */
12661 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12663 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12664 break;
12666 for (j = 0; j < pagesize; j += 4) {
12667 __be32 data;
12669 data = *((__be32 *) (tmp + j));
12671 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12673 tw32(NVRAM_ADDR, phy_addr + j);
12675 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12676 NVRAM_CMD_WR;
12678 if (j == 0)
12679 nvram_cmd |= NVRAM_CMD_FIRST;
12680 else if (j == (pagesize - 4))
12681 nvram_cmd |= NVRAM_CMD_LAST;
12683 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12684 break;
12686 if (ret)
12687 break;
12690 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12691 tg3_nvram_exec_cmd(tp, nvram_cmd);
12693 kfree(tmp);
12695 return ret;
12698 /* offset and length are dword aligned */
12699 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12700 u8 *buf)
12702 int i, ret = 0;
12704 for (i = 0; i < len; i += 4, offset += 4) {
12705 u32 page_off, phy_addr, nvram_cmd;
12706 __be32 data;
12708 memcpy(&data, buf + i, 4);
12709 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12711 page_off = offset % tp->nvram_pagesize;
12713 phy_addr = tg3_nvram_phys_addr(tp, offset);
12715 tw32(NVRAM_ADDR, phy_addr);
12717 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
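/* Frame the burst for the controller: FIRST marks the first word of
 * a page (or of the whole transfer), LAST marks the last word of a
 * page or of the buffer.
 */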
12719 if (page_off == 0 || i == 0)
12720 nvram_cmd |= NVRAM_CMD_FIRST;
12721 if (page_off == (tp->nvram_pagesize - 4))
12722 nvram_cmd |= NVRAM_CMD_LAST;
12724 if (i == (len - 4))
12725 nvram_cmd |= NVRAM_CMD_LAST;
12727 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12728 !tg3_flag(tp, 5755_PLUS) &&
12729 (tp->nvram_jedecnum == JEDEC_ST) &&
12730 (nvram_cmd & NVRAM_CMD_FIRST)) {
12732 if ((ret = tg3_nvram_exec_cmd(tp,
12733 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12734 NVRAM_CMD_DONE)))
12736 break;
12738 if (!tg3_flag(tp, FLASH)) {
12739 /* We always do complete word writes to eeprom. */
12740 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12743 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12744 break;
12746 return ret;
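/* Top-level NVRAM write path: drop the eeprom write-protect GPIO if
 * necessary, then dispatch to the legacy-eeprom, buffered-flash, or
 * unbuffered-flash writer.
 */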
12749 /* offset and length are dword aligned */
12750 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12752 int ret;
12754 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12755 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12756 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12757 udelay(40);
12760 if (!tg3_flag(tp, NVRAM)) {
12761 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12762 } else {
12763 u32 grc_mode;
12765 ret = tg3_nvram_lock(tp);
12766 if (ret)
12767 return ret;
12769 tg3_enable_nvram_access(tp);
12770 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12771 tw32(NVRAM_WRITE1, 0x406);
12773 grc_mode = tr32(GRC_MODE);
12774 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12776 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12777 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12778 buf);
12779 } else {
12780 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12781 buf);
12784 grc_mode = tr32(GRC_MODE);
12785 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12787 tg3_disable_nvram_access(tp);
12788 tg3_nvram_unlock(tp);
12791 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12792 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12793 udelay(40);
12796 return ret;
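/* Boards with no usable PHY ID in NVRAM are identified by their PCI
 * subsystem IDs instead; a phy_id of 0 here marks a serdes (fiber)
 * board.
 */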
12799 struct subsys_tbl_ent {
12800 u16 subsys_vendor, subsys_devid;
12801 u32 phy_id;
12804 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12805 /* Broadcom boards. */
12806 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12807 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12808 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12809 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12810 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12811 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12812 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12813 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12814 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12815 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12816 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12817 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12818 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12819 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12820 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12821 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12822 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12823 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12824 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12825 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12826 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12827 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12829 /* 3com boards. */
12830 { TG3PCI_SUBVENDOR_ID_3COM,
12831 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12832 { TG3PCI_SUBVENDOR_ID_3COM,
12833 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12834 { TG3PCI_SUBVENDOR_ID_3COM,
12835 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12836 { TG3PCI_SUBVENDOR_ID_3COM,
12837 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12838 { TG3PCI_SUBVENDOR_ID_3COM,
12839 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12841 /* DELL boards. */
12842 { TG3PCI_SUBVENDOR_ID_DELL,
12843 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12844 { TG3PCI_SUBVENDOR_ID_DELL,
12845 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12846 { TG3PCI_SUBVENDOR_ID_DELL,
12847 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12848 { TG3PCI_SUBVENDOR_ID_DELL,
12849 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12851 /* Compaq boards. */
12852 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12853 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12854 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12855 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12856 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12857 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12858 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12859 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12860 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12861 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12863 /* IBM boards. */
12864 { TG3PCI_SUBVENDOR_ID_IBM,
12865 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12868 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12870 int i;
12872 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12873 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12874 tp->pdev->subsystem_vendor) &&
12875 (subsys_id_to_phy_id[i].subsys_devid ==
12876 tp->pdev->subsystem_device))
12877 return &subsys_id_to_phy_id[i];
12879 return NULL;
12882 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12884 u32 val;
12886 tp->phy_id = TG3_PHY_ID_INVALID;
12887 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12889 /* Assume an onboard device and WOL capable by default. */
12890 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12891 tg3_flag_set(tp, WOL_CAP);
12893 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12894 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12895 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12896 tg3_flag_set(tp, IS_NIC);
12898 val = tr32(VCPU_CFGSHDW);
12899 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12900 tg3_flag_set(tp, ASPM_WORKAROUND);
12901 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12902 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12903 tg3_flag_set(tp, WOL_ENABLE);
12904 device_set_wakeup_enable(&tp->pdev->dev, true);
12906 goto done;
12909 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12910 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12911 u32 nic_cfg, led_cfg;
12912 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12913 int eeprom_phy_serdes = 0;
12915 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12916 tp->nic_sram_data_cfg = nic_cfg;
12918 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12919 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12920 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12921 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12922 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12923 (ver > 0) && (ver < 0x100))
12924 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12926 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12927 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12929 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12930 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12931 eeprom_phy_serdes = 1;
12933 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
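/* The PHY ID is stored in SRAM as two packed halves; reassemble it
 * into the same 32-bit layout that tg3_phy_probe() builds from the
 * MII_PHYSID1/MII_PHYSID2 registers so the values compare directly.
 */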
12934 if (nic_phy_id != 0) {
12935 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12936 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12938 eeprom_phy_id = (id1 >> 16) << 10;
12939 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12940 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12941 } else
12942 eeprom_phy_id = 0;
12944 tp->phy_id = eeprom_phy_id;
12945 if (eeprom_phy_serdes) {
12946 if (!tg3_flag(tp, 5705_PLUS))
12947 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12948 else
12949 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12952 if (tg3_flag(tp, 5750_PLUS))
12953 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12954 SHASTA_EXT_LED_MODE_MASK);
12955 else
12956 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12958 switch (led_cfg) {
12959 default:
12960 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12961 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12962 break;
12964 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12965 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12966 break;
12968 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12969 tp->led_ctrl = LED_CTRL_MODE_MAC;
12971 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12972 * read on some older 5700/5701 bootcode. */
12974 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12975 ASIC_REV_5700 ||
12976 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12977 ASIC_REV_5701)
12978 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12980 break;
12982 case SHASTA_EXT_LED_SHARED:
12983 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12984 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12985 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12986 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12987 LED_CTRL_MODE_PHY_2);
12988 break;
12990 case SHASTA_EXT_LED_MAC:
12991 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12992 break;
12994 case SHASTA_EXT_LED_COMBO:
12995 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12996 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12997 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12998 LED_CTRL_MODE_PHY_2);
12999 break;
13003 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13004 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13005 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13006 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13008 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13009 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13011 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13012 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13013 if ((tp->pdev->subsystem_vendor ==
13014 PCI_VENDOR_ID_ARIMA) &&
13015 (tp->pdev->subsystem_device == 0x205a ||
13016 tp->pdev->subsystem_device == 0x2063))
13017 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13018 } else {
13019 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13020 tg3_flag_set(tp, IS_NIC);
13023 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13024 tg3_flag_set(tp, ENABLE_ASF);
13025 if (tg3_flag(tp, 5750_PLUS))
13026 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13029 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13030 tg3_flag(tp, 5750_PLUS))
13031 tg3_flag_set(tp, ENABLE_APE);
13033 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13034 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13035 tg3_flag_clear(tp, WOL_CAP);
13037 if (tg3_flag(tp, WOL_CAP) &&
13038 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13039 tg3_flag_set(tp, WOL_ENABLE);
13040 device_set_wakeup_enable(&tp->pdev->dev, true);
13043 if (cfg2 & (1 << 17))
13044 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13046 /* serdes signal pre-emphasis in register 0x590 is set by the
13047 * bootcode if bit 18 is set */
13048 if (cfg2 & (1 << 18))
13049 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13051 if ((tg3_flag(tp, 57765_PLUS) ||
13052 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13053 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13054 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13055 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13057 if (tg3_flag(tp, PCI_EXPRESS) &&
13058 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13059 !tg3_flag(tp, 57765_PLUS)) {
13060 u32 cfg3;
13062 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13063 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13064 tg3_flag_set(tp, ASPM_WORKAROUND);
13067 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13068 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13069 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13070 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13071 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13072 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13074 done:
13075 if (tg3_flag(tp, WOL_CAP))
13076 device_set_wakeup_enable(&tp->pdev->dev,
13077 tg3_flag(tp, WOL_ENABLE));
13078 else
13079 device_set_wakeup_capable(&tp->pdev->dev, false);
13082 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13084 int i;
13085 u32 val;
13087 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13088 tw32(OTP_CTRL, cmd);
13090 /* Wait for up to 1 ms for command to execute. */
13091 for (i = 0; i < 100; i++) {
13092 val = tr32(OTP_STATUS);
13093 if (val & OTP_STATUS_CMD_DONE)
13094 break;
13095 udelay(10);
13098 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13101 /* Read the gphy configuration from the OTP region of the chip. The gphy
13102 * configuration is a 32-bit value that straddles the alignment boundary.
13103 * We do two 32-bit reads and then shift and merge the results. */
13105 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13107 u32 bhalf_otp, thalf_otp;
13109 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13111 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13112 return 0;
13114 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13116 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13117 return 0;
13119 thalf_otp = tr32(OTP_READ_DATA);
13121 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13123 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13124 return 0;
13126 bhalf_otp = tr32(OTP_READ_DATA);
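/* The 32-bit gphy config straddles the two reads: its upper half is
 * in the low 16 bits of the first word, its lower half in the high
 * 16 bits of the second.
 */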
13128 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13131 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13133 u32 adv = ADVERTISED_Autoneg |
13134 ADVERTISED_Pause;
13136 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13137 adv |= ADVERTISED_1000baseT_Half |
13138 ADVERTISED_1000baseT_Full;
13140 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13141 adv |= ADVERTISED_100baseT_Half |
13142 ADVERTISED_100baseT_Full |
13143 ADVERTISED_10baseT_Half |
13144 ADVERTISED_10baseT_Full |
13145 ADVERTISED_TP;
13146 else
13147 adv |= ADVERTISED_FIBRE;
13149 tp->link_config.advertising = adv;
13150 tp->link_config.speed = SPEED_INVALID;
13151 tp->link_config.duplex = DUPLEX_INVALID;
13152 tp->link_config.autoneg = AUTONEG_ENABLE;
13153 tp->link_config.active_speed = SPEED_INVALID;
13154 tp->link_config.active_duplex = DUPLEX_INVALID;
13155 tp->link_config.orig_speed = SPEED_INVALID;
13156 tp->link_config.orig_duplex = DUPLEX_INVALID;
13157 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13160 static int __devinit tg3_phy_probe(struct tg3 *tp)
13162 u32 hw_phy_id_1, hw_phy_id_2;
13163 u32 hw_phy_id, hw_phy_id_masked;
13164 int err;
13166 /* flow control autonegotiation is default behavior */
13167 tg3_flag_set(tp, PAUSE_AUTONEG);
13168 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13170 if (tg3_flag(tp, USE_PHYLIB))
13171 return tg3_phy_init(tp);
13173 /* Reading the PHY ID register can conflict with ASF
13174 * firmware access to the PHY hardware. */
13176 err = 0;
13177 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13178 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13179 } else {
13180 /* Now read the physical PHY_ID from the chip and verify
13181 * that it is sane. If it doesn't look good, we fall back to
13182 * either the hard-coded, table-based PHY_ID or, failing that,
13183 * the value found in the eeprom area. */
13185 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13186 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13188 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13189 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13190 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13192 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13195 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13196 tp->phy_id = hw_phy_id;
13197 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13198 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13199 else
13200 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13201 } else {
13202 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13203 /* Do nothing, phy ID already set up in
13204 * tg3_get_eeprom_hw_cfg(). */
13206 } else {
13207 struct subsys_tbl_ent *p;
13209 /* No eeprom signature? Try the hardcoded
13210 * subsys device table. */
13212 p = tg3_lookup_by_subsys(tp);
13213 if (!p)
13214 return -ENODEV;
13216 tp->phy_id = p->phy_id;
13217 if (!tp->phy_id ||
13218 tp->phy_id == TG3_PHY_ID_BCM8002)
13219 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13223 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13224 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13225 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13226 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13227 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13228 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13229 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13230 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13232 tg3_phy_init_link_config(tp);
13234 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13235 !tg3_flag(tp, ENABLE_APE) &&
13236 !tg3_flag(tp, ENABLE_ASF)) {
13237 u32 bmsr, mask;
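/* MII_BMSR latches link-down events, so read it twice; the second
 * read reflects the current link state.
 */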
13239 tg3_readphy(tp, MII_BMSR, &bmsr);
13240 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13241 (bmsr & BMSR_LSTATUS))
13242 goto skip_phy_reset;
13244 err = tg3_phy_reset(tp);
13245 if (err)
13246 return err;
13248 tg3_phy_set_wirespeed(tp);
13250 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13251 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13252 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13253 if (!tg3_copper_is_advertising_all(tp, mask)) {
13254 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13255 tp->link_config.flowctrl);
13257 tg3_writephy(tp, MII_BMCR,
13258 BMCR_ANENABLE | BMCR_ANRESTART);
13262 skip_phy_reset:
13263 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13264 err = tg3_init_5401phy_dsp(tp);
13265 if (err)
13266 return err;
13268 err = tg3_init_5401phy_dsp(tp);
13271 return err;
13274 static void __devinit tg3_read_vpd(struct tg3 *tp)
13276 u8 *vpd_data;
13277 unsigned int block_end, rosize, len;
13278 u32 vpdlen;
13279 int j, i = 0;
13281 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13282 if (!vpd_data)
13283 goto out_no_vpd;
13285 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13286 if (i < 0)
13287 goto out_not_found;
13289 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13290 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13291 i += PCI_VPD_LRDT_TAG_SIZE;
13293 if (block_end > vpdlen)
13294 goto out_not_found;
13296 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13297 PCI_VPD_RO_KEYWORD_MFR_ID);
13298 if (j > 0) {
13299 len = pci_vpd_info_field_size(&vpd_data[j]);
13301 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13302 if (j + len > block_end || len != 4 ||
13303 memcmp(&vpd_data[j], "1028", 4))
13304 goto partno;
13306 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13307 PCI_VPD_RO_KEYWORD_VENDOR0);
13308 if (j < 0)
13309 goto partno;
13311 len = pci_vpd_info_field_size(&vpd_data[j]);
13313 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13314 if (j + len > block_end)
13315 goto partno;
13317 memcpy(tp->fw_ver, &vpd_data[j], len);
13318 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13321 partno:
13322 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13323 PCI_VPD_RO_KEYWORD_PARTNO);
13324 if (i < 0)
13325 goto out_not_found;
13327 len = pci_vpd_info_field_size(&vpd_data[i]);
13329 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13330 if (len > TG3_BPN_SIZE ||
13331 (len + i) > vpdlen)
13332 goto out_not_found;
13334 memcpy(tp->board_part_number, &vpd_data[i], len);
13336 out_not_found:
13337 kfree(vpd_data);
13338 if (tp->board_part_number[0])
13339 return;
13341 out_no_vpd:
13342 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13343 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13344 strcpy(tp->board_part_number, "BCM5717");
13345 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13346 strcpy(tp->board_part_number, "BCM5718");
13347 else
13348 goto nomatch;
13349 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13350 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13351 strcpy(tp->board_part_number, "BCM57780");
13352 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13353 strcpy(tp->board_part_number, "BCM57760");
13354 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13355 strcpy(tp->board_part_number, "BCM57790");
13356 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13357 strcpy(tp->board_part_number, "BCM57788");
13358 else
13359 goto nomatch;
13360 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13361 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13362 strcpy(tp->board_part_number, "BCM57761");
13363 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13364 strcpy(tp->board_part_number, "BCM57765");
13365 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13366 strcpy(tp->board_part_number, "BCM57781");
13367 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13368 strcpy(tp->board_part_number, "BCM57785");
13369 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13370 strcpy(tp->board_part_number, "BCM57791");
13371 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13372 strcpy(tp->board_part_number, "BCM57795");
13373 else
13374 goto nomatch;
13375 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13376 strcpy(tp->board_part_number, "BCM95906");
13377 } else {
13378 nomatch:
13379 strcpy(tp->board_part_number, "none");
13383 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13385 u32 val;
13387 if (tg3_nvram_read(tp, offset, &val) ||
13388 (val & 0xfc000000) != 0x0c000000 ||
13389 tg3_nvram_read(tp, offset + 4, &val) ||
13390 val != 0)
13391 return 0;
13393 return 1;
13396 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13398 u32 val, offset, start, ver_offset;
13399 int i, dst_off;
13400 bool newver = false;
13402 if (tg3_nvram_read(tp, 0xc, &offset) ||
13403 tg3_nvram_read(tp, 0x4, &start))
13404 return;
13406 offset = tg3_nvram_logical_addr(tp, offset);
13408 if (tg3_nvram_read(tp, offset, &val))
13409 return;
13411 if ((val & 0xfc000000) == 0x0c000000) {
13412 if (tg3_nvram_read(tp, offset + 4, &val))
13413 return;
13415 if (val == 0)
13416 newver = true;
13419 dst_off = strlen(tp->fw_ver);
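/* Newer bootcode images embed a printable version string; older
 * ones only store major/minor numbers in the NVRAM directory, which
 * are formatted by hand below.
 */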
13421 if (newver) {
13422 if (TG3_VER_SIZE - dst_off < 16 ||
13423 tg3_nvram_read(tp, offset + 8, &ver_offset))
13424 return;
13426 offset = offset + ver_offset - start;
13427 for (i = 0; i < 16; i += 4) {
13428 __be32 v;
13429 if (tg3_nvram_read_be32(tp, offset + i, &v))
13430 return;
13432 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13434 } else {
13435 u32 major, minor;
13437 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13438 return;
13440 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13441 TG3_NVM_BCVER_MAJSFT;
13442 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13443 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13444 "v%d.%02d", major, minor);
13448 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13450 u32 val, major, minor;
13452 /* Use native endian representation */
13453 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13454 return;
13456 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13457 TG3_NVM_HWSB_CFG1_MAJSFT;
13458 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13459 TG3_NVM_HWSB_CFG1_MINSFT;
13461 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13464 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13466 u32 offset, major, minor, build;
13468 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13470 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13471 return;
13473 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13474 case TG3_EEPROM_SB_REVISION_0:
13475 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13476 break;
13477 case TG3_EEPROM_SB_REVISION_2:
13478 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13479 break;
13480 case TG3_EEPROM_SB_REVISION_3:
13481 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13482 break;
13483 case TG3_EEPROM_SB_REVISION_4:
13484 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13485 break;
13486 case TG3_EEPROM_SB_REVISION_5:
13487 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13488 break;
13489 case TG3_EEPROM_SB_REVISION_6:
13490 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13491 break;
13492 default:
13493 return;
13496 if (tg3_nvram_read(tp, offset, &val))
13497 return;
13499 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13500 TG3_EEPROM_SB_EDH_BLD_SHFT;
13501 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13502 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13503 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13505 if (minor > 99 || build > 26)
13506 return;
13508 offset = strlen(tp->fw_ver);
13509 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13510 " v%d.%02d", major, minor);
13512 if (build > 0) {
13513 offset = strlen(tp->fw_ver);
13514 if (offset < TG3_VER_SIZE - 1)
13515 tp->fw_ver[offset] = 'a' + build - 1;
13519 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13521 u32 val, offset, start;
13522 int i, vlen;
13524 for (offset = TG3_NVM_DIR_START;
13525 offset < TG3_NVM_DIR_END;
13526 offset += TG3_NVM_DIRENT_SIZE) {
13527 if (tg3_nvram_read(tp, offset, &val))
13528 return;
13530 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13531 break;
13534 if (offset == TG3_NVM_DIR_END)
13535 return;
13537 if (!tg3_flag(tp, 5705_PLUS))
13538 start = 0x08000000;
13539 else if (tg3_nvram_read(tp, offset - 4, &start))
13540 return;
13542 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13543 !tg3_fw_img_is_valid(tp, offset) ||
13544 tg3_nvram_read(tp, offset + 8, &val))
13545 return;
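/* val is the address of the version string inside the loaded image;
 * subtracting the image load address (start) rebases it to an
 * offset from the image start in NVRAM.
 */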
13547 offset += val - start;
13549 vlen = strlen(tp->fw_ver);
13551 tp->fw_ver[vlen++] = ',';
13552 tp->fw_ver[vlen++] = ' ';
13554 for (i = 0; i < 4; i++) {
13555 __be32 v;
13556 if (tg3_nvram_read_be32(tp, offset, &v))
13557 return;
13559 offset += sizeof(v);
13561 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13562 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13563 break;
13566 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13567 vlen += sizeof(v);
13571 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13573 int vlen;
13574 u32 apedata;
13575 char *fwtype;
13577 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13578 return;
13580 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13581 if (apedata != APE_SEG_SIG_MAGIC)
13582 return;
13584 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13585 if (!(apedata & APE_FW_STATUS_READY))
13586 return;
13588 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13590 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13591 tg3_flag_set(tp, APE_HAS_NCSI);
13592 fwtype = "NCSI";
13593 } else {
13594 fwtype = "DASH";
13597 vlen = strlen(tp->fw_ver);
13599 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13600 fwtype,
13601 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13602 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13603 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13604 (apedata & APE_FW_VERSION_BLDMSK));
13607 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13609 u32 val;
13610 bool vpd_vers = false;
13612 if (tp->fw_ver[0] != 0)
13613 vpd_vers = true;
13615 if (tg3_flag(tp, NO_NVRAM)) {
13616 strcat(tp->fw_ver, "sb");
13617 return;
13620 if (tg3_nvram_read(tp, 0, &val))
13621 return;
13623 if (val == TG3_EEPROM_MAGIC)
13624 tg3_read_bc_ver(tp);
13625 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13626 tg3_read_sb_ver(tp, val);
13627 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13628 tg3_read_hwsb_ver(tp);
13629 else
13630 return;
13632 if (vpd_vers)
13633 goto done;
13635 if (tg3_flag(tp, ENABLE_APE)) {
13636 if (tg3_flag(tp, ENABLE_ASF))
13637 tg3_read_dash_ver(tp);
13638 } else if (tg3_flag(tp, ENABLE_ASF)) {
13639 tg3_read_mgmtfw_ver(tp);
13642 done:
13643 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13646 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13648 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13650 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13651 return TG3_RX_RET_MAX_SIZE_5717;
13652 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13653 return TG3_RX_RET_MAX_SIZE_5700;
13654 else
13655 return TG3_RX_RET_MAX_SIZE_5705;
13658 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13659 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13660 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13661 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13662 { },
13665 static int __devinit tg3_get_invariants(struct tg3 *tp)
13667 u32 misc_ctrl_reg;
13668 u32 pci_state_reg, grc_misc_cfg;
13669 u32 val;
13670 u16 pci_cmd;
13671 int err;
13673 /* Force memory write invalidate off. If we leave it on,
13674 * then on 5700_BX chips we have to enable a workaround.
13675 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13676 * to match the cacheline size. The Broadcom driver has this
13677 * workaround but always turns MWI off, so it never uses it.
13678 * This seems to suggest that the workaround is insufficient. */
13680 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13681 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13682 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13684 /* Important! -- Make sure register accesses are byteswapped
13685 * correctly. Also, for those chips that require it, make
13686 * sure that indirect register accesses are enabled before
13687 * the first operation. */
13689 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13690 &misc_ctrl_reg);
13691 tp->misc_host_ctrl |= (misc_ctrl_reg &
13692 MISC_HOST_CTRL_CHIPREV);
13693 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13694 tp->misc_host_ctrl);
13696 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13697 MISC_HOST_CTRL_CHIPREV_SHIFT);
13698 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13699 u32 prod_id_asic_rev;
13701 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13702 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13703 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13704 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13705 pci_read_config_dword(tp->pdev,
13706 TG3PCI_GEN2_PRODID_ASICREV,
13707 &prod_id_asic_rev);
13708 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13709 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13710 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13711 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13712 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13713 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13714 pci_read_config_dword(tp->pdev,
13715 TG3PCI_GEN15_PRODID_ASICREV,
13716 &prod_id_asic_rev);
13717 else
13718 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13719 &prod_id_asic_rev);
13721 tp->pci_chip_rev_id = prod_id_asic_rev;
13724 /* Wrong chip ID in 5752 A0. This code can be removed later
13725 * as A0 is not in production. */
13727 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13728 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13730 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13731 * we need to disable memory and use config. cycles
13732 * only to access all registers. The 5702/03 chips
13733 * can mistakenly decode the special cycles from the
13734 * ICH chipsets as memory write cycles, causing corruption
13735 * of register and memory space. Only certain ICH bridges
13736 * will drive special cycles with non-zero data during the
13737 * address phase which can fall within the 5703's address
13738 * range. This is not an ICH bug as the PCI spec allows
13739 * non-zero address during special cycles. However, only
13740 * these ICH bridges are known to drive non-zero addresses
13741 * during special cycles.
13743 * Since special cycles do not cross PCI bridges, we only
13744 * enable this workaround if the 5703 is on the secondary
13745 * bus of these ICH bridges. */
13747 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13748 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13749 static struct tg3_dev_id {
13750 u32 vendor;
13751 u32 device;
13752 u32 rev;
13753 } ich_chipsets[] = {
13754 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13755 PCI_ANY_ID },
13756 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13757 PCI_ANY_ID },
13758 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13759 0xa },
13760 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13761 PCI_ANY_ID },
13762 { },
13764 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13765 struct pci_dev *bridge = NULL;
13767 while (pci_id->vendor != 0) {
13768 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13769 bridge);
13770 if (!bridge) {
13771 pci_id++;
13772 continue;
13774 if (pci_id->rev != PCI_ANY_ID) {
13775 if (bridge->revision > pci_id->rev)
13776 continue;
13778 if (bridge->subordinate &&
13779 (bridge->subordinate->number ==
13780 tp->pdev->bus->number)) {
13781 tg3_flag_set(tp, ICH_WORKAROUND);
13782 pci_dev_put(bridge);
13783 break;
13788 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13789 static struct tg3_dev_id {
13790 u32 vendor;
13791 u32 device;
13792 } bridge_chipsets[] = {
13793 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13794 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13795 { },
13797 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13798 struct pci_dev *bridge = NULL;
13800 while (pci_id->vendor != 0) {
13801 bridge = pci_get_device(pci_id->vendor,
13802 pci_id->device,
13803 bridge);
13804 if (!bridge) {
13805 pci_id++;
13806 continue;
13808 if (bridge->subordinate &&
13809 (bridge->subordinate->number <=
13810 tp->pdev->bus->number) &&
13811 (bridge->subordinate->subordinate >=
13812 tp->pdev->bus->number)) {
13813 tg3_flag_set(tp, 5701_DMA_BUG);
13814 pci_dev_put(bridge);
13815 break;
13820 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13821 * DMA addresses > 40-bit. This bridge may have additional
13822 * 57xx devices behind it in some 4-port NIC designs, for example.
13823 * Any tg3 device found behind the bridge will also need the 40-bit
13824 * DMA workaround. */
13826 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13827 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13828 tg3_flag_set(tp, 5780_CLASS);
13829 tg3_flag_set(tp, 40BIT_DMA_BUG);
13830 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13831 } else {
13832 struct pci_dev *bridge = NULL;
13834 do {
13835 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13836 PCI_DEVICE_ID_SERVERWORKS_EPB,
13837 bridge);
13838 if (bridge && bridge->subordinate &&
13839 (bridge->subordinate->number <=
13840 tp->pdev->bus->number) &&
13841 (bridge->subordinate->subordinate >=
13842 tp->pdev->bus->number)) {
13843 tg3_flag_set(tp, 40BIT_DMA_BUG);
13844 pci_dev_put(bridge);
13845 break;
13847 } while (bridge);
13850 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13851 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13852 tp->pdev_peer = tg3_find_peer(tp);
13854 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13855 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13856 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13857 tg3_flag_set(tp, 5717_PLUS);
13859 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13860 tg3_flag(tp, 5717_PLUS))
13861 tg3_flag_set(tp, 57765_PLUS);
13863 /* Intentionally exclude ASIC_REV_5906 */
13864 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13865 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13866 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13867 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13868 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13869 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13870 tg3_flag(tp, 57765_PLUS))
13871 tg3_flag_set(tp, 5755_PLUS);
13873 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13874 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13875 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13876 tg3_flag(tp, 5755_PLUS) ||
13877 tg3_flag(tp, 5780_CLASS))
13878 tg3_flag_set(tp, 5750_PLUS);
13880 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13881 tg3_flag(tp, 5750_PLUS))
13882 tg3_flag_set(tp, 5705_PLUS);
13884 /* Determine TSO capabilities */
13885 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13886 ; /* Do nothing. HW bug. */
13887 else if (tg3_flag(tp, 57765_PLUS))
13888 tg3_flag_set(tp, HW_TSO_3);
13889 else if (tg3_flag(tp, 5755_PLUS) ||
13890 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13891 tg3_flag_set(tp, HW_TSO_2);
13892 else if (tg3_flag(tp, 5750_PLUS)) {
13893 tg3_flag_set(tp, HW_TSO_1);
13894 tg3_flag_set(tp, TSO_BUG);
13895 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13896 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13897 tg3_flag_clear(tp, TSO_BUG);
13898 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13899 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13900 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13901 tg3_flag_set(tp, TSO_BUG);
13902 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13903 tp->fw_needed = FIRMWARE_TG3TSO5;
13904 else
13905 tp->fw_needed = FIRMWARE_TG3TSO;
13908 /* Selectively allow TSO based on operating conditions */
13909 if (tg3_flag(tp, HW_TSO_1) ||
13910 tg3_flag(tp, HW_TSO_2) ||
13911 tg3_flag(tp, HW_TSO_3) ||
13912 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13913 tg3_flag_set(tp, TSO_CAPABLE);
13914 else {
13915 tg3_flag_clear(tp, TSO_CAPABLE);
13916 tg3_flag_clear(tp, TSO_BUG);
13917 tp->fw_needed = NULL;
13920 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13921 tp->fw_needed = FIRMWARE_TG3;
13923 tp->irq_max = 1;
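/* Interrupt capabilities: 5750 and later parts support MSI (with
 * the chip-specific exceptions below); 57765_PLUS parts also
 * support MSI-X with multiple interrupt vectors.
 */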
13925 if (tg3_flag(tp, 5750_PLUS)) {
13926 tg3_flag_set(tp, SUPPORT_MSI);
13927 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13928 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13929 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13930 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13931 tp->pdev_peer == tp->pdev))
13932 tg3_flag_clear(tp, SUPPORT_MSI);
13934 if (tg3_flag(tp, 5755_PLUS) ||
13935 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13936 tg3_flag_set(tp, 1SHOT_MSI);
13939 if (tg3_flag(tp, 57765_PLUS)) {
13940 tg3_flag_set(tp, SUPPORT_MSIX);
13941 tp->irq_max = TG3_IRQ_MAX_VECS;
13945 if (tg3_flag(tp, 5755_PLUS))
13946 tg3_flag_set(tp, SHORT_DMA_BUG);
13948 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13949 tg3_flag_set(tp, 4K_FIFO_LIMIT);
13951 if (tg3_flag(tp, 5717_PLUS))
13952 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13954 if (tg3_flag(tp, 57765_PLUS) &&
13955 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
13956 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13958 if (!tg3_flag(tp, 5705_PLUS) ||
13959 tg3_flag(tp, 5780_CLASS) ||
13960 tg3_flag(tp, USE_JUMBO_BDFLAG))
13961 tg3_flag_set(tp, JUMBO_CAPABLE);
13963 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13964 &pci_state_reg);
13966 if (pci_is_pcie(tp->pdev)) {
13967 u16 lnkctl;
13969 tg3_flag_set(tp, PCI_EXPRESS);
13971 tp->pcie_readrq = 4096;
13972 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13973 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13974 tp->pcie_readrq = 2048;
13976 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13978 pci_read_config_word(tp->pdev,
13979 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
13980 &lnkctl);
13981 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13982 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13983 ASIC_REV_5906) {
13984 tg3_flag_clear(tp, HW_TSO_2);
13985 tg3_flag_clear(tp, TSO_CAPABLE);
13987 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13988 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13989 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13990 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13991 tg3_flag_set(tp, CLKREQ_BUG);
13992 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13993 tg3_flag_set(tp, L1PLLPD_EN);
13995 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13996 /* BCM5785 devices are effectively PCIe devices, and should
13997 * follow PCIe codepaths, but do not have a PCIe capabilities
13998 * section. */
14000 tg3_flag_set(tp, PCI_EXPRESS);
14001 } else if (!tg3_flag(tp, 5705_PLUS) ||
14002 tg3_flag(tp, 5780_CLASS)) {
14003 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14004 if (!tp->pcix_cap) {
14005 dev_err(&tp->pdev->dev,
14006 "Cannot find PCI-X capability, aborting\n");
14007 return -EIO;
14010 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14011 tg3_flag_set(tp, PCIX_MODE);
14014 /* If we have an AMD 762 or VIA K8T800 chipset, write
14015 * reordering to the mailbox registers done by the host
14016 * controller can cause major trouble. We read back from
14017 * every mailbox register write to force the writes to be
14018 * posted to the chip in order. */
14020 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14021 !tg3_flag(tp, PCI_EXPRESS))
14022 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14024 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14025 &tp->pci_cacheline_sz);
14026 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14027 &tp->pci_lat_timer);
14028 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14029 tp->pci_lat_timer < 64) {
14030 tp->pci_lat_timer = 64;
14031 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14032 tp->pci_lat_timer);
14035 /* Important! -- It is critical that the PCI-X hw workaround
14036 * situation is decided before the first MMIO register access. */
14038 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14039 /* 5700 BX chips need to have their TX producer index
14040 * mailboxes written twice to work around a bug. */
14042 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14044 /* If we are in PCI-X mode, enable register write workaround.
14046 * The workaround is to use indirect register accesses
14047 * for all chip writes except those to mailbox registers. */
14049 if (tg3_flag(tp, PCIX_MODE)) {
14050 u32 pm_reg;
14052 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14054 /* The chip can have its power management PCI config
14055 * space registers clobbered due to this bug.
14056 * So explicitly force the chip into D0 here. */
14058 pci_read_config_dword(tp->pdev,
14059 tp->pm_cap + PCI_PM_CTRL,
14060 &pm_reg);
14061 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14062 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14063 pci_write_config_dword(tp->pdev,
14064 tp->pm_cap + PCI_PM_CTRL,
14065 pm_reg);
14067 /* Also, force SERR#/PERR# in PCI command. */
14068 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14069 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14070 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14074 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14075 tg3_flag_set(tp, PCI_HIGH_SPEED);
14076 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14077 tg3_flag_set(tp, PCI_32BIT);
14079 /* Chip-specific fixup from Broadcom driver */
14080 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14081 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14082 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14083 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14086 /* Default fast path register access methods */
14087 tp->read32 = tg3_read32;
14088 tp->write32 = tg3_write32;
14089 tp->read32_mbox = tg3_read32;
14090 tp->write32_mbox = tg3_write32;
14091 tp->write32_tx_mbox = tg3_write32;
14092 tp->write32_rx_mbox = tg3_write32;
14094 /* Various workaround register access methods */
14095 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14096 tp->write32 = tg3_write_indirect_reg32;
14097 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14098 (tg3_flag(tp, PCI_EXPRESS) &&
14099 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14101 /* Back to back register writes can cause problems on these
14102 * chips; the workaround is to read back all reg writes
14103 * except those to mailbox regs.
14105 * See tg3_write_indirect_reg32(). */
14107 tp->write32 = tg3_write_flush_reg32;
14110 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14111 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14112 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14113 tp->write32_rx_mbox = tg3_write_flush_reg32;
14116 if (tg3_flag(tp, ICH_WORKAROUND)) {
14117 tp->read32 = tg3_read_indirect_reg32;
14118 tp->write32 = tg3_write_indirect_reg32;
14119 tp->read32_mbox = tg3_read_indirect_mbox;
14120 tp->write32_mbox = tg3_write_indirect_mbox;
14121 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14122 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14124 iounmap(tp->regs);
14125 tp->regs = NULL;
14127 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14128 pci_cmd &= ~PCI_COMMAND_MEMORY;
14129 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14131 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14132 tp->read32_mbox = tg3_read32_mbox_5906;
14133 tp->write32_mbox = tg3_write32_mbox_5906;
14134 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14135 tp->write32_rx_mbox = tg3_write32_mbox_5906;
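/* When plain MMIO writes are unreliable here, SRAM has to be
 * reached through PCI config space cycles instead.
 */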
14138 if (tp->write32 == tg3_write_indirect_reg32 ||
14139 (tg3_flag(tp, PCIX_MODE) &&
14140 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14141 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14142 tg3_flag_set(tp, SRAM_USE_CONFIG);
14144 /* The memory arbiter has to be enabled in order for SRAM accesses
14145 * to succeed. Normally on powerup the tg3 chip firmware will make
14146 * sure it is enabled, but other entities such as system netboot
14147 * code might disable it.
14148 */
14149 val = tr32(MEMARB_MODE);
14150 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
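/* Determine which PCI function this device is: from the PCI-X status
 * register when in PCI-X mode, from devfn otherwise.
 */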
14152 if (tg3_flag(tp, PCIX_MODE)) {
14153 pci_read_config_dword(tp->pdev,
14154 tp->pcix_cap + PCI_X_STATUS, &val);
14155 tp->pci_fn = val & 0x7;
14156 } else {
14157 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14160 /* Get eeprom hw config before calling tg3_set_power_state().
14161 * In particular, the TG3_FLAG_IS_NIC flag must be
14162 * determined before calling tg3_set_power_state() so that
14163 * we know whether or not to switch out of Vaux power.
14164 * When the flag is set, it means that GPIO1 is used for eeprom
14165 * write protect and also implies that it is a LOM where GPIOs
14166 * are not used to switch power.
14167 */
14168 tg3_get_eeprom_hw_cfg(tp);
14170 if (tg3_flag(tp, ENABLE_APE)) {
14171 /* Allow reads and writes to the
14172 * APE register and memory space.
14173 */
14174 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14175 PCISTATE_ALLOW_APE_SHMEM_WR |
14176 PCISTATE_ALLOW_APE_PSPACE_WR;
14177 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14178 pci_state_reg);
14180 tg3_ape_lock_init(tp);
14183 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14184 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14185 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14186 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14187 tg3_flag(tp, 57765_PLUS))
14188 tg3_flag_set(tp, CPMU_PRESENT);
14190 /* Set up tp->grc_local_ctrl before calling
14191 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14192 * will bring 5700's external PHY out of reset.
14193 * It is also used as eeprom write protect on LOMs.
14194 */
14195 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14196 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14197 tg3_flag(tp, EEPROM_WRITE_PROT))
14198 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14199 GRC_LCLCTRL_GPIO_OUTPUT1);
14200 /* Unused GPIO3 must be driven as output on 5752 because there
14201 * are no pull-up resistors on unused GPIO pins.
14202 */
14203 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14204 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14206 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14207 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14208 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14209 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14211 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14212 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14213 /* Turn off the debug UART. */
14214 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14215 if (tg3_flag(tp, IS_NIC))
14216 /* Keep VMain power. */
14217 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14218 GRC_LCLCTRL_GPIO_OUTPUT0;
14221 /* Switch out of Vaux if it is a NIC */
14222 tg3_pwrsrc_switch_to_vmain(tp);
14224 /* Derive initial jumbo mode from MTU assigned in
14225 * ether_setup() via the alloc_etherdev() call
14226 */
14227 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14228 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14230 /* Determine WakeOnLan speed to use. */
14231 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14232 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14233 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14234 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14235 tg3_flag_clear(tp, WOL_SPEED_100MB);
14236 } else {
14237 tg3_flag_set(tp, WOL_SPEED_100MB);
14240 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14241 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14243 /* A few boards don't want the Ethernet@WireSpeed phy feature */
14244 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14245 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14246 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14247 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14248 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14249 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14250 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14252 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14253 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14254 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14255 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14256 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14258 if (tg3_flag(tp, 5705_PLUS) &&
14259 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14260 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14261 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14262 !tg3_flag(tp, 57765_PLUS)) {
14263 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14265 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14266 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14267 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14268 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14269 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14270 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14271 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14272 } else
14273 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14277 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14278 tp->phy_otp = tg3_read_otp_phycfg(tp);
14279 if (tp->phy_otp == 0)
14280 tp->phy_otp = TG3_OTP_DEFAULT;
14283 if (tg3_flag(tp, CPMU_PRESENT))
14284 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14285 else
14286 tp->mi_mode = MAC_MI_MODE_BASE;
14288 tp->coalesce_mode = 0;
14289 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14290 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14291 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14293 /* Set these bits to enable statistics workaround. */
14294 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14295 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14296 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14297 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14298 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14301 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14302 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14303 tg3_flag_set(tp, USE_PHYLIB);
14305 err = tg3_mdio_init(tp);
14306 if (err)
14307 return err;
14309 /* Initialize data/descriptor byte/word swapping. */
14310 val = tr32(GRC_MODE);
14311 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14312 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14313 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14314 GRC_MODE_B2HRX_ENABLE |
14315 GRC_MODE_HTX2B_ENABLE |
14316 GRC_MODE_HOST_STACKUP);
14317 else
14318 val &= GRC_MODE_HOST_STACKUP;
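/* Only the bits masked above survive from the current GRC_MODE
 * register value; everything else is supplied by tp->grc_mode below.
 */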
14320 tw32(GRC_MODE, val | tp->grc_mode);
14322 tg3_switch_clocks(tp);
14324 /* Clear this out for sanity. */
14325 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14327 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14328 &pci_state_reg);
14329 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14330 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14331 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14333 if (chiprevid == CHIPREV_ID_5701_A0 ||
14334 chiprevid == CHIPREV_ID_5701_B0 ||
14335 chiprevid == CHIPREV_ID_5701_B2 ||
14336 chiprevid == CHIPREV_ID_5701_B5) {
14337 void __iomem *sram_base;
14339 /* Write some dummy words into the SRAM status block
14340 * area and see if they read back correctly. If the value
14341 * read back is bad, force-enable the PCIX workaround.
14342 */
14343 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14345 writel(0x00000000, sram_base);
14346 writel(0x00000000, sram_base + 4);
14347 writel(0xffffffff, sram_base + 4);
14348 if (readl(sram_base) != 0x00000000)
14349 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14353 udelay(50);
14354 tg3_nvram_init(tp);
14356 grc_misc_cfg = tr32(GRC_MISC_CFG);
14357 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14359 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14360 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14361 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14362 tg3_flag_set(tp, IS_5788);
14364 if (!tg3_flag(tp, IS_5788) &&
14365 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14366 tg3_flag_set(tp, TAGGED_STATUS);
14367 if (tg3_flag(tp, TAGGED_STATUS)) {
14368 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14369 HOSTCC_MODE_CLRTICK_TXBD);
14371 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14372 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14373 tp->misc_host_ctrl);
14376 /* Preserve the APE MAC_MODE bits */
14377 if (tg3_flag(tp, ENABLE_APE))
14378 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14379 else
14380 tp->mac_mode = 0;
14382 /* these are limited to 10/100 only */
14383 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14384 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14385 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14386 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14387 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14388 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14389 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14390 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14391 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14392 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14393 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14394 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14395 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14396 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14397 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14398 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14400 err = tg3_phy_probe(tp);
14401 if (err) {
14402 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14403 /* ... but do not return immediately ... */
14404 tg3_mdio_fini(tp);
14407 tg3_read_vpd(tp);
14408 tg3_read_fw_ver(tp);
14410 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14411 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14412 } else {
14413 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14414 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14415 else
14416 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14419 /* 5700 {AX,BX} chips have a broken status block link
14420 * change bit implementation, so we must use the
14421 * status register in those cases.
14422 */
14423 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14424 tg3_flag_set(tp, USE_LINKCHG_REG);
14425 else
14426 tg3_flag_clear(tp, USE_LINKCHG_REG);
14428 /* The led_ctrl is set during tg3_phy_probe; here we might
14429 * have to force the link status polling mechanism based
14430 * upon subsystem IDs.
14431 */
14432 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14433 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14434 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14435 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14436 tg3_flag_set(tp, USE_LINKCHG_REG);
14439 /* For all SERDES we poll the MAC status register. */
14440 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14441 tg3_flag_set(tp, POLL_SERDES);
14442 else
14443 tg3_flag_clear(tp, POLL_SERDES);
14445 tp->rx_offset = NET_IP_ALIGN;
14446 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14447 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14448 tg3_flag(tp, PCIX_MODE)) {
14449 tp->rx_offset = 0;
14450 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14451 tp->rx_copy_thresh = ~(u16)0;
14452 #endif
14455 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14456 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14457 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14459 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14461 /* Increment the rx prod index on the rx std ring by at most
14462 * 8 for these chips to work around hw errata.
14463 */
14464 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14465 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14466 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14467 tp->rx_std_max_post = 8;
14469 if (tg3_flag(tp, ASPM_WORKAROUND))
14470 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14471 PCIE_PWR_MGMT_L1_THRESH_MSK;
14473 return err;
14476 #ifdef CONFIG_SPARC
14477 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14479 struct net_device *dev = tp->dev;
14480 struct pci_dev *pdev = tp->pdev;
14481 struct device_node *dp = pci_device_to_OF_node(pdev);
14482 const unsigned char *addr;
14483 int len;
14485 addr = of_get_property(dp, "local-mac-address", &len);
14486 if (addr && len == 6) {
14487 memcpy(dev->dev_addr, addr, 6);
14488 memcpy(dev->perm_addr, dev->dev_addr, 6);
14489 return 0;
14491 return -ENODEV;
14494 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14496 struct net_device *dev = tp->dev;
14498 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14499 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14500 return 0;
14502 #endif
14504 static int __devinit tg3_get_device_address(struct tg3 *tp)
14506 struct net_device *dev = tp->dev;
14507 u32 hi, lo, mac_offset;
14508 int addr_ok = 0;
14510 #ifdef CONFIG_SPARC
14511 if (!tg3_get_macaddr_sparc(tp))
14512 return 0;
14513 #endif
14515 mac_offset = 0x7c;
14516 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14517 tg3_flag(tp, 5780_CLASS)) {
14518 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14519 mac_offset = 0xcc;
14520 if (tg3_nvram_lock(tp))
14521 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14522 else
14523 tg3_nvram_unlock(tp);
14524 } else if (tg3_flag(tp, 5717_PLUS)) {
14525 if (tp->pci_fn & 1)
14526 mac_offset = 0xcc;
14527 if (tp->pci_fn > 1)
14528 mac_offset += 0x18c;
14529 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14530 mac_offset = 0x10;
14532 /* First try to get it from MAC address mailbox. */
14533 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
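/* 0x484b is ASCII "HK"; the bootcode evidently uses it to flag a
 * valid MAC address stored in the mailbox.
 */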
14534 if ((hi >> 16) == 0x484b) {
14535 dev->dev_addr[0] = (hi >> 8) & 0xff;
14536 dev->dev_addr[1] = (hi >> 0) & 0xff;
14538 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14539 dev->dev_addr[2] = (lo >> 24) & 0xff;
14540 dev->dev_addr[3] = (lo >> 16) & 0xff;
14541 dev->dev_addr[4] = (lo >> 8) & 0xff;
14542 dev->dev_addr[5] = (lo >> 0) & 0xff;
14544 /* Some old bootcode may report a 0 MAC address in SRAM */
14545 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14547 if (!addr_ok) {
14548 /* Next, try NVRAM. */
14549 if (!tg3_flag(tp, NO_NVRAM) &&
14550 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14551 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14552 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14553 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14555 /* Finally just fetch it out of the MAC control regs. */
14556 else {
14557 hi = tr32(MAC_ADDR_0_HIGH);
14558 lo = tr32(MAC_ADDR_0_LOW);
14560 dev->dev_addr[5] = lo & 0xff;
14561 dev->dev_addr[4] = (lo >> 8) & 0xff;
14562 dev->dev_addr[3] = (lo >> 16) & 0xff;
14563 dev->dev_addr[2] = (lo >> 24) & 0xff;
14564 dev->dev_addr[1] = hi & 0xff;
14565 dev->dev_addr[0] = (hi >> 8) & 0xff;
14569 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14570 #ifdef CONFIG_SPARC
14571 if (!tg3_get_default_macaddr_sparc(tp))
14572 return 0;
14573 #endif
14574 return -EINVAL;
14576 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14577 return 0;
14580 #define BOUNDARY_SINGLE_CACHELINE 1
14581 #define BOUNDARY_MULTI_CACHELINE 2
14583 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14585 int cacheline_size;
14586 u8 byte;
14587 int goal;
14589 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14590 if (byte == 0)
14591 cacheline_size = 1024;
14592 else
14593 cacheline_size = (int) byte * 4;
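/* PCI_CACHE_LINE_SIZE is in units of 32-bit words, hence the multiply
 * by 4 (e.g. a register value of 16 means a 64-byte cache line). A
 * value of 0 means the cache line size was never configured, in which
 * case 1024 bytes is assumed above.
 */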
14595 /* On 5703 and later chips, the boundary bits have no
14596 * effect.
14597 */
14598 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14599 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14600 !tg3_flag(tp, PCI_EXPRESS))
14601 goto out;
14603 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14604 goal = BOUNDARY_MULTI_CACHELINE;
14605 #else
14606 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14607 goal = BOUNDARY_SINGLE_CACHELINE;
14608 #else
14609 goal = 0;
14610 #endif
14611 #endif
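/* Summary of the ifdefs above: PPC64/IA64/PARISC ask for bursts split
 * at multi-cacheline boundaries, SPARC64/ALPHA at a single cacheline,
 * and everything else sets goal == 0 (no boundary adjustment
 * requested).
 */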
14613 if (tg3_flag(tp, 57765_PLUS)) {
14614 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14615 goto out;
14618 if (!goal)
14619 goto out;
14621 /* PCI controllers on most RISC systems tend to disconnect
14622 * when a device tries to burst across a cache-line boundary.
14623 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14624 *
14625 * Unfortunately, for PCI-E there are only limited
14626 * write-side controls for this, and thus for reads
14627 * we will still get the disconnects. We'll also waste
14628 * these PCI cycles for both read and write for chips
14629 * other than 5700 and 5701 which do not implement the
14630 * boundary bits.
14631 */
14632 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14633 switch (cacheline_size) {
14634 case 16:
14635 case 32:
14636 case 64:
14637 case 128:
14638 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14639 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14640 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14641 } else {
14642 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14643 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14645 break;
14647 case 256:
14648 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14649 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14650 break;
14652 default:
14653 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14654 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14655 break;
14657 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14658 switch (cacheline_size) {
14659 case 16:
14660 case 32:
14661 case 64:
14662 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14663 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14664 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14665 break;
14667 /* fallthrough */
14668 case 128:
14669 default:
14670 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14671 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14672 break;
14674 } else {
14675 switch (cacheline_size) {
14676 case 16:
14677 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14678 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14679 DMA_RWCTRL_WRITE_BNDRY_16);
14680 break;
14682 /* fallthrough */
14683 case 32:
14684 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14685 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14686 DMA_RWCTRL_WRITE_BNDRY_32);
14687 break;
14689 /* fallthrough */
14690 case 64:
14691 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14692 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14693 DMA_RWCTRL_WRITE_BNDRY_64);
14694 break;
14696 /* fallthrough */
14697 case 128:
14698 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14699 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14700 DMA_RWCTRL_WRITE_BNDRY_128);
14701 break;
14703 /* fallthrough */
14704 case 256:
14705 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14706 DMA_RWCTRL_WRITE_BNDRY_256);
14707 break;
14708 case 512:
14709 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14710 DMA_RWCTRL_WRITE_BNDRY_512);
14711 break;
14712 case 1024:
14713 default:
14714 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14715 DMA_RWCTRL_WRITE_BNDRY_1024);
14716 break;
14720 out:
14721 return val;
14724 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14726 struct tg3_internal_buffer_desc test_desc;
14727 u32 sram_dma_descs;
14728 int i, ret;
14730 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14732 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14733 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14734 tw32(RDMAC_STATUS, 0);
14735 tw32(WDMAC_STATUS, 0);
14737 tw32(BUFMGR_MODE, 0);
14738 tw32(FTQ_RESET, 0);
14740 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14741 test_desc.addr_lo = buf_dma & 0xffffffff;
14742 test_desc.nic_mbuf = 0x00002100;
14743 test_desc.len = size;
14745 /*
14746 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14747 * the *second* time the tg3 driver was getting loaded after an
14748 * initial scan.
14749 *
14750 * Broadcom tells me:
14751 * ...the DMA engine is connected to the GRC block and a DMA
14752 * reset may affect the GRC block in some unpredictable way...
14753 * The behavior of resets to individual blocks has not been tested.
14754 *
14755 * Broadcom noted the GRC reset will also reset all sub-components.
14756 */
14757 if (to_device) {
14758 test_desc.cqid_sqid = (13 << 8) | 2;
14760 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14761 udelay(40);
14762 } else {
14763 test_desc.cqid_sqid = (16 << 8) | 7;
14765 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14766 udelay(40);
14768 test_desc.flags = 0x00000005;
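/* Copy the test descriptor into NIC SRAM one 32-bit word at a time
 * through the PCI memory window, then close the window again.
 */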
14770 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14771 u32 val;
14773 val = *(((u32 *)&test_desc) + i);
14774 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14775 sram_dma_descs + (i * sizeof(u32)));
14776 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14778 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14780 if (to_device)
14781 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14782 else
14783 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14785 ret = -ENODEV;
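/* Poll the completion FIFO for up to 4ms (40 iterations of 100us)
 * for the descriptor to come back.
 */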
14786 for (i = 0; i < 40; i++) {
14787 u32 val;
14789 if (to_device)
14790 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14791 else
14792 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14793 if ((val & 0xffff) == sram_dma_descs) {
14794 ret = 0;
14795 break;
14798 udelay(100);
14801 return ret;
14804 #define TEST_BUFFER_SIZE 0x2000
14806 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14807 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14808 { },
14811 static int __devinit tg3_test_dma(struct tg3 *tp)
14813 dma_addr_t buf_dma;
14814 u32 *buf, saved_dma_rwctrl;
14815 int ret = 0;
14817 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14818 &buf_dma, GFP_KERNEL);
14819 if (!buf) {
14820 ret = -ENOMEM;
14821 goto out_nofree;
14824 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14825 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14827 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14829 if (tg3_flag(tp, 57765_PLUS))
14830 goto out;
14832 if (tg3_flag(tp, PCI_EXPRESS)) {
14833 /* DMA read watermark not used on PCIE */
14834 tp->dma_rwctrl |= 0x00180000;
14835 } else if (!tg3_flag(tp, PCIX_MODE)) {
14836 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14837 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14838 tp->dma_rwctrl |= 0x003f0000;
14839 else
14840 tp->dma_rwctrl |= 0x003f000f;
14841 } else {
14842 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14843 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14844 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14845 u32 read_water = 0x7;
14847 /* If the 5704 is behind the EPB bridge, we can
14848 * do the less restrictive ONE_DMA workaround for
14849 * better performance.
14850 */
14851 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14852 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14853 tp->dma_rwctrl |= 0x8000;
14854 else if (ccval == 0x6 || ccval == 0x7)
14855 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14857 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14858 read_water = 4;
14859 /* Set bit 23 to enable PCIX hw bug fix */
14860 tp->dma_rwctrl |=
14861 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14862 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14863 (1 << 23);
14864 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14865 /* 5780 always in PCIX mode */
14866 tp->dma_rwctrl |= 0x00144000;
14867 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14868 /* 5714 always in PCIX mode */
14869 tp->dma_rwctrl |= 0x00148000;
14870 } else {
14871 tp->dma_rwctrl |= 0x001b000f;
14875 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14876 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14877 tp->dma_rwctrl &= 0xfffffff0;
14879 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14880 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14881 /* Remove this if it causes problems for some boards. */
14882 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14884 /* On 5700/5701 chips, we need to set this bit.
14885 * Otherwise the chip will issue cacheline transactions
14886 * to streamable DMA memory without all of the byte
14887 * enables turned on. This is an error on several
14888 * RISC PCI controllers, in particular sparc64.
14889 *
14890 * On 5703/5704 chips, this bit has been reassigned
14891 * a different meaning. In particular, it is used
14892 * on those chips to enable a PCI-X workaround.
14893 */
14894 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14897 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14899 #if 0
14900 /* Unneeded, already done by tg3_get_invariants. */
14901 tg3_switch_clocks(tp);
14902 #endif
14904 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14905 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14906 goto out;
14908 /* It is best to perform the DMA test with the maximum write burst
14909 * size to expose the 5700/5701 write DMA bug.
14910 */
14911 saved_dma_rwctrl = tp->dma_rwctrl;
14912 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14913 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
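/* Test loop: DMA a known pattern to the chip, DMA it back, and verify
 * the result. On corruption, retry with the write boundary forced
 * down to 16 bytes before giving up.
 */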
14915 while (1) {
14916 u32 *p = buf, i;
14918 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14919 p[i] = i;
14921 /* Send the buffer to the chip. */
14922 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14923 if (ret) {
14924 dev_err(&tp->pdev->dev,
14925 "%s: Buffer write failed. err = %d\n",
14926 __func__, ret);
14927 break;
14930 #if 0
14931 /* validate data reached card RAM correctly. */
14932 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14933 u32 val;
14934 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14935 if (le32_to_cpu(val) != p[i]) {
14936 dev_err(&tp->pdev->dev,
14937 "%s: Buffer corrupted on device! "
14938 "(%d != %d)\n", __func__, val, i);
14939 /* ret = -ENODEV here? */
14941 p[i] = 0;
14943 #endif
14944 /* Now read it back. */
14945 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14946 if (ret) {
14947 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14948 "err = %d\n", __func__, ret);
14949 break;
14952 /* Verify it. */
14953 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14954 if (p[i] == i)
14955 continue;
14957 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14958 DMA_RWCTRL_WRITE_BNDRY_16) {
14959 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14960 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14961 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14962 break;
14963 } else {
14964 dev_err(&tp->pdev->dev,
14965 "%s: Buffer corrupted on read back! "
14966 "(%d != %d)\n", __func__, p[i], i);
14967 ret = -ENODEV;
14968 goto out;
14972 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14973 /* Success. */
14974 ret = 0;
14975 break;
14978 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14979 DMA_RWCTRL_WRITE_BNDRY_16) {
14980 /* DMA test passed without adjusting the DMA boundary;
14981 * now look for chipsets that are known to expose the
14982 * DMA bug without failing the test.
14983 */
14984 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14985 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14986 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14987 } else {
14988 /* Safe to use the calculated DMA boundary. */
14989 tp->dma_rwctrl = saved_dma_rwctrl;
14992 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14995 out:
14996 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14997 out_nofree:
14998 return ret;
15001 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15003 if (tg3_flag(tp, 57765_PLUS)) {
15004 tp->bufmgr_config.mbuf_read_dma_low_water =
15005 DEFAULT_MB_RDMA_LOW_WATER_5705;
15006 tp->bufmgr_config.mbuf_mac_rx_low_water =
15007 DEFAULT_MB_MACRX_LOW_WATER_57765;
15008 tp->bufmgr_config.mbuf_high_water =
15009 DEFAULT_MB_HIGH_WATER_57765;
15011 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15012 DEFAULT_MB_RDMA_LOW_WATER_5705;
15013 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15014 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15015 tp->bufmgr_config.mbuf_high_water_jumbo =
15016 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15017 } else if (tg3_flag(tp, 5705_PLUS)) {
15018 tp->bufmgr_config.mbuf_read_dma_low_water =
15019 DEFAULT_MB_RDMA_LOW_WATER_5705;
15020 tp->bufmgr_config.mbuf_mac_rx_low_water =
15021 DEFAULT_MB_MACRX_LOW_WATER_5705;
15022 tp->bufmgr_config.mbuf_high_water =
15023 DEFAULT_MB_HIGH_WATER_5705;
15024 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15025 tp->bufmgr_config.mbuf_mac_rx_low_water =
15026 DEFAULT_MB_MACRX_LOW_WATER_5906;
15027 tp->bufmgr_config.mbuf_high_water =
15028 DEFAULT_MB_HIGH_WATER_5906;
15031 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15032 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15033 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15034 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15035 tp->bufmgr_config.mbuf_high_water_jumbo =
15036 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15037 } else {
15038 tp->bufmgr_config.mbuf_read_dma_low_water =
15039 DEFAULT_MB_RDMA_LOW_WATER;
15040 tp->bufmgr_config.mbuf_mac_rx_low_water =
15041 DEFAULT_MB_MACRX_LOW_WATER;
15042 tp->bufmgr_config.mbuf_high_water =
15043 DEFAULT_MB_HIGH_WATER;
15045 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15046 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15047 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15048 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15049 tp->bufmgr_config.mbuf_high_water_jumbo =
15050 DEFAULT_MB_HIGH_WATER_JUMBO;
15053 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15054 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15057 static char * __devinit tg3_phy_string(struct tg3 *tp)
15059 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15060 case TG3_PHY_ID_BCM5400: return "5400";
15061 case TG3_PHY_ID_BCM5401: return "5401";
15062 case TG3_PHY_ID_BCM5411: return "5411";
15063 case TG3_PHY_ID_BCM5701: return "5701";
15064 case TG3_PHY_ID_BCM5703: return "5703";
15065 case TG3_PHY_ID_BCM5704: return "5704";
15066 case TG3_PHY_ID_BCM5705: return "5705";
15067 case TG3_PHY_ID_BCM5750: return "5750";
15068 case TG3_PHY_ID_BCM5752: return "5752";
15069 case TG3_PHY_ID_BCM5714: return "5714";
15070 case TG3_PHY_ID_BCM5780: return "5780";
15071 case TG3_PHY_ID_BCM5755: return "5755";
15072 case TG3_PHY_ID_BCM5787: return "5787";
15073 case TG3_PHY_ID_BCM5784: return "5784";
15074 case TG3_PHY_ID_BCM5756: return "5722/5756";
15075 case TG3_PHY_ID_BCM5906: return "5906";
15076 case TG3_PHY_ID_BCM5761: return "5761";
15077 case TG3_PHY_ID_BCM5718C: return "5718C";
15078 case TG3_PHY_ID_BCM5718S: return "5718S";
15079 case TG3_PHY_ID_BCM57765: return "57765";
15080 case TG3_PHY_ID_BCM5719C: return "5719C";
15081 case TG3_PHY_ID_BCM5720C: return "5720C";
15082 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15083 case 0: return "serdes";
15084 default: return "unknown";
15088 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15090 if (tg3_flag(tp, PCI_EXPRESS)) {
15091 strcpy(str, "PCI Express");
15092 return str;
15093 } else if (tg3_flag(tp, PCIX_MODE)) {
15094 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15096 strcpy(str, "PCIX:");
15098 if ((clock_ctrl == 7) ||
15099 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15100 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15101 strcat(str, "133MHz");
15102 else if (clock_ctrl == 0)
15103 strcat(str, "33MHz");
15104 else if (clock_ctrl == 2)
15105 strcat(str, "50MHz");
15106 else if (clock_ctrl == 4)
15107 strcat(str, "66MHz");
15108 else if (clock_ctrl == 6)
15109 strcat(str, "100MHz");
15110 } else {
15111 strcpy(str, "PCI:");
15112 if (tg3_flag(tp, PCI_HIGH_SPEED))
15113 strcat(str, "66MHz");
15114 else
15115 strcat(str, "33MHz");
15117 if (tg3_flag(tp, PCI_32BIT))
15118 strcat(str, ":32-bit");
15119 else
15120 strcat(str, ":64-bit");
15121 return str;
15124 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15126 struct pci_dev *peer;
15127 unsigned int func, devnr = tp->pdev->devfn & ~7;
15129 for (func = 0; func < 8; func++) {
15130 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15131 if (peer && peer != tp->pdev)
15132 break;
15133 pci_dev_put(peer);
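/* pci_get_slot() takes a reference on each candidate; it is dropped
 * above for rejected candidates and further below for the chosen peer.
 */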
15135 /* 5704 can be configured in single-port mode; set peer to
15136 * tp->pdev in that case.
15137 */
15138 if (!peer) {
15139 peer = tp->pdev;
15140 return peer;
15143 /*
15144 * We don't need to keep the refcount elevated; there's no way
15145 * to remove one half of this device without removing the other.
15146 */
15147 pci_dev_put(peer);
15149 return peer;
15152 static void __devinit tg3_init_coal(struct tg3 *tp)
15154 struct ethtool_coalesce *ec = &tp->coal;
15156 memset(ec, 0, sizeof(*ec));
15157 ec->cmd = ETHTOOL_GCOALESCE;
15158 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15159 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15160 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15161 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15162 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15163 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15164 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15165 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15166 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15168 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15169 HOSTCC_MODE_CLRTICK_TXBD)) {
15170 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15171 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15172 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15173 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15176 if (tg3_flag(tp, 5705_PLUS)) {
15177 ec->rx_coalesce_usecs_irq = 0;
15178 ec->tx_coalesce_usecs_irq = 0;
15179 ec->stats_block_coalesce_usecs = 0;
15183 static const struct net_device_ops tg3_netdev_ops = {
15184 .ndo_open = tg3_open,
15185 .ndo_stop = tg3_close,
15186 .ndo_start_xmit = tg3_start_xmit,
15187 .ndo_get_stats64 = tg3_get_stats64,
15188 .ndo_validate_addr = eth_validate_addr,
15189 .ndo_set_rx_mode = tg3_set_rx_mode,
15190 .ndo_set_mac_address = tg3_set_mac_addr,
15191 .ndo_do_ioctl = tg3_ioctl,
15192 .ndo_tx_timeout = tg3_tx_timeout,
15193 .ndo_change_mtu = tg3_change_mtu,
15194 .ndo_fix_features = tg3_fix_features,
15195 .ndo_set_features = tg3_set_features,
15196 #ifdef CONFIG_NET_POLL_CONTROLLER
15197 .ndo_poll_controller = tg3_poll_controller,
15198 #endif
15201 static int __devinit tg3_init_one(struct pci_dev *pdev,
15202 const struct pci_device_id *ent)
15204 struct net_device *dev;
15205 struct tg3 *tp;
15206 int i, err, pm_cap;
15207 u32 sndmbx, rcvmbx, intmbx;
15208 char str[40];
15209 u64 dma_mask, persist_dma_mask;
15210 u32 features = 0;
15212 printk_once(KERN_INFO "%s\n", version);
15214 err = pci_enable_device(pdev);
15215 if (err) {
15216 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15217 return err;
15220 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15221 if (err) {
15222 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15223 goto err_out_disable_pdev;
15226 pci_set_master(pdev);
15228 /* Find power-management capability. */
15229 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15230 if (pm_cap == 0) {
15231 dev_err(&pdev->dev,
15232 "Cannot find Power Management capability, aborting\n");
15233 err = -EIO;
15234 goto err_out_free_res;
15237 err = pci_set_power_state(pdev, PCI_D0);
15238 if (err) {
15239 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15240 goto err_out_free_res;
15243 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15244 if (!dev) {
15245 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15246 err = -ENOMEM;
15247 goto err_out_power_down;
15250 SET_NETDEV_DEV(dev, &pdev->dev);
15252 tp = netdev_priv(dev);
15253 tp->pdev = pdev;
15254 tp->dev = dev;
15255 tp->pm_cap = pm_cap;
15256 tp->rx_mode = TG3_DEF_RX_MODE;
15257 tp->tx_mode = TG3_DEF_TX_MODE;
15259 if (tg3_debug > 0)
15260 tp->msg_enable = tg3_debug;
15261 else
15262 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15264 /* The word/byte swap controls here control register access byte
15265 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15266 * setting below.
15267 */
15268 tp->misc_host_ctrl =
15269 MISC_HOST_CTRL_MASK_PCI_INT |
15270 MISC_HOST_CTRL_WORD_SWAP |
15271 MISC_HOST_CTRL_INDIR_ACCESS |
15272 MISC_HOST_CTRL_PCISTATE_RW;
15274 /* The NONFRM (non-frame) byte/word swap controls take effect
15275 * on descriptor entries, anything which isn't packet data.
15276 *
15277 * The StrongARM chips on the board (one for tx, one for rx)
15278 * are running in big-endian mode.
15279 */
15280 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15281 GRC_MODE_WSWAP_NONFRM_DATA);
15282 #ifdef __BIG_ENDIAN
15283 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15284 #endif
15285 spin_lock_init(&tp->lock);
15286 spin_lock_init(&tp->indirect_lock);
15287 INIT_WORK(&tp->reset_task, tg3_reset_task);
15289 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15290 if (!tp->regs) {
15291 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15292 err = -ENOMEM;
15293 goto err_out_free_dev;
15296 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15297 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15298 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15299 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15300 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15301 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15302 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15303 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15304 tg3_flag_set(tp, ENABLE_APE);
15305 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15306 if (!tp->aperegs) {
15307 dev_err(&pdev->dev,
15308 "Cannot map APE registers, aborting\n");
15309 err = -ENOMEM;
15310 goto err_out_iounmap;
15314 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15315 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15317 dev->ethtool_ops = &tg3_ethtool_ops;
15318 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15319 dev->netdev_ops = &tg3_netdev_ops;
15320 dev->irq = pdev->irq;
15322 err = tg3_get_invariants(tp);
15323 if (err) {
15324 dev_err(&pdev->dev,
15325 "Problem fetching invariants of chip, aborting\n");
15326 goto err_out_apeunmap;
15329 /* The EPB bridge inside 5714, 5715, and 5780 and any
15330 * device behind the EPB cannot support DMA addresses > 40-bit.
15331 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15332 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15333 * do DMA address check in tg3_start_xmit().
15334 */
15335 if (tg3_flag(tp, IS_5788))
15336 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15337 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15338 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15339 #ifdef CONFIG_HIGHMEM
15340 dma_mask = DMA_BIT_MASK(64);
15341 #endif
15342 } else
15343 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15345 /* Configure DMA attributes. */
15346 if (dma_mask > DMA_BIT_MASK(32)) {
15347 err = pci_set_dma_mask(pdev, dma_mask);
15348 if (!err) {
15349 features |= NETIF_F_HIGHDMA;
15350 err = pci_set_consistent_dma_mask(pdev,
15351 persist_dma_mask);
15352 if (err < 0) {
15353 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15354 "DMA for consistent allocations\n");
15355 goto err_out_apeunmap;
15359 if (err || dma_mask == DMA_BIT_MASK(32)) {
15360 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15361 if (err) {
15362 dev_err(&pdev->dev,
15363 "No usable DMA configuration, aborting\n");
15364 goto err_out_apeunmap;
15368 tg3_init_bufmgr_config(tp);
15370 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15372 /* 5700 B0 chips do not support checksumming correctly due
15373 * to hardware bugs.
15374 */
15375 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15376 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15378 if (tg3_flag(tp, 5755_PLUS))
15379 features |= NETIF_F_IPV6_CSUM;
15382 /* TSO is on by default on chips that support hardware TSO.
15383 * Firmware TSO on older chips gives lower performance, so it
15384 * is off by default, but can be enabled using ethtool.
15385 */
15386 if ((tg3_flag(tp, HW_TSO_1) ||
15387 tg3_flag(tp, HW_TSO_2) ||
15388 tg3_flag(tp, HW_TSO_3)) &&
15389 (features & NETIF_F_IP_CSUM))
15390 features |= NETIF_F_TSO;
15391 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15392 if (features & NETIF_F_IPV6_CSUM)
15393 features |= NETIF_F_TSO6;
15394 if (tg3_flag(tp, HW_TSO_3) ||
15395 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15396 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15397 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15398 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15399 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15400 features |= NETIF_F_TSO_ECN;
15403 dev->features |= features;
15404 dev->vlan_features |= features;
15406 /*
15407 * Add loopback capability only for a subset of devices that support
15408 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15409 * loopback for the remaining devices.
15410 */
15411 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15412 !tg3_flag(tp, CPMU_PRESENT))
15413 /* Add the loopback capability */
15414 features |= NETIF_F_LOOPBACK;
15416 dev->hw_features |= features;
15418 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15419 !tg3_flag(tp, TSO_CAPABLE) &&
15420 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15421 tg3_flag_set(tp, MAX_RXPEND_64);
15422 tp->rx_pending = 63;
15425 err = tg3_get_device_address(tp);
15426 if (err) {
15427 dev_err(&pdev->dev,
15428 "Could not obtain valid ethernet address, aborting\n");
15429 goto err_out_apeunmap;
15432 /*
15433 * Reset the chip in case a UNDI or EFI driver did not shut it
15434 * down: the DMA self test will enable the WDMAC, and we'd see
15435 * (spurious) pending DMA on the PCI bus at that point.
15436 */
15437 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15438 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15439 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15440 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15443 err = tg3_test_dma(tp);
15444 if (err) {
15445 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15446 goto err_out_apeunmap;
15449 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15450 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15451 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15452 for (i = 0; i < tp->irq_max; i++) {
15453 struct tg3_napi *tnapi = &tp->napi[i];
15455 tnapi->tp = tp;
15456 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15458 tnapi->int_mbox = intmbx;
15459 if (i < 4)
15460 intmbx += 0x8;
15461 else
15462 intmbx += 0x4;
15464 tnapi->consmbox = rcvmbx;
15465 tnapi->prodmbox = sndmbx;
15467 if (i)
15468 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15469 else
15470 tnapi->coal_now = HOSTCC_MODE_NOW;
15472 if (!tg3_flag(tp, SUPPORT_MSIX))
15473 break;
15475 /*
15476 * If we support MSIX, we'll be using RSS. If we're using
15477 * RSS, the first vector only handles link interrupts and the
15478 * remaining vectors handle rx and tx interrupts. Reuse the
15479 * mailbox values for the next iteration. The values we set up
15480 * above are still useful for the single-vector mode.
15481 */
15482 if (!i)
15483 continue;
15485 rcvmbx += 0x8;
15487 if (sndmbx & 0x4)
15488 sndmbx -= 0x4;
15489 else
15490 sndmbx += 0xc;
15493 tg3_init_coal(tp);
15495 pci_set_drvdata(pdev, dev);
15497 if (tg3_flag(tp, 5717_PLUS)) {
15498 /* Resume a low-power mode */
15499 tg3_frob_aux_power(tp, false);
15502 err = register_netdev(dev);
15503 if (err) {
15504 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15505 goto err_out_apeunmap;
15508 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15509 tp->board_part_number,
15510 tp->pci_chip_rev_id,
15511 tg3_bus_string(tp, str),
15512 dev->dev_addr);
15514 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15515 struct phy_device *phydev;
15516 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15517 netdev_info(dev,
15518 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15519 phydev->drv->name, dev_name(&phydev->dev));
15520 } else {
15521 char *ethtype;
15523 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15524 ethtype = "10/100Base-TX";
15525 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15526 ethtype = "1000Base-SX";
15527 else
15528 ethtype = "10/100/1000Base-T";
15530 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15531 "(WireSpeed[%d], EEE[%d])\n",
15532 tg3_phy_string(tp), ethtype,
15533 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15534 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15537 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15538 (dev->features & NETIF_F_RXCSUM) != 0,
15539 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15540 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15541 tg3_flag(tp, ENABLE_ASF) != 0,
15542 tg3_flag(tp, TSO_CAPABLE) != 0);
15543 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15544 tp->dma_rwctrl,
15545 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15546 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15548 pci_save_state(pdev);
15550 return 0;
15552 err_out_apeunmap:
15553 if (tp->aperegs) {
15554 iounmap(tp->aperegs);
15555 tp->aperegs = NULL;
15558 err_out_iounmap:
15559 if (tp->regs) {
15560 iounmap(tp->regs);
15561 tp->regs = NULL;
15564 err_out_free_dev:
15565 free_netdev(dev);
15567 err_out_power_down:
15568 pci_set_power_state(pdev, PCI_D3hot);
15570 err_out_free_res:
15571 pci_release_regions(pdev);
15573 err_out_disable_pdev:
15574 pci_disable_device(pdev);
15575 pci_set_drvdata(pdev, NULL);
15576 return err;
15579 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15581 struct net_device *dev = pci_get_drvdata(pdev);
15583 if (dev) {
15584 struct tg3 *tp = netdev_priv(dev);
15586 if (tp->fw)
15587 release_firmware(tp->fw);
15589 cancel_work_sync(&tp->reset_task);
15591 if (!tg3_flag(tp, USE_PHYLIB)) {
15592 tg3_phy_fini(tp);
15593 tg3_mdio_fini(tp);
15596 unregister_netdev(dev);
15597 if (tp->aperegs) {
15598 iounmap(tp->aperegs);
15599 tp->aperegs = NULL;
15601 if (tp->regs) {
15602 iounmap(tp->regs);
15603 tp->regs = NULL;
15605 free_netdev(dev);
15606 pci_release_regions(pdev);
15607 pci_disable_device(pdev);
15608 pci_set_drvdata(pdev, NULL);
15612 #ifdef CONFIG_PM_SLEEP
15613 static int tg3_suspend(struct device *device)
15615 struct pci_dev *pdev = to_pci_dev(device);
15616 struct net_device *dev = pci_get_drvdata(pdev);
15617 struct tg3 *tp = netdev_priv(dev);
15618 int err;
15620 if (!netif_running(dev))
15621 return 0;
15623 flush_work_sync(&tp->reset_task);
15624 tg3_phy_stop(tp);
15625 tg3_netif_stop(tp);
15627 del_timer_sync(&tp->timer);
15629 tg3_full_lock(tp, 1);
15630 tg3_disable_ints(tp);
15631 tg3_full_unlock(tp);
15633 netif_device_detach(dev);
15635 tg3_full_lock(tp, 0);
15636 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15637 tg3_flag_clear(tp, INIT_COMPLETE);
15638 tg3_full_unlock(tp);
15640 err = tg3_power_down_prepare(tp);
15641 if (err) {
15642 int err2;
15644 tg3_full_lock(tp, 0);
15646 tg3_flag_set(tp, INIT_COMPLETE);
15647 err2 = tg3_restart_hw(tp, 1);
15648 if (err2)
15649 goto out;
15651 tp->timer.expires = jiffies + tp->timer_offset;
15652 add_timer(&tp->timer);
15654 netif_device_attach(dev);
15655 tg3_netif_start(tp);
15657 out:
15658 tg3_full_unlock(tp);
15660 if (!err2)
15661 tg3_phy_start(tp);
15662 }
15664 return err;
15667 static int tg3_resume(struct device *device)
15669 struct pci_dev *pdev = to_pci_dev(device);
15670 struct net_device *dev = pci_get_drvdata(pdev);
15671 struct tg3 *tp = netdev_priv(dev);
15672 int err;
15674 if (!netif_running(dev))
15675 return 0;
15677 netif_device_attach(dev);
15679 tg3_full_lock(tp, 0);
15681 tg3_flag_set(tp, INIT_COMPLETE);
15682 err = tg3_restart_hw(tp, 1);
15683 if (err)
15684 goto out;
15686 tp->timer.expires = jiffies + tp->timer_offset;
15687 add_timer(&tp->timer);
15689 tg3_netif_start(tp);
15691 out:
15692 tg3_full_unlock(tp);
15694 if (!err)
15695 tg3_phy_start(tp);
15697 return err;
15700 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15701 #define TG3_PM_OPS (&tg3_pm_ops)
15703 #else
15705 #define TG3_PM_OPS NULL
15707 #endif /* CONFIG_PM_SLEEP */
15709 /**
15710 * tg3_io_error_detected - called when PCI error is detected
15711 * @pdev: Pointer to PCI device
15712 * @state: The current pci connection state
15713 *
15714 * This function is called after a PCI bus error affecting
15715 * this device has been detected.
15716 */
15717 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15718 pci_channel_state_t state)
15720 struct net_device *netdev = pci_get_drvdata(pdev);
15721 struct tg3 *tp = netdev_priv(netdev);
15722 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15724 netdev_info(netdev, "PCI I/O error detected\n");
15726 rtnl_lock();
15728 if (!netif_running(netdev))
15729 goto done;
15731 tg3_phy_stop(tp);
15733 tg3_netif_stop(tp);
15735 del_timer_sync(&tp->timer);
15736 tg3_flag_clear(tp, RESTART_TIMER);
15738 /* Want to make sure that the reset task doesn't run */
15739 cancel_work_sync(&tp->reset_task);
15740 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15741 tg3_flag_clear(tp, RESTART_TIMER);
15743 netif_device_detach(netdev);
15745 /* Clean up software state, even if MMIO is blocked */
15746 tg3_full_lock(tp, 0);
15747 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15748 tg3_full_unlock(tp);
15750 done:
15751 if (state == pci_channel_io_perm_failure)
15752 err = PCI_ERS_RESULT_DISCONNECT;
15753 else
15754 pci_disable_device(pdev);
15756 rtnl_unlock();
15758 return err;
15761 /**
15762 * tg3_io_slot_reset - called after the pci bus has been reset.
15763 * @pdev: Pointer to PCI device
15764 *
15765 * Restart the card from scratch, as if from a cold-boot.
15766 * At this point, the card has experienced a hard reset,
15767 * followed by fixups by BIOS, and has its config space
15768 * set up identically to what it was at cold boot.
15769 */
15770 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15772 struct net_device *netdev = pci_get_drvdata(pdev);
15773 struct tg3 *tp = netdev_priv(netdev);
15774 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15775 int err;
15777 rtnl_lock();
15779 if (pci_enable_device(pdev)) {
15780 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15781 goto done;
15784 pci_set_master(pdev);
15785 pci_restore_state(pdev);
15786 pci_save_state(pdev);
15788 if (!netif_running(netdev)) {
15789 rc = PCI_ERS_RESULT_RECOVERED;
15790 goto done;
15793 err = tg3_power_up(tp);
15794 if (err)
15795 goto done;
15797 rc = PCI_ERS_RESULT_RECOVERED;
15799 done:
15800 rtnl_unlock();
15802 return rc;
15805 /**
15806 * tg3_io_resume - called when traffic can start flowing again.
15807 * @pdev: Pointer to PCI device
15808 *
15809 * This callback is called when the error recovery driver tells
15810 * us that it's OK to resume normal operation.
15811 */
15812 static void tg3_io_resume(struct pci_dev *pdev)
15814 struct net_device *netdev = pci_get_drvdata(pdev);
15815 struct tg3 *tp = netdev_priv(netdev);
15816 int err;
15818 rtnl_lock();
15820 if (!netif_running(netdev))
15821 goto done;
15823 tg3_full_lock(tp, 0);
15824 tg3_flag_set(tp, INIT_COMPLETE);
15825 err = tg3_restart_hw(tp, 1);
15826 tg3_full_unlock(tp);
15827 if (err) {
15828 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15829 goto done;
15832 netif_device_attach(netdev);
15834 tp->timer.expires = jiffies + tp->timer_offset;
15835 add_timer(&tp->timer);
15837 tg3_netif_start(tp);
15839 tg3_phy_start(tp);
15841 done:
15842 rtnl_unlock();
15845 static struct pci_error_handlers tg3_err_handler = {
15846 .error_detected = tg3_io_error_detected,
15847 .slot_reset = tg3_io_slot_reset,
15848 .resume = tg3_io_resume
15851 static struct pci_driver tg3_driver = {
15852 .name = DRV_MODULE_NAME,
15853 .id_table = tg3_pci_tbl,
15854 .probe = tg3_init_one,
15855 .remove = __devexit_p(tg3_remove_one),
15856 .err_handler = &tg3_err_handler,
15857 .driver.pm = TG3_PM_OPS,
15860 static int __init tg3_init(void)
15862 return pci_register_driver(&tg3_driver);
15865 static void __exit tg3_cleanup(void)
15867 pci_unregister_driver(&tg3_driver);
15870 module_init(tg3_init);
15871 module_exit(tg3_cleanup);