tg3: Detect APE enabled devs earlier
/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		119
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 18, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
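
/* Register accessors: tg3_write32()/tg3_read32() go straight to the
 * chip's memory-mapped registers, while the tg3_ape_*() variants target
 * the separately mapped APE register block (tp->aperegs).
 */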
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
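
/* Indirect accessors: route register and mailbox accesses through the
 * TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA window in PCI config space,
 * serialized by tp->indirect_lock.
 */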
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
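
/* TX mailbox writer: repeats the write on chips with the TXD mailbox
 * hardware bug and flushes with a read where write reordering is possible.
 */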
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
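
/* NIC SRAM accessors: select the target word through the memory window
 * base address register, transfer it through the window data register,
 * then park the window at zero.  Stats block offsets are skipped on the
 * 5906.
 */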
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
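
/* APE hardware mutexes: the driver and the APE firmware arbitrate access
 * to shared resources through per-lock request/grant registers.
 */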
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}
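
/* Interrupt control: mask or unmask PCI interrupts via MISC_HOST_CTRL
 * and update each vector's interrupt mailbox.
 */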
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
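
/* MII management: clock a read or write frame out over the MAC_MI_COM
 * interface and poll the busy bit for completion, with hardware
 * auto-polling temporarily paused around the transaction.
 */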
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
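
/* Clause 45 access tunneled through clause 22: select the MMD device and
 * register address via MII_TG3_MMD_CTRL/MII_TG3_MMD_ADDRESS, then
 * transfer the data in no-post-increment mode.
 */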
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
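
/* phylib MDIO bus callbacks: wrap tg3_readphy()/tg3_writephy() under
 * tp->lock so the PHY library can share the MI interface.
 */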
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
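
/* Allocate and register the MDIO bus.  On 5717-class parts the PHY
 * address is derived from the PCI function number (+7 for serdes);
 * once the attached PHY driver is known, PHY-specific dev_flags and
 * interface modes are applied.
 */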
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
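
/* Resolve the negotiated (or forced) pause configuration and program the
 * RX/TX MAC flow-control enables accordingly.
 */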
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
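
/* Unpack the OTP configuration word in tp->phy_otp into its bit fields
 * and program the corresponding PHY DSP registers, with shadow DSP
 * access enabled around the writes.
 */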
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
2029 /* This will reset the tigon3 PHY.  Callers are responsible for
2030 * deciding when a reset is actually needed.
2032 static int tg3_phy_reset(struct tg3 *tp)
2034 u32 val, cpmuctrl;
2035 int err;
2037 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2038 val = tr32(GRC_MISC_CFG);
2039 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2040 udelay(40);
2042 err = tg3_readphy(tp, MII_BMSR, &val);
2043 err |= tg3_readphy(tp, MII_BMSR, &val);
2044 if (err != 0)
2045 return -EBUSY;
2047 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2048 netif_carrier_off(tp->dev);
2049 tg3_link_report(tp);
2052 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2053 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2054 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2055 err = tg3_phy_reset_5703_4_5(tp);
2056 if (err)
2057 return err;
2058 goto out;
2061 cpmuctrl = 0;
2062 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2063 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2064 cpmuctrl = tr32(TG3_CPMU_CTRL);
2065 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2066 tw32(TG3_CPMU_CTRL,
2067 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2070 err = tg3_bmcr_reset(tp);
2071 if (err)
2072 return err;
2074 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2075 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2076 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2078 tw32(TG3_CPMU_CTRL, cpmuctrl);
2081 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2082 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2083 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2084 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2085 CPMU_LSPD_1000MB_MACCLK_12_5) {
2086 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2087 udelay(40);
2088 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2092 if (tg3_flag(tp, 5717_PLUS) &&
2093 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2094 return 0;
2096 tg3_phy_apply_otp(tp);
2098 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2099 tg3_phy_toggle_apd(tp, true);
2100 else
2101 tg3_phy_toggle_apd(tp, false);
2103 out:
2104 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2105 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2106 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2107 tg3_phydsp_write(tp, 0x000a, 0x0323);
2108 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2111 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2112 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2113 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2116 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2117 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2118 tg3_phydsp_write(tp, 0x000a, 0x310b);
2119 tg3_phydsp_write(tp, 0x201f, 0x9506);
2120 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2121 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2123 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2124 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2125 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2126 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2127 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2128 tg3_writephy(tp, MII_TG3_TEST1,
2129 MII_TG3_TEST1_TRIM_EN | 0x4);
2130 } else
2131 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2133 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2137 /* Set Extended packet length bit (bit 14) on all chips that
2138 * support jumbo frames. */
2139 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2140 /* Cannot do read-modify-write on 5401 */
2141 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2142 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2143 /* Set bit 14 with read-modify-write to preserve other bits */
2144 err = tg3_phy_auxctl_read(tp,
2145 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2146 if (!err)
2147 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2148 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2151 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2152 * jumbo frame transmission.
2154 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2155 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2156 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2157 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2160 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2161 /* adjust output voltage */
2162 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2165 tg3_phy_toggle_automdix(tp, 1);
2166 tg3_phy_set_wirespeed(tp);
2167 return 0;
2170 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2172 if (!tg3_flag(tp, IS_NIC))
2173 return 0;
2175 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2176 TG3_GRC_LCLCTL_PWRSW_DELAY);
2178 return 0;
2181 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2183 u32 grc_local_ctrl;
2185 if (!tg3_flag(tp, IS_NIC) ||
2186 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2187 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2188 return;
2190 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2192 tw32_wait_f(GRC_LOCAL_CTRL,
2193 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2194 TG3_GRC_LCLCTL_PWRSW_DELAY);
2196 tw32_wait_f(GRC_LOCAL_CTRL,
2197 grc_local_ctrl,
2198 TG3_GRC_LCLCTL_PWRSW_DELAY);
2200 tw32_wait_f(GRC_LOCAL_CTRL,
2201 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2202 TG3_GRC_LCLCTL_PWRSW_DELAY);
2205 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2207 if (!tg3_flag(tp, IS_NIC))
2208 return;
2210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2211 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2212 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2213 (GRC_LCLCTRL_GPIO_OE0 |
2214 GRC_LCLCTRL_GPIO_OE1 |
2215 GRC_LCLCTRL_GPIO_OE2 |
2216 GRC_LCLCTRL_GPIO_OUTPUT0 |
2217 GRC_LCLCTRL_GPIO_OUTPUT1),
2218 TG3_GRC_LCLCTL_PWRSW_DELAY);
2219 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2220 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2221 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2222 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2223 GRC_LCLCTRL_GPIO_OE1 |
2224 GRC_LCLCTRL_GPIO_OE2 |
2225 GRC_LCLCTRL_GPIO_OUTPUT0 |
2226 GRC_LCLCTRL_GPIO_OUTPUT1 |
2227 tp->grc_local_ctrl;
2228 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2229 TG3_GRC_LCLCTL_PWRSW_DELAY);
2231 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2232 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2233 TG3_GRC_LCLCTL_PWRSW_DELAY);
2235 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2236 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2237 TG3_GRC_LCLCTL_PWRSW_DELAY);
2238 } else {
2239 u32 no_gpio2;
2240 u32 grc_local_ctrl = 0;
2242 /* Workaround to prevent excess current draw. */
2243 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2244 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2245 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2246 grc_local_ctrl,
2247 TG3_GRC_LCLCTL_PWRSW_DELAY);
2250 /* On 5753 and variants, GPIO2 cannot be used. */
2251 no_gpio2 = tp->nic_sram_data_cfg &
2252 NIC_SRAM_DATA_CFG_NO_GPIO2;
2254 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2255 GRC_LCLCTRL_GPIO_OE1 |
2256 GRC_LCLCTRL_GPIO_OE2 |
2257 GRC_LCLCTRL_GPIO_OUTPUT1 |
2258 GRC_LCLCTRL_GPIO_OUTPUT2;
2259 if (no_gpio2) {
2260 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2261 GRC_LCLCTRL_GPIO_OUTPUT2);
2263 tw32_wait_f(GRC_LOCAL_CTRL,
2264 tp->grc_local_ctrl | grc_local_ctrl,
2265 TG3_GRC_LCLCTL_PWRSW_DELAY);
2267 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2269 tw32_wait_f(GRC_LOCAL_CTRL,
2270 tp->grc_local_ctrl | grc_local_ctrl,
2271 TG3_GRC_LCLCTL_PWRSW_DELAY);
2273 if (!no_gpio2) {
2274 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2275 tw32_wait_f(GRC_LOCAL_CTRL,
2276 tp->grc_local_ctrl | grc_local_ctrl,
2277 TG3_GRC_LCLCTL_PWRSW_DELAY);
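/* Choose between auxiliary and main power at shutdown.  Vaux must
 * stay up if this port, or its peer on a dual-port board, still
 * needs power for WoL or ASF management firmware; otherwise the
 * device can simply die with Vmain.
 */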
2282 static void tg3_frob_aux_power(struct tg3 *tp)
2284 bool need_vaux = false;
2286 /* The GPIOs do something completely different on the 5719 and 57765. */
2287 if (!tg3_flag(tp, IS_NIC) ||
2288 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2289 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2290 return;
2292 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2293 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2294 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2295 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2296 tp->pdev_peer != tp->pdev) {
2297 struct net_device *dev_peer;
2299 dev_peer = pci_get_drvdata(tp->pdev_peer);
2301 /* remove_one() may have been run on the peer. */
2302 if (dev_peer) {
2303 struct tg3 *tp_peer = netdev_priv(dev_peer);
2305 if (tg3_flag(tp_peer, INIT_COMPLETE))
2306 return;
2308 if (tg3_flag(tp_peer, WOL_ENABLE) ||
2309 tg3_flag(tp_peer, ENABLE_ASF))
2310 need_vaux = true;
2314 if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2315 need_vaux = true;
2317 if (need_vaux)
2318 tg3_pwrsrc_switch_to_vaux(tp);
2319 else
2320 tg3_pwrsrc_die_with_vmain(tp);
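/* Return nonzero when MAC_MODE_LINK_POLARITY must be set for the
 * current LED mode, PHY type and link speed on 5700-class parts.
 */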
2323 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2325 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2326 return 1;
2327 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2328 if (speed != SPEED_10)
2329 return 1;
2330 } else if (speed == SPEED_10)
2331 return 1;
2333 return 0;
2336 static int tg3_setup_phy(struct tg3 *, int);
2338 #define RESET_KIND_SHUTDOWN 0
2339 #define RESET_KIND_INIT 1
2340 #define RESET_KIND_SUSPEND 2
2342 static void tg3_write_sig_post_reset(struct tg3 *, int);
2343 static int tg3_halt_cpu(struct tg3 *, u32);
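/* Quiesce the PHY for a low-power state.  SerDes, 5906 and FET
 * devices each take their own early-return path; the common copper
 * case ends with BMCR_PDOWN unless the chip is one of those where
 * powering the PHY down is known to be unsafe (see below).
 */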
2345 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2347 u32 val;
2349 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2351 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2352 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2354 sg_dig_ctrl |=
2355 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2356 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2357 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2359 return;
2362 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2363 tg3_bmcr_reset(tp);
2364 val = tr32(GRC_MISC_CFG);
2365 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2366 udelay(40);
2367 return;
2368 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2369 u32 phytest;
2370 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2371 u32 phy;
2373 tg3_writephy(tp, MII_ADVERTISE, 0);
2374 tg3_writephy(tp, MII_BMCR,
2375 BMCR_ANENABLE | BMCR_ANRESTART);
2377 tg3_writephy(tp, MII_TG3_FET_TEST,
2378 phytest | MII_TG3_FET_SHADOW_EN);
2379 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2380 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2381 tg3_writephy(tp,
2382 MII_TG3_FET_SHDW_AUXMODE4,
2383 phy);
2385 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2387 return;
2388 } else if (do_low_power) {
2389 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2390 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2392 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2393 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2394 MII_TG3_AUXCTL_PCTL_VREG_11V;
2395 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2398 /* The PHY should not be powered down on some chips because
2399 * of bugs.
2401 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2402 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2403 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2404 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2405 return;
2407 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2408 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2409 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2410 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2411 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2412 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2415 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
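/* Acquire the NVRAM software arbitration semaphore.  The request
 * is posted through NVRAM_SWARB and the grant bit is then polled
 * for up to 8000 * 20us (about 160ms) before backing out with
 * -ENODEV.  The lock nests via nvram_lock_cnt.  Typical paired
 * use, as in tg3_nvram_read() below:
 *
 *	ret = tg3_nvram_lock(tp);
 *	if (ret)
 *		return ret;
 *	... NVRAM accesses ...
 *	tg3_nvram_unlock(tp);
 */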
2418 /* tp->lock is held. */
2419 static int tg3_nvram_lock(struct tg3 *tp)
2421 if (tg3_flag(tp, NVRAM)) {
2422 int i;
2424 if (tp->nvram_lock_cnt == 0) {
2425 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2426 for (i = 0; i < 8000; i++) {
2427 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2428 break;
2429 udelay(20);
2431 if (i == 8000) {
2432 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2433 return -ENODEV;
2436 tp->nvram_lock_cnt++;
2438 return 0;
2441 /* tp->lock is held. */
2442 static void tg3_nvram_unlock(struct tg3 *tp)
2444 if (tg3_flag(tp, NVRAM)) {
2445 if (tp->nvram_lock_cnt > 0)
2446 tp->nvram_lock_cnt--;
2447 if (tp->nvram_lock_cnt == 0)
2448 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2452 /* tp->lock is held. */
2453 static void tg3_enable_nvram_access(struct tg3 *tp)
2455 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2456 u32 nvaccess = tr32(NVRAM_ACCESS);
2458 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2462 /* tp->lock is held. */
2463 static void tg3_disable_nvram_access(struct tg3 *tp)
2465 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2466 u32 nvaccess = tr32(NVRAM_ACCESS);
2468 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2472 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2473 u32 offset, u32 *val)
2475 u32 tmp;
2476 int i;
2478 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2479 return -EINVAL;
2481 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2482 EEPROM_ADDR_DEVID_MASK |
2483 EEPROM_ADDR_READ);
2484 tw32(GRC_EEPROM_ADDR,
2485 tmp |
2486 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2487 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2488 EEPROM_ADDR_ADDR_MASK) |
2489 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2491 for (i = 0; i < 1000; i++) {
2492 tmp = tr32(GRC_EEPROM_ADDR);
2494 if (tmp & EEPROM_ADDR_COMPLETE)
2495 break;
2496 msleep(1);
2498 if (!(tmp & EEPROM_ADDR_COMPLETE))
2499 return -EBUSY;
2501 tmp = tr32(GRC_EEPROM_DATA);
2504 * The data will always be opposite the native endian
2505 * format. Perform a blind byteswap to compensate.
2507 *val = swab32(tmp);
2509 return 0;
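/* Kick an NVRAM command and poll for NVRAM_CMD_DONE.  Each pass
 * through the loop costs a 10us delay plus a register read, so the
 * timeout below bounds a single command at roughly 100ms before
 * -EBUSY is returned.
 */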
2512 #define NVRAM_CMD_TIMEOUT 10000
2514 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2516 int i;
2518 tw32(NVRAM_CMD, nvram_cmd);
2519 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2520 udelay(10);
2521 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2522 udelay(10);
2523 break;
2527 if (i == NVRAM_CMD_TIMEOUT)
2528 return -EBUSY;
2530 return 0;
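/* Translate a linear NVRAM offset into the page/offset form used
 * by Atmel AT45DB0x1B parts, whose page size is not a power of
 * two.  Worked example, assuming the usual 264-byte page and a
 * 9-bit page-offset field: offset 1000 falls in page 3 at byte
 * 208, giving a physical address of (3 << 9) + 208 = 1744.
 * tg3_nvram_logical_addr() below performs the inverse mapping.
 */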
2533 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2535 if (tg3_flag(tp, NVRAM) &&
2536 tg3_flag(tp, NVRAM_BUFFERED) &&
2537 tg3_flag(tp, FLASH) &&
2538 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2539 (tp->nvram_jedecnum == JEDEC_ATMEL))
2541 addr = ((addr / tp->nvram_pagesize) <<
2542 ATMEL_AT45DB0X1B_PAGE_POS) +
2543 (addr % tp->nvram_pagesize);
2545 return addr;
2548 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2550 if (tg3_flag(tp, NVRAM) &&
2551 tg3_flag(tp, NVRAM_BUFFERED) &&
2552 tg3_flag(tp, FLASH) &&
2553 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2554 (tp->nvram_jedecnum == JEDEC_ATMEL))
2556 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2557 tp->nvram_pagesize) +
2558 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2560 return addr;
2563 /* NOTE: Data read in from NVRAM is byteswapped according to
2564 * the byteswapping settings for all other register accesses.
2565 * tg3 devices are BE devices, so on a BE machine, the data
2566 * returned will be exactly as it is seen in NVRAM. On a LE
2567 * machine, the 32-bit value will be byteswapped.
2569 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2571 int ret;
2573 if (!tg3_flag(tp, NVRAM))
2574 return tg3_nvram_read_using_eeprom(tp, offset, val);
2576 offset = tg3_nvram_phys_addr(tp, offset);
2578 if (offset > NVRAM_ADDR_MSK)
2579 return -EINVAL;
2581 ret = tg3_nvram_lock(tp);
2582 if (ret)
2583 return ret;
2585 tg3_enable_nvram_access(tp);
2587 tw32(NVRAM_ADDR, offset);
2588 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2589 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2591 if (ret == 0)
2592 *val = tr32(NVRAM_RDDATA);
2594 tg3_disable_nvram_access(tp);
2596 tg3_nvram_unlock(tp);
2598 return ret;
2601 /* Ensures NVRAM data is in bytestream format. */
2602 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2604 u32 v;
2605 int res = tg3_nvram_read(tp, offset, &v);
2606 if (!res)
2607 *val = cpu_to_be32(v);
2608 return res;
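/* Program the station address into all four MAC_ADDR_x register
 * pairs (optionally skipping slot 1, which management firmware can
 * be using), into the twelve extended slots on 5703/5704, and
 * reseed the transmit backoff algorithm from the byte sum of the
 * address.
 */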
2611 /* tp->lock is held. */
2612 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2614 u32 addr_high, addr_low;
2615 int i;
2617 addr_high = ((tp->dev->dev_addr[0] << 8) |
2618 tp->dev->dev_addr[1]);
2619 addr_low = ((tp->dev->dev_addr[2] << 24) |
2620 (tp->dev->dev_addr[3] << 16) |
2621 (tp->dev->dev_addr[4] << 8) |
2622 (tp->dev->dev_addr[5] << 0));
2623 for (i = 0; i < 4; i++) {
2624 if (i == 1 && skip_mac_1)
2625 continue;
2626 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2627 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2630 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2631 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2632 for (i = 0; i < 12; i++) {
2633 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2634 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2638 addr_high = (tp->dev->dev_addr[0] +
2639 tp->dev->dev_addr[1] +
2640 tp->dev->dev_addr[2] +
2641 tp->dev->dev_addr[3] +
2642 tp->dev->dev_addr[4] +
2643 tp->dev->dev_addr[5]) &
2644 TX_BACKOFF_SEED_MASK;
2645 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2648 static void tg3_enable_register_access(struct tg3 *tp)
2651 * Make sure register accesses (indirect or otherwise) will function
2652 * correctly.
2654 pci_write_config_dword(tp->pdev,
2655 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2658 static int tg3_power_up(struct tg3 *tp)
2660 tg3_enable_register_access(tp);
2662 pci_set_power_state(tp->pdev, PCI_D0);
2664 /* Switch out of Vaux if it is a NIC */
2665 tg3_pwrsrc_switch_to_vmain(tp);
2667 return 0;
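/* Prepare the device for D3: record the current link settings,
 * force the PHY to a WoL-capable speed where necessary, arm
 * magic-packet reception, and leave the Vaux/Vmain decision to
 * tg3_frob_aux_power().
 */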
2670 static int tg3_power_down_prepare(struct tg3 *tp)
2672 u32 misc_host_ctrl;
2673 bool device_should_wake, do_low_power;
2675 tg3_enable_register_access(tp);
2677 /* Restore the CLKREQ setting. */
2678 if (tg3_flag(tp, CLKREQ_BUG)) {
2679 u16 lnkctl;
2681 pci_read_config_word(tp->pdev,
2682 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2683 &lnkctl);
2684 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2685 pci_write_config_word(tp->pdev,
2686 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2687 lnkctl);
2690 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2691 tw32(TG3PCI_MISC_HOST_CTRL,
2692 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2694 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2695 tg3_flag(tp, WOL_ENABLE);
2697 if (tg3_flag(tp, USE_PHYLIB)) {
2698 do_low_power = false;
2699 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2700 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2701 struct phy_device *phydev;
2702 u32 phyid, advertising;
2704 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2706 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2708 tp->link_config.orig_speed = phydev->speed;
2709 tp->link_config.orig_duplex = phydev->duplex;
2710 tp->link_config.orig_autoneg = phydev->autoneg;
2711 tp->link_config.orig_advertising = phydev->advertising;
2713 advertising = ADVERTISED_TP |
2714 ADVERTISED_Pause |
2715 ADVERTISED_Autoneg |
2716 ADVERTISED_10baseT_Half;
2718 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2719 if (tg3_flag(tp, WOL_SPEED_100MB))
2720 advertising |=
2721 ADVERTISED_100baseT_Half |
2722 ADVERTISED_100baseT_Full |
2723 ADVERTISED_10baseT_Full;
2724 else
2725 advertising |= ADVERTISED_10baseT_Full;
2728 phydev->advertising = advertising;
2730 phy_start_aneg(phydev);
2732 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2733 if (phyid != PHY_ID_BCMAC131) {
2734 phyid &= PHY_BCM_OUI_MASK;
2735 if (phyid == PHY_BCM_OUI_1 ||
2736 phyid == PHY_BCM_OUI_2 ||
2737 phyid == PHY_BCM_OUI_3)
2738 do_low_power = true;
2741 } else {
2742 do_low_power = true;
2744 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2745 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2746 tp->link_config.orig_speed = tp->link_config.speed;
2747 tp->link_config.orig_duplex = tp->link_config.duplex;
2748 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2751 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2752 tp->link_config.speed = SPEED_10;
2753 tp->link_config.duplex = DUPLEX_HALF;
2754 tp->link_config.autoneg = AUTONEG_ENABLE;
2755 tg3_setup_phy(tp, 0);
2759 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2760 u32 val;
2762 val = tr32(GRC_VCPU_EXT_CTRL);
2763 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2764 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2765 int i;
2766 u32 val;
2768 for (i = 0; i < 200; i++) {
2769 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2770 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2771 break;
2772 msleep(1);
2775 if (tg3_flag(tp, WOL_CAP))
2776 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2777 WOL_DRV_STATE_SHUTDOWN |
2778 WOL_DRV_WOL |
2779 WOL_SET_MAGIC_PKT);
2781 if (device_should_wake) {
2782 u32 mac_mode;
2784 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2785 if (do_low_power &&
2786 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2787 tg3_phy_auxctl_write(tp,
2788 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2789 MII_TG3_AUXCTL_PCTL_WOL_EN |
2790 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2791 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2792 udelay(40);
2795 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2796 mac_mode = MAC_MODE_PORT_MODE_GMII;
2797 else
2798 mac_mode = MAC_MODE_PORT_MODE_MII;
2800 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2801 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2802 ASIC_REV_5700) {
2803 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2804 SPEED_100 : SPEED_10;
2805 if (tg3_5700_link_polarity(tp, speed))
2806 mac_mode |= MAC_MODE_LINK_POLARITY;
2807 else
2808 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2810 } else {
2811 mac_mode = MAC_MODE_PORT_MODE_TBI;
2814 if (!tg3_flag(tp, 5750_PLUS))
2815 tw32(MAC_LED_CTRL, tp->led_ctrl);
2817 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2818 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2819 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2820 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2822 if (tg3_flag(tp, ENABLE_APE))
2823 mac_mode |= MAC_MODE_APE_TX_EN |
2824 MAC_MODE_APE_RX_EN |
2825 MAC_MODE_TDE_ENABLE;
2827 tw32_f(MAC_MODE, mac_mode);
2828 udelay(100);
2830 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2831 udelay(10);
2834 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2835 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2836 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2837 u32 base_val;
2839 base_val = tp->pci_clock_ctrl;
2840 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2841 CLOCK_CTRL_TXCLK_DISABLE);
2843 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2844 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2845 } else if (tg3_flag(tp, 5780_CLASS) ||
2846 tg3_flag(tp, CPMU_PRESENT) ||
2847 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2848 /* do nothing */
2849 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2850 u32 newbits1, newbits2;
2852 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2853 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2854 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2855 CLOCK_CTRL_TXCLK_DISABLE |
2856 CLOCK_CTRL_ALTCLK);
2857 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2858 } else if (tg3_flag(tp, 5705_PLUS)) {
2859 newbits1 = CLOCK_CTRL_625_CORE;
2860 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2861 } else {
2862 newbits1 = CLOCK_CTRL_ALTCLK;
2863 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2866 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2867 40);
2869 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2870 40);
2872 if (!tg3_flag(tp, 5705_PLUS)) {
2873 u32 newbits3;
2875 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2876 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2877 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2878 CLOCK_CTRL_TXCLK_DISABLE |
2879 CLOCK_CTRL_44MHZ_CORE);
2880 } else {
2881 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2884 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2885 tp->pci_clock_ctrl | newbits3, 40);
2889 if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
2890 tg3_power_down_phy(tp, do_low_power);
2892 tg3_frob_aux_power(tp);
2894 /* Workaround for unstable PLL clock */
2895 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2896 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2897 u32 val = tr32(0x7d00);
2899 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2900 tw32(0x7d00, val);
2901 if (!tg3_flag(tp, ENABLE_ASF)) {
2902 int err;
2904 err = tg3_nvram_lock(tp);
2905 tg3_halt_cpu(tp, RX_CPU_BASE);
2906 if (!err)
2907 tg3_nvram_unlock(tp);
2911 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2913 return 0;
2916 static void tg3_power_down(struct tg3 *tp)
2918 tg3_power_down_prepare(tp);
2920 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2921 pci_set_power_state(tp->pdev, PCI_D3hot);
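/* Decode the PHY auxiliary status register into speed and duplex.
 * FET-style PHYs encode the result differently, hence the extra
 * check in the default case; anything unrecognized is reported as
 * SPEED_INVALID / DUPLEX_INVALID.
 */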
2924 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2926 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2927 case MII_TG3_AUX_STAT_10HALF:
2928 *speed = SPEED_10;
2929 *duplex = DUPLEX_HALF;
2930 break;
2932 case MII_TG3_AUX_STAT_10FULL:
2933 *speed = SPEED_10;
2934 *duplex = DUPLEX_FULL;
2935 break;
2937 case MII_TG3_AUX_STAT_100HALF:
2938 *speed = SPEED_100;
2939 *duplex = DUPLEX_HALF;
2940 break;
2942 case MII_TG3_AUX_STAT_100FULL:
2943 *speed = SPEED_100;
2944 *duplex = DUPLEX_FULL;
2945 break;
2947 case MII_TG3_AUX_STAT_1000HALF:
2948 *speed = SPEED_1000;
2949 *duplex = DUPLEX_HALF;
2950 break;
2952 case MII_TG3_AUX_STAT_1000FULL:
2953 *speed = SPEED_1000;
2954 *duplex = DUPLEX_FULL;
2955 break;
2957 default:
2958 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2959 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2960 SPEED_10;
2961 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2962 DUPLEX_HALF;
2963 break;
2965 *speed = SPEED_INVALID;
2966 *duplex = DUPLEX_INVALID;
2967 break;
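/* Load the autoneg advertisement registers from an ADVERTISED_*
 * mask plus the requested flow control, then set up the EEE
 * advertisement on PHYs that support it.  Illustrative call, as
 * made from tg3_phy_copper_begin():
 *
 *	err = tg3_phy_autoneg_cfg(tp, ADVERTISED_1000baseT_Full,
 *				  FLOW_CTRL_TX | FLOW_CTRL_RX);
 */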
2971 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2973 int err = 0;
2974 u32 val, new_adv;
2976 new_adv = ADVERTISE_CSMA;
2977 if (advertise & ADVERTISED_10baseT_Half)
2978 new_adv |= ADVERTISE_10HALF;
2979 if (advertise & ADVERTISED_10baseT_Full)
2980 new_adv |= ADVERTISE_10FULL;
2981 if (advertise & ADVERTISED_100baseT_Half)
2982 new_adv |= ADVERTISE_100HALF;
2983 if (advertise & ADVERTISED_100baseT_Full)
2984 new_adv |= ADVERTISE_100FULL;
2986 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2988 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2989 if (err)
2990 goto done;
2992 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2993 goto done;
2995 new_adv = 0;
2996 if (advertise & ADVERTISED_1000baseT_Half)
2997 new_adv |= ADVERTISE_1000HALF;
2998 if (advertise & ADVERTISED_1000baseT_Full)
2999 new_adv |= ADVERTISE_1000FULL;
3001 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3002 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3003 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3005 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3006 if (err)
3007 goto done;
3009 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3010 goto done;
3012 tw32(TG3_CPMU_EEE_MODE,
3013 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3015 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3016 if (!err) {
3017 u32 err2;
3019 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3020 case ASIC_REV_5717:
3021 case ASIC_REV_57765:
3022 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3023 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3024 MII_TG3_DSP_CH34TP2_HIBW01);
3025 /* Fall through */
3026 case ASIC_REV_5719:
3027 val = MII_TG3_DSP_TAP26_ALNOKO |
3028 MII_TG3_DSP_TAP26_RMRXSTO |
3029 MII_TG3_DSP_TAP26_OPCSINPT;
3030 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3033 val = 0;
3034 /* Advertise 100BASE-TX EEE ability */
3035 if (advertise & ADVERTISED_100baseT_Full)
3036 val |= MDIO_AN_EEE_ADV_100TX;
3037 /* Advertise 1000BASE-T EEE ability */
3038 if (advertise & ADVERTISED_1000baseT_Full)
3039 val |= MDIO_AN_EEE_ADV_1000T;
3040 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3042 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3043 if (!err)
3044 err = err2;
3047 done:
3048 return err;
3051 static void tg3_phy_copper_begin(struct tg3 *tp)
3053 u32 new_adv;
3054 int i;
3056 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3057 new_adv = ADVERTISED_10baseT_Half |
3058 ADVERTISED_10baseT_Full;
3059 if (tg3_flag(tp, WOL_SPEED_100MB))
3060 new_adv |= ADVERTISED_100baseT_Half |
3061 ADVERTISED_100baseT_Full;
3063 tg3_phy_autoneg_cfg(tp, new_adv,
3064 FLOW_CTRL_TX | FLOW_CTRL_RX);
3065 } else if (tp->link_config.speed == SPEED_INVALID) {
3066 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3067 tp->link_config.advertising &=
3068 ~(ADVERTISED_1000baseT_Half |
3069 ADVERTISED_1000baseT_Full);
3071 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3072 tp->link_config.flowctrl);
3073 } else {
3074 /* Asking for a specific link mode. */
3075 if (tp->link_config.speed == SPEED_1000) {
3076 if (tp->link_config.duplex == DUPLEX_FULL)
3077 new_adv = ADVERTISED_1000baseT_Full;
3078 else
3079 new_adv = ADVERTISED_1000baseT_Half;
3080 } else if (tp->link_config.speed == SPEED_100) {
3081 if (tp->link_config.duplex == DUPLEX_FULL)
3082 new_adv = ADVERTISED_100baseT_Full;
3083 else
3084 new_adv = ADVERTISED_100baseT_Half;
3085 } else {
3086 if (tp->link_config.duplex == DUPLEX_FULL)
3087 new_adv = ADVERTISED_10baseT_Full;
3088 else
3089 new_adv = ADVERTISED_10baseT_Half;
3092 tg3_phy_autoneg_cfg(tp, new_adv,
3093 tp->link_config.flowctrl);
3096 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3097 tp->link_config.speed != SPEED_INVALID) {
3098 u32 bmcr, orig_bmcr;
3100 tp->link_config.active_speed = tp->link_config.speed;
3101 tp->link_config.active_duplex = tp->link_config.duplex;
3103 bmcr = 0;
3104 switch (tp->link_config.speed) {
3105 default:
3106 case SPEED_10:
3107 break;
3109 case SPEED_100:
3110 bmcr |= BMCR_SPEED100;
3111 break;
3113 case SPEED_1000:
3114 bmcr |= BMCR_SPEED1000;
3115 break;
3118 if (tp->link_config.duplex == DUPLEX_FULL)
3119 bmcr |= BMCR_FULLDPLX;
3121 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3122 (bmcr != orig_bmcr)) {
3123 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3124 for (i = 0; i < 1500; i++) {
3125 u32 tmp;
3127 udelay(10);
3128 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3129 tg3_readphy(tp, MII_BMSR, &tmp))
3130 continue;
3131 if (!(tmp & BMSR_LSTATUS)) {
3132 udelay(40);
3133 break;
3136 tg3_writephy(tp, MII_BMCR, bmcr);
3137 udelay(40);
3139 } else {
3140 tg3_writephy(tp, MII_BMCR,
3141 BMCR_ANENABLE | BMCR_ANRESTART);
3145 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3147 int err;
3149 /* Turn off tap power management. */
3150 /* Set Extended packet length bit */
3151 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3153 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3154 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3155 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3156 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3157 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3159 udelay(40);
3161 return err;
3164 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3166 u32 adv_reg, all_mask = 0;
3168 if (mask & ADVERTISED_10baseT_Half)
3169 all_mask |= ADVERTISE_10HALF;
3170 if (mask & ADVERTISED_10baseT_Full)
3171 all_mask |= ADVERTISE_10FULL;
3172 if (mask & ADVERTISED_100baseT_Half)
3173 all_mask |= ADVERTISE_100HALF;
3174 if (mask & ADVERTISED_100baseT_Full)
3175 all_mask |= ADVERTISE_100FULL;
3177 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3178 return 0;
3180 if ((adv_reg & all_mask) != all_mask)
3181 return 0;
3182 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3183 u32 tg3_ctrl;
3185 all_mask = 0;
3186 if (mask & ADVERTISED_1000baseT_Half)
3187 all_mask |= ADVERTISE_1000HALF;
3188 if (mask & ADVERTISED_1000baseT_Full)
3189 all_mask |= ADVERTISE_1000FULL;
3191 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3192 return 0;
3194 if ((tg3_ctrl & all_mask) != all_mask)
3195 return 0;
3197 return 1;
3200 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3202 u32 curadv, reqadv;
3204 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3205 return 1;
3207 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3208 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3210 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3211 if (curadv != reqadv)
3212 return 0;
3214 if (tg3_flag(tp, PAUSE_AUTONEG))
3215 tg3_readphy(tp, MII_LPA, rmtadv);
3216 } else {
3217 /* Reprogram the advertisement register, even if it
3218 * does not affect the current link. If the link
3219 * gets renegotiated in the future, we can save an
3220 * additional renegotiation cycle by advertising
3221 * it correctly in the first place.
3223 if (curadv != reqadv) {
3224 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3225 ADVERTISE_PAUSE_ASYM);
3226 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3230 return 1;
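/* Bring the copper link into line with the requested settings:
 * reset third-party PHYs that need it on link down, wait for BMSR
 * link status, derive speed/duplex from the aux status register,
 * negotiate flow control, and finally reprogram MAC_MODE and the
 * carrier state to match.
 */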
3233 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3235 int current_link_up;
3236 u32 bmsr, val;
3237 u32 lcl_adv, rmt_adv;
3238 u16 current_speed;
3239 u8 current_duplex;
3240 int i, err;
3242 tw32(MAC_EVENT, 0);
3244 tw32_f(MAC_STATUS,
3245 (MAC_STATUS_SYNC_CHANGED |
3246 MAC_STATUS_CFG_CHANGED |
3247 MAC_STATUS_MI_COMPLETION |
3248 MAC_STATUS_LNKSTATE_CHANGED));
3249 udelay(40);
3251 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3252 tw32_f(MAC_MI_MODE,
3253 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3254 udelay(80);
3257 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3259 /* Some third-party PHYs need to be reset on link going
3260 * down.
3262 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3263 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3265 netif_carrier_ok(tp->dev)) {
3266 tg3_readphy(tp, MII_BMSR, &bmsr);
3267 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3268 !(bmsr & BMSR_LSTATUS))
3269 force_reset = 1;
3271 if (force_reset)
3272 tg3_phy_reset(tp);
3274 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3275 tg3_readphy(tp, MII_BMSR, &bmsr);
3276 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3277 !tg3_flag(tp, INIT_COMPLETE))
3278 bmsr = 0;
3280 if (!(bmsr & BMSR_LSTATUS)) {
3281 err = tg3_init_5401phy_dsp(tp);
3282 if (err)
3283 return err;
3285 tg3_readphy(tp, MII_BMSR, &bmsr);
3286 for (i = 0; i < 1000; i++) {
3287 udelay(10);
3288 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3289 (bmsr & BMSR_LSTATUS)) {
3290 udelay(40);
3291 break;
3295 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3296 TG3_PHY_REV_BCM5401_B0 &&
3297 !(bmsr & BMSR_LSTATUS) &&
3298 tp->link_config.active_speed == SPEED_1000) {
3299 err = tg3_phy_reset(tp);
3300 if (!err)
3301 err = tg3_init_5401phy_dsp(tp);
3302 if (err)
3303 return err;
3306 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3307 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3308 /* 5701 {A0,B0} CRC bug workaround */
3309 tg3_writephy(tp, 0x15, 0x0a75);
3310 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3311 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3312 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3315 /* Clear pending interrupts... */
3316 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3317 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3319 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3320 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3321 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3322 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3324 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3325 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3326 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3327 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3328 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3329 else
3330 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3333 current_link_up = 0;
3334 current_speed = SPEED_INVALID;
3335 current_duplex = DUPLEX_INVALID;
3337 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3338 err = tg3_phy_auxctl_read(tp,
3339 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3340 &val);
3341 if (!err && !(val & (1 << 10))) {
3342 tg3_phy_auxctl_write(tp,
3343 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3344 val | (1 << 10));
3345 goto relink;
3349 bmsr = 0;
3350 for (i = 0; i < 100; i++) {
3351 tg3_readphy(tp, MII_BMSR, &bmsr);
3352 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3353 (bmsr & BMSR_LSTATUS))
3354 break;
3355 udelay(40);
3358 if (bmsr & BMSR_LSTATUS) {
3359 u32 aux_stat, bmcr;
3361 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3362 for (i = 0; i < 2000; i++) {
3363 udelay(10);
3364 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3365 aux_stat)
3366 break;
3369 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3370 &current_speed,
3371 &current_duplex);
3373 bmcr = 0;
3374 for (i = 0; i < 200; i++) {
3375 tg3_readphy(tp, MII_BMCR, &bmcr);
3376 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3377 continue;
3378 if (bmcr && bmcr != 0x7fff)
3379 break;
3380 udelay(10);
3383 lcl_adv = 0;
3384 rmt_adv = 0;
3386 tp->link_config.active_speed = current_speed;
3387 tp->link_config.active_duplex = current_duplex;
3389 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3390 if ((bmcr & BMCR_ANENABLE) &&
3391 tg3_copper_is_advertising_all(tp,
3392 tp->link_config.advertising)) {
3393 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3394 &rmt_adv))
3395 current_link_up = 1;
3397 } else {
3398 if (!(bmcr & BMCR_ANENABLE) &&
3399 tp->link_config.speed == current_speed &&
3400 tp->link_config.duplex == current_duplex &&
3401 tp->link_config.flowctrl ==
3402 tp->link_config.active_flowctrl) {
3403 current_link_up = 1;
3407 if (current_link_up == 1 &&
3408 tp->link_config.active_duplex == DUPLEX_FULL)
3409 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3412 relink:
3413 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3414 tg3_phy_copper_begin(tp);
3416 tg3_readphy(tp, MII_BMSR, &bmsr);
3417 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3418 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3419 current_link_up = 1;
3422 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3423 if (current_link_up == 1) {
3424 if (tp->link_config.active_speed == SPEED_100 ||
3425 tp->link_config.active_speed == SPEED_10)
3426 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3427 else
3428 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3429 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3430 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3431 else
3432 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3434 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3435 if (tp->link_config.active_duplex == DUPLEX_HALF)
3436 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3438 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3439 if (current_link_up == 1 &&
3440 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3441 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3442 else
3443 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3446 /* ??? Without this setting Netgear GA302T PHY does not
3447 * ??? send/receive packets...
3449 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3450 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3451 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3452 tw32_f(MAC_MI_MODE, tp->mi_mode);
3453 udelay(80);
3456 tw32_f(MAC_MODE, tp->mac_mode);
3457 udelay(40);
3459 tg3_phy_eee_adjust(tp, current_link_up);
3461 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3462 /* Polled via timer. */
3463 tw32_f(MAC_EVENT, 0);
3464 } else {
3465 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3467 udelay(40);
3469 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3470 current_link_up == 1 &&
3471 tp->link_config.active_speed == SPEED_1000 &&
3472 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3473 udelay(120);
3474 tw32_f(MAC_STATUS,
3475 (MAC_STATUS_SYNC_CHANGED |
3476 MAC_STATUS_CFG_CHANGED));
3477 udelay(40);
3478 tg3_write_mem(tp,
3479 NIC_SRAM_FIRMWARE_MBOX,
3480 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3483 /* Prevent send BD corruption. */
3484 if (tg3_flag(tp, CLKREQ_BUG)) {
3485 u16 oldlnkctl, newlnkctl;
3487 pci_read_config_word(tp->pdev,
3488 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3489 &oldlnkctl);
3490 if (tp->link_config.active_speed == SPEED_100 ||
3491 tp->link_config.active_speed == SPEED_10)
3492 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3493 else
3494 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3495 if (newlnkctl != oldlnkctl)
3496 pci_write_config_word(tp->pdev,
3497 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3498 newlnkctl);
3501 if (current_link_up != netif_carrier_ok(tp->dev)) {
3502 if (current_link_up)
3503 netif_carrier_on(tp->dev);
3504 else
3505 netif_carrier_off(tp->dev);
3506 tg3_link_report(tp);
3509 return 0;
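/* State and flags for the software 1000BASE-X autonegotiation
 * engine (IEEE 802.3 clause 37), used when the hardware autoneg
 * engine is unavailable.  The MR_* flag names mirror the
 * management-register variables in the standard.
 */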
3512 struct tg3_fiber_aneginfo {
3513 int state;
3514 #define ANEG_STATE_UNKNOWN 0
3515 #define ANEG_STATE_AN_ENABLE 1
3516 #define ANEG_STATE_RESTART_INIT 2
3517 #define ANEG_STATE_RESTART 3
3518 #define ANEG_STATE_DISABLE_LINK_OK 4
3519 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3520 #define ANEG_STATE_ABILITY_DETECT 6
3521 #define ANEG_STATE_ACK_DETECT_INIT 7
3522 #define ANEG_STATE_ACK_DETECT 8
3523 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3524 #define ANEG_STATE_COMPLETE_ACK 10
3525 #define ANEG_STATE_IDLE_DETECT_INIT 11
3526 #define ANEG_STATE_IDLE_DETECT 12
3527 #define ANEG_STATE_LINK_OK 13
3528 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3529 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3531 u32 flags;
3532 #define MR_AN_ENABLE 0x00000001
3533 #define MR_RESTART_AN 0x00000002
3534 #define MR_AN_COMPLETE 0x00000004
3535 #define MR_PAGE_RX 0x00000008
3536 #define MR_NP_LOADED 0x00000010
3537 #define MR_TOGGLE_TX 0x00000020
3538 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3539 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3540 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3541 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3542 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3543 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3544 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3545 #define MR_TOGGLE_RX 0x00002000
3546 #define MR_NP_RX 0x00004000
3548 #define MR_LINK_OK 0x80000000
3550 unsigned long link_time, cur_time;
3552 u32 ability_match_cfg;
3553 int ability_match_count;
3555 char ability_match, idle_match, ack_match;
3557 u32 txconfig, rxconfig;
3558 #define ANEG_CFG_NP 0x00000080
3559 #define ANEG_CFG_ACK 0x00000040
3560 #define ANEG_CFG_RF2 0x00000020
3561 #define ANEG_CFG_RF1 0x00000010
3562 #define ANEG_CFG_PS2 0x00000001
3563 #define ANEG_CFG_PS1 0x00008000
3564 #define ANEG_CFG_HD 0x00004000
3565 #define ANEG_CFG_FD 0x00002000
3566 #define ANEG_CFG_INVAL 0x00001f06
3569 #define ANEG_OK 0
3570 #define ANEG_DONE 1
3571 #define ANEG_TIMER_ENAB 2
3572 #define ANEG_FAILED -1
3574 #define ANEG_STATE_SETTLE_TIME 10000
3576 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3577 struct tg3_fiber_aneginfo *ap)
3579 u16 flowctrl;
3580 unsigned long delta;
3581 u32 rx_cfg_reg;
3582 int ret;
3584 if (ap->state == ANEG_STATE_UNKNOWN) {
3585 ap->rxconfig = 0;
3586 ap->link_time = 0;
3587 ap->cur_time = 0;
3588 ap->ability_match_cfg = 0;
3589 ap->ability_match_count = 0;
3590 ap->ability_match = 0;
3591 ap->idle_match = 0;
3592 ap->ack_match = 0;
3594 ap->cur_time++;
3596 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3597 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3599 if (rx_cfg_reg != ap->ability_match_cfg) {
3600 ap->ability_match_cfg = rx_cfg_reg;
3601 ap->ability_match = 0;
3602 ap->ability_match_count = 0;
3603 } else {
3604 if (++ap->ability_match_count > 1) {
3605 ap->ability_match = 1;
3606 ap->ability_match_cfg = rx_cfg_reg;
3609 if (rx_cfg_reg & ANEG_CFG_ACK)
3610 ap->ack_match = 1;
3611 else
3612 ap->ack_match = 0;
3614 ap->idle_match = 0;
3615 } else {
3616 ap->idle_match = 1;
3617 ap->ability_match_cfg = 0;
3618 ap->ability_match_count = 0;
3619 ap->ability_match = 0;
3620 ap->ack_match = 0;
3622 rx_cfg_reg = 0;
3625 ap->rxconfig = rx_cfg_reg;
3626 ret = ANEG_OK;
3628 switch (ap->state) {
3629 case ANEG_STATE_UNKNOWN:
3630 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3631 ap->state = ANEG_STATE_AN_ENABLE;
3633 /* fallthru */
3634 case ANEG_STATE_AN_ENABLE:
3635 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3636 if (ap->flags & MR_AN_ENABLE) {
3637 ap->link_time = 0;
3638 ap->cur_time = 0;
3639 ap->ability_match_cfg = 0;
3640 ap->ability_match_count = 0;
3641 ap->ability_match = 0;
3642 ap->idle_match = 0;
3643 ap->ack_match = 0;
3645 ap->state = ANEG_STATE_RESTART_INIT;
3646 } else {
3647 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3649 break;
3651 case ANEG_STATE_RESTART_INIT:
3652 ap->link_time = ap->cur_time;
3653 ap->flags &= ~(MR_NP_LOADED);
3654 ap->txconfig = 0;
3655 tw32(MAC_TX_AUTO_NEG, 0);
3656 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3657 tw32_f(MAC_MODE, tp->mac_mode);
3658 udelay(40);
3660 ret = ANEG_TIMER_ENAB;
3661 ap->state = ANEG_STATE_RESTART;
3663 /* fallthru */
3664 case ANEG_STATE_RESTART:
3665 delta = ap->cur_time - ap->link_time;
3666 if (delta > ANEG_STATE_SETTLE_TIME)
3667 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3668 else
3669 ret = ANEG_TIMER_ENAB;
3670 break;
3672 case ANEG_STATE_DISABLE_LINK_OK:
3673 ret = ANEG_DONE;
3674 break;
3676 case ANEG_STATE_ABILITY_DETECT_INIT:
3677 ap->flags &= ~(MR_TOGGLE_TX);
3678 ap->txconfig = ANEG_CFG_FD;
3679 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3680 if (flowctrl & ADVERTISE_1000XPAUSE)
3681 ap->txconfig |= ANEG_CFG_PS1;
3682 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3683 ap->txconfig |= ANEG_CFG_PS2;
3684 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3685 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3686 tw32_f(MAC_MODE, tp->mac_mode);
3687 udelay(40);
3689 ap->state = ANEG_STATE_ABILITY_DETECT;
3690 break;
3692 case ANEG_STATE_ABILITY_DETECT:
3693 if (ap->ability_match != 0 && ap->rxconfig != 0)
3694 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3695 break;
3697 case ANEG_STATE_ACK_DETECT_INIT:
3698 ap->txconfig |= ANEG_CFG_ACK;
3699 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3700 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3701 tw32_f(MAC_MODE, tp->mac_mode);
3702 udelay(40);
3704 ap->state = ANEG_STATE_ACK_DETECT;
3706 /* fallthru */
3707 case ANEG_STATE_ACK_DETECT:
3708 if (ap->ack_match != 0) {
3709 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3710 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3711 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3712 } else {
3713 ap->state = ANEG_STATE_AN_ENABLE;
3715 } else if (ap->ability_match != 0 &&
3716 ap->rxconfig == 0) {
3717 ap->state = ANEG_STATE_AN_ENABLE;
3719 break;
3721 case ANEG_STATE_COMPLETE_ACK_INIT:
3722 if (ap->rxconfig & ANEG_CFG_INVAL) {
3723 ret = ANEG_FAILED;
3724 break;
3726 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3727 MR_LP_ADV_HALF_DUPLEX |
3728 MR_LP_ADV_SYM_PAUSE |
3729 MR_LP_ADV_ASYM_PAUSE |
3730 MR_LP_ADV_REMOTE_FAULT1 |
3731 MR_LP_ADV_REMOTE_FAULT2 |
3732 MR_LP_ADV_NEXT_PAGE |
3733 MR_TOGGLE_RX |
3734 MR_NP_RX);
3735 if (ap->rxconfig & ANEG_CFG_FD)
3736 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3737 if (ap->rxconfig & ANEG_CFG_HD)
3738 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3739 if (ap->rxconfig & ANEG_CFG_PS1)
3740 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3741 if (ap->rxconfig & ANEG_CFG_PS2)
3742 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3743 if (ap->rxconfig & ANEG_CFG_RF1)
3744 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3745 if (ap->rxconfig & ANEG_CFG_RF2)
3746 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3747 if (ap->rxconfig & ANEG_CFG_NP)
3748 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3750 ap->link_time = ap->cur_time;
3752 ap->flags ^= (MR_TOGGLE_TX);
3753 if (ap->rxconfig & 0x0008)
3754 ap->flags |= MR_TOGGLE_RX;
3755 if (ap->rxconfig & ANEG_CFG_NP)
3756 ap->flags |= MR_NP_RX;
3757 ap->flags |= MR_PAGE_RX;
3759 ap->state = ANEG_STATE_COMPLETE_ACK;
3760 ret = ANEG_TIMER_ENAB;
3761 break;
3763 case ANEG_STATE_COMPLETE_ACK:
3764 if (ap->ability_match != 0 &&
3765 ap->rxconfig == 0) {
3766 ap->state = ANEG_STATE_AN_ENABLE;
3767 break;
3769 delta = ap->cur_time - ap->link_time;
3770 if (delta > ANEG_STATE_SETTLE_TIME) {
3771 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3772 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3773 } else {
3774 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3775 !(ap->flags & MR_NP_RX)) {
3776 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3777 } else {
3778 ret = ANEG_FAILED;
3782 break;
3784 case ANEG_STATE_IDLE_DETECT_INIT:
3785 ap->link_time = ap->cur_time;
3786 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3787 tw32_f(MAC_MODE, tp->mac_mode);
3788 udelay(40);
3790 ap->state = ANEG_STATE_IDLE_DETECT;
3791 ret = ANEG_TIMER_ENAB;
3792 break;
3794 case ANEG_STATE_IDLE_DETECT:
3795 if (ap->ability_match != 0 &&
3796 ap->rxconfig == 0) {
3797 ap->state = ANEG_STATE_AN_ENABLE;
3798 break;
3800 delta = ap->cur_time - ap->link_time;
3801 if (delta > ANEG_STATE_SETTLE_TIME) {
3802 /* XXX another gem from the Broadcom driver :( */
3803 ap->state = ANEG_STATE_LINK_OK;
3805 break;
3807 case ANEG_STATE_LINK_OK:
3808 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3809 ret = ANEG_DONE;
3810 break;
3812 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3813 /* ??? unimplemented */
3814 break;
3816 case ANEG_STATE_NEXT_PAGE_WAIT:
3817 /* ??? unimplemented */
3818 break;
3820 default:
3821 ret = ANEG_FAILED;
3822 break;
3825 return ret;
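/* Run the clause 37 state machine to completion.  The loop below
 * allows up to 195000 ticks of roughly 1us each (about 195ms)
 * before the negotiation is treated as failed.
 */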
3828 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3830 int res = 0;
3831 struct tg3_fiber_aneginfo aninfo;
3832 int status = ANEG_FAILED;
3833 unsigned int tick;
3834 u32 tmp;
3836 tw32_f(MAC_TX_AUTO_NEG, 0);
3838 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3839 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3840 udelay(40);
3842 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3843 udelay(40);
3845 memset(&aninfo, 0, sizeof(aninfo));
3846 aninfo.flags |= MR_AN_ENABLE;
3847 aninfo.state = ANEG_STATE_UNKNOWN;
3848 aninfo.cur_time = 0;
3849 tick = 0;
3850 while (++tick < 195000) {
3851 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3852 if (status == ANEG_DONE || status == ANEG_FAILED)
3853 break;
3855 udelay(1);
3858 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3859 tw32_f(MAC_MODE, tp->mac_mode);
3860 udelay(40);
3862 *txflags = aninfo.txconfig;
3863 *rxflags = aninfo.flags;
3865 if (status == ANEG_DONE &&
3866 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3867 MR_LP_ADV_FULL_DUPLEX)))
3868 res = 1;
3870 return res;
3873 static void tg3_init_bcm8002(struct tg3 *tp)
3875 u32 mac_status = tr32(MAC_STATUS);
3876 int i;
3878 /* Reset when initializing for the first time, or when we have a link. */
3879 if (tg3_flag(tp, INIT_COMPLETE) &&
3880 !(mac_status & MAC_STATUS_PCS_SYNCED))
3881 return;
3883 /* Set PLL lock range. */
3884 tg3_writephy(tp, 0x16, 0x8007);
3886 /* SW reset */
3887 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3889 /* Wait for reset to complete. */
3890 /* XXX schedule_timeout() ... */
3891 for (i = 0; i < 500; i++)
3892 udelay(10);
3894 /* Config mode; select PMA/Ch 1 regs. */
3895 tg3_writephy(tp, 0x10, 0x8411);
3897 /* Enable auto-lock and comdet, select txclk for tx. */
3898 tg3_writephy(tp, 0x11, 0x0a10);
3900 tg3_writephy(tp, 0x18, 0x00a0);
3901 tg3_writephy(tp, 0x16, 0x41ff);
3903 /* Assert and deassert POR. */
3904 tg3_writephy(tp, 0x13, 0x0400);
3905 udelay(40);
3906 tg3_writephy(tp, 0x13, 0x0000);
3908 tg3_writephy(tp, 0x11, 0x0a50);
3909 udelay(40);
3910 tg3_writephy(tp, 0x11, 0x0a10);
3912 /* Wait for signal to stabilize */
3913 /* XXX schedule_timeout() ... */
3914 for (i = 0; i < 15000; i++)
3915 udelay(10);
3917 /* Deselect the channel register so we can read the PHYID
3918 * later.
3920 tg3_writephy(tp, 0x10, 0x8011);
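/* Drive the on-chip SG-DIG autoneg engine: program the expected
 * SG_DIG_CTRL value (pause bits derived from the requested flow
 * control), then interpret SG_DIG_STATUS.  Parallel detection is
 * used as a fallback when the link partner never completes
 * autonegotiation.
 */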
3923 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3925 u16 flowctrl;
3926 u32 sg_dig_ctrl, sg_dig_status;
3927 u32 serdes_cfg, expected_sg_dig_ctrl;
3928 int workaround, port_a;
3929 int current_link_up;
3931 serdes_cfg = 0;
3932 expected_sg_dig_ctrl = 0;
3933 workaround = 0;
3934 port_a = 1;
3935 current_link_up = 0;
3937 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3938 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3939 workaround = 1;
3940 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3941 port_a = 0;
3943 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3944 /* preserve bits 20-23 for voltage regulator */
3945 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3948 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3950 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3951 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3952 if (workaround) {
3953 u32 val = serdes_cfg;
3955 if (port_a)
3956 val |= 0xc010000;
3957 else
3958 val |= 0x4010000;
3959 tw32_f(MAC_SERDES_CFG, val);
3962 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3964 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3965 tg3_setup_flow_control(tp, 0, 0);
3966 current_link_up = 1;
3968 goto out;
3971 /* Want auto-negotiation. */
3972 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3974 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3975 if (flowctrl & ADVERTISE_1000XPAUSE)
3976 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3977 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3978 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3980 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3981 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3982 tp->serdes_counter &&
3983 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3984 MAC_STATUS_RCVD_CFG)) ==
3985 MAC_STATUS_PCS_SYNCED)) {
3986 tp->serdes_counter--;
3987 current_link_up = 1;
3988 goto out;
3990 restart_autoneg:
3991 if (workaround)
3992 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3993 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3994 udelay(5);
3995 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3997 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3998 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3999 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4000 MAC_STATUS_SIGNAL_DET)) {
4001 sg_dig_status = tr32(SG_DIG_STATUS);
4002 mac_status = tr32(MAC_STATUS);
4004 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4005 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4006 u32 local_adv = 0, remote_adv = 0;
4008 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4009 local_adv |= ADVERTISE_1000XPAUSE;
4010 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4011 local_adv |= ADVERTISE_1000XPSE_ASYM;
4013 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4014 remote_adv |= LPA_1000XPAUSE;
4015 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4016 remote_adv |= LPA_1000XPAUSE_ASYM;
4018 tg3_setup_flow_control(tp, local_adv, remote_adv);
4019 current_link_up = 1;
4020 tp->serdes_counter = 0;
4021 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4022 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4023 if (tp->serdes_counter)
4024 tp->serdes_counter--;
4025 else {
4026 if (workaround) {
4027 u32 val = serdes_cfg;
4029 if (port_a)
4030 val |= 0xc010000;
4031 else
4032 val |= 0x4010000;
4034 tw32_f(MAC_SERDES_CFG, val);
4037 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4038 udelay(40);
4040 /* Link parallel detection - link is up
4041 * only if we have PCS_SYNC and are not
4042 * receiving config code words. */
4043 mac_status = tr32(MAC_STATUS);
4044 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4045 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4046 tg3_setup_flow_control(tp, 0, 0);
4047 current_link_up = 1;
4048 tp->phy_flags |=
4049 TG3_PHYFLG_PARALLEL_DETECT;
4050 tp->serdes_counter =
4051 SERDES_PARALLEL_DET_TIMEOUT;
4052 } else
4053 goto restart_autoneg;
4056 } else {
4057 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4058 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4061 out:
4062 return current_link_up;
4065 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4067 int current_link_up = 0;
4069 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4070 goto out;
4072 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4073 u32 txflags, rxflags;
4074 int i;
4076 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4077 u32 local_adv = 0, remote_adv = 0;
4079 if (txflags & ANEG_CFG_PS1)
4080 local_adv |= ADVERTISE_1000XPAUSE;
4081 if (txflags & ANEG_CFG_PS2)
4082 local_adv |= ADVERTISE_1000XPSE_ASYM;
4084 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4085 remote_adv |= LPA_1000XPAUSE;
4086 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4087 remote_adv |= LPA_1000XPAUSE_ASYM;
4089 tg3_setup_flow_control(tp, local_adv, remote_adv);
4091 current_link_up = 1;
4093 for (i = 0; i < 30; i++) {
4094 udelay(20);
4095 tw32_f(MAC_STATUS,
4096 (MAC_STATUS_SYNC_CHANGED |
4097 MAC_STATUS_CFG_CHANGED));
4098 udelay(40);
4099 if ((tr32(MAC_STATUS) &
4100 (MAC_STATUS_SYNC_CHANGED |
4101 MAC_STATUS_CFG_CHANGED)) == 0)
4102 break;
4105 mac_status = tr32(MAC_STATUS);
4106 if (current_link_up == 0 &&
4107 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4108 !(mac_status & MAC_STATUS_RCVD_CFG))
4109 current_link_up = 1;
4110 } else {
4111 tg3_setup_flow_control(tp, 0, 0);
4113 /* Forcing 1000FD link up. */
4114 current_link_up = 1;
4116 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4117 udelay(40);
4119 tw32_f(MAC_MODE, tp->mac_mode);
4120 udelay(40);
4123 out:
4124 return current_link_up;
4127 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4129 u32 orig_pause_cfg;
4130 u16 orig_active_speed;
4131 u8 orig_active_duplex;
4132 u32 mac_status;
4133 int current_link_up;
4134 int i;
4136 orig_pause_cfg = tp->link_config.active_flowctrl;
4137 orig_active_speed = tp->link_config.active_speed;
4138 orig_active_duplex = tp->link_config.active_duplex;
4140 if (!tg3_flag(tp, HW_AUTONEG) &&
4141 netif_carrier_ok(tp->dev) &&
4142 tg3_flag(tp, INIT_COMPLETE)) {
4143 mac_status = tr32(MAC_STATUS);
4144 mac_status &= (MAC_STATUS_PCS_SYNCED |
4145 MAC_STATUS_SIGNAL_DET |
4146 MAC_STATUS_CFG_CHANGED |
4147 MAC_STATUS_RCVD_CFG);
4148 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4149 MAC_STATUS_SIGNAL_DET)) {
4150 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4151 MAC_STATUS_CFG_CHANGED));
4152 return 0;
4156 tw32_f(MAC_TX_AUTO_NEG, 0);
4158 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4159 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4160 tw32_f(MAC_MODE, tp->mac_mode);
4161 udelay(40);
4163 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4164 tg3_init_bcm8002(tp);
4166 /* Enable link change event even when serdes polling. */
4167 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4168 udelay(40);
4170 current_link_up = 0;
4171 mac_status = tr32(MAC_STATUS);
4173 if (tg3_flag(tp, HW_AUTONEG))
4174 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4175 else
4176 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4178 tp->napi[0].hw_status->status =
4179 (SD_STATUS_UPDATED |
4180 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4182 for (i = 0; i < 100; i++) {
4183 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4184 MAC_STATUS_CFG_CHANGED));
4185 udelay(5);
4186 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4187 MAC_STATUS_CFG_CHANGED |
4188 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4189 break;
4192 mac_status = tr32(MAC_STATUS);
4193 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4194 current_link_up = 0;
4195 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4196 tp->serdes_counter == 0) {
4197 tw32_f(MAC_MODE, (tp->mac_mode |
4198 MAC_MODE_SEND_CONFIGS));
4199 udelay(1);
4200 tw32_f(MAC_MODE, tp->mac_mode);
4204 if (current_link_up == 1) {
4205 tp->link_config.active_speed = SPEED_1000;
4206 tp->link_config.active_duplex = DUPLEX_FULL;
4207 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4208 LED_CTRL_LNKLED_OVERRIDE |
4209 LED_CTRL_1000MBPS_ON));
4210 } else {
4211 tp->link_config.active_speed = SPEED_INVALID;
4212 tp->link_config.active_duplex = DUPLEX_INVALID;
4213 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4214 LED_CTRL_LNKLED_OVERRIDE |
4215 LED_CTRL_TRAFFIC_OVERRIDE));
4218 if (current_link_up != netif_carrier_ok(tp->dev)) {
4219 if (current_link_up)
4220 netif_carrier_on(tp->dev);
4221 else
4222 netif_carrier_off(tp->dev);
4223 tg3_link_report(tp);
4224 } else {
4225 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4226 if (orig_pause_cfg != now_pause_cfg ||
4227 orig_active_speed != tp->link_config.active_speed ||
4228 orig_active_duplex != tp->link_config.active_duplex)
4229 tg3_link_report(tp);
4232 return 0;
4235 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4237 int current_link_up, err = 0;
4238 u32 bmsr, bmcr;
4239 u16 current_speed;
4240 u8 current_duplex;
4241 u32 local_adv, remote_adv;
4243 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4244 tw32_f(MAC_MODE, tp->mac_mode);
4245 udelay(40);
4247 tw32(MAC_EVENT, 0);
4249 tw32_f(MAC_STATUS,
4250 (MAC_STATUS_SYNC_CHANGED |
4251 MAC_STATUS_CFG_CHANGED |
4252 MAC_STATUS_MI_COMPLETION |
4253 MAC_STATUS_LNKSTATE_CHANGED));
4254 udelay(40);
4256 if (force_reset)
4257 tg3_phy_reset(tp);
4259 current_link_up = 0;
4260 current_speed = SPEED_INVALID;
4261 current_duplex = DUPLEX_INVALID;
4263 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4264 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
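/* BMSR is deliberately read twice: the link status bit is
* latched-low, so the first read returns the historical value
* and the second read the current state.
*/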
4265 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4266 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4267 bmsr |= BMSR_LSTATUS;
4268 else
4269 bmsr &= ~BMSR_LSTATUS;
4272 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4274 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4275 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4276 /* do nothing, just check for link up at the end */
4277 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4278 u32 adv, new_adv;
4280 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4281 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4282 ADVERTISE_1000XPAUSE |
4283 ADVERTISE_1000XPSE_ASYM |
4284 ADVERTISE_SLCT);
4286 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4288 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4289 new_adv |= ADVERTISE_1000XHALF;
4290 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4291 new_adv |= ADVERTISE_1000XFULL;
4293 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4294 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4295 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4296 tg3_writephy(tp, MII_BMCR, bmcr);
4298 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4299 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4300 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4302 return err;
4304 } else {
4305 u32 new_bmcr;
4307 bmcr &= ~BMCR_SPEED1000;
4308 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4310 if (tp->link_config.duplex == DUPLEX_FULL)
4311 new_bmcr |= BMCR_FULLDPLX;
4313 if (new_bmcr != bmcr) {
4314 /* BMCR_SPEED1000 is a reserved bit that needs
4315 * to be set on write.
4316 */
4317 new_bmcr |= BMCR_SPEED1000;
4319 /* Force a linkdown */
4320 if (netif_carrier_ok(tp->dev)) {
4321 u32 adv;
4323 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4324 adv &= ~(ADVERTISE_1000XFULL |
4325 ADVERTISE_1000XHALF |
4326 ADVERTISE_SLCT);
4327 tg3_writephy(tp, MII_ADVERTISE, adv);
4328 tg3_writephy(tp, MII_BMCR, bmcr |
4329 BMCR_ANRESTART |
4330 BMCR_ANENABLE);
4331 udelay(10);
4332 netif_carrier_off(tp->dev);
4334 tg3_writephy(tp, MII_BMCR, new_bmcr);
4335 bmcr = new_bmcr;
4336 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4337 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4338 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4339 ASIC_REV_5714) {
4340 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4341 bmsr |= BMSR_LSTATUS;
4342 else
4343 bmsr &= ~BMSR_LSTATUS;
4345 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4349 if (bmsr & BMSR_LSTATUS) {
4350 current_speed = SPEED_1000;
4351 current_link_up = 1;
4352 if (bmcr & BMCR_FULLDPLX)
4353 current_duplex = DUPLEX_FULL;
4354 else
4355 current_duplex = DUPLEX_HALF;
4357 local_adv = 0;
4358 remote_adv = 0;
4360 if (bmcr & BMCR_ANENABLE) {
4361 u32 common;
4363 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4364 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4365 common = local_adv & remote_adv;
4366 if (common & (ADVERTISE_1000XHALF |
4367 ADVERTISE_1000XFULL)) {
4368 if (common & ADVERTISE_1000XFULL)
4369 current_duplex = DUPLEX_FULL;
4370 else
4371 current_duplex = DUPLEX_HALF;
4372 } else if (!tg3_flag(tp, 5780_CLASS)) {
4373 /* Link is up via parallel detect */
4374 } else {
4375 current_link_up = 0;
4380 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4381 tg3_setup_flow_control(tp, local_adv, remote_adv);
4383 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4384 if (tp->link_config.active_duplex == DUPLEX_HALF)
4385 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4387 tw32_f(MAC_MODE, tp->mac_mode);
4388 udelay(40);
4390 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4392 tp->link_config.active_speed = current_speed;
4393 tp->link_config.active_duplex = current_duplex;
4395 if (current_link_up != netif_carrier_ok(tp->dev)) {
4396 if (current_link_up)
4397 netif_carrier_on(tp->dev);
4398 else {
4399 netif_carrier_off(tp->dev);
4400 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4402 tg3_link_report(tp);
4404 return err;
4407 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4409 if (tp->serdes_counter) {
4410 /* Give autoneg time to complete. */
4411 tp->serdes_counter--;
4412 return;
4415 if (!netif_carrier_ok(tp->dev) &&
4416 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4417 u32 bmcr;
4419 tg3_readphy(tp, MII_BMCR, &bmcr);
4420 if (bmcr & BMCR_ANENABLE) {
4421 u32 phy1, phy2;
4423 /* Select shadow register 0x1f */
4424 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4425 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4427 /* Select expansion interrupt status register */
4428 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4429 MII_TG3_DSP_EXP1_INT_STAT);
4430 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4431 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4433 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4434 /* We have signal detect and not receiving
4435 * config code words, link is up by parallel
4436 * detection.
4437 */
4439 bmcr &= ~BMCR_ANENABLE;
4440 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4441 tg3_writephy(tp, MII_BMCR, bmcr);
4442 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4445 } else if (netif_carrier_ok(tp->dev) &&
4446 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4447 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4448 u32 phy2;
4450 /* Select expansion interrupt status register */
4451 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4452 MII_TG3_DSP_EXP1_INT_STAT);
4453 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4454 if (phy2 & 0x20) {
4455 u32 bmcr;
4457 /* Config code words received, turn on autoneg. */
4458 tg3_readphy(tp, MII_BMCR, &bmcr);
4459 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4461 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
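/* Summary of the state machine above: while the link partner is
* sending config code words, normal 1000Base-X autoneg is used.
* If we see signal detect but no config code words, the partner
* presumably has autoneg disabled, so we force 1000/full by hand
* ("parallel detection"). If config code words reappear later,
* autoneg is switched back on and the flag is cleared.
*/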
4467 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4469 u32 val;
4470 int err;
4472 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4473 err = tg3_setup_fiber_phy(tp, force_reset);
4474 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4475 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4476 else
4477 err = tg3_setup_copper_phy(tp, force_reset);
4479 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4480 u32 scale;
4482 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4483 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4484 scale = 65;
4485 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4486 scale = 6;
4487 else
4488 scale = 12;
4490 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4491 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4492 tw32(GRC_MISC_CFG, val);
4495 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4496 (6 << TX_LENGTHS_IPG_SHIFT);
4497 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4498 val |= tr32(MAC_TX_LENGTHS) &
4499 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4500 TX_LENGTHS_CNT_DWN_VAL_MSK);
4502 if (tp->link_config.active_speed == SPEED_1000 &&
4503 tp->link_config.active_duplex == DUPLEX_HALF)
4504 tw32(MAC_TX_LENGTHS, val |
4505 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4506 else
4507 tw32(MAC_TX_LENGTHS, val |
4508 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4510 if (!tg3_flag(tp, 5705_PLUS)) {
4511 if (netif_carrier_ok(tp->dev)) {
4512 tw32(HOSTCC_STAT_COAL_TICKS,
4513 tp->coal.stats_block_coalesce_usecs);
4514 } else {
4515 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4519 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4520 val = tr32(PCIE_PWR_MGMT_THRESH);
4521 if (!netif_carrier_ok(tp->dev))
4522 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4523 tp->pwrmgmt_thresh;
4524 else
4525 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4526 tw32(PCIE_PWR_MGMT_THRESH, val);
4529 return err;
4532 static inline int tg3_irq_sync(struct tg3 *tp)
4534 return tp->irq_sync;
4537 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4539 int i;
4541 dst = (u32 *)((u8 *)dst + off);
4542 for (i = 0; i < len; i += sizeof(u32))
4543 *dst++ = tr32(off + i);
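/* dst is advanced by the register offset before the copy, so each
* register's value lands at its own offset within the dump buffer:
* a register at offset 0x68 ends up in regs[0x68 / sizeof(u32)].
*/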
4546 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4548 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4549 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4550 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4551 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4552 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4553 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4554 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4555 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4556 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4557 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4558 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4559 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4560 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4561 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4562 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4563 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4564 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4565 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4566 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4568 if (tg3_flag(tp, SUPPORT_MSIX))
4569 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4571 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4572 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4573 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4574 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4575 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4576 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4577 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4578 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4580 if (!tg3_flag(tp, 5705_PLUS)) {
4581 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4582 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4583 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4586 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4587 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4588 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4589 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4590 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4592 if (tg3_flag(tp, NVRAM))
4593 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4596 static void tg3_dump_state(struct tg3 *tp)
4598 int i;
4599 u32 *regs;
4601 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4602 if (!regs) {
4603 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4604 return;
4607 if (tg3_flag(tp, PCI_EXPRESS)) {
4608 /* Read up to but not including private PCI registers */
4609 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4610 regs[i / sizeof(u32)] = tr32(i);
4611 } else
4612 tg3_dump_legacy_regs(tp, regs);
4614 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4615 if (!regs[i + 0] && !regs[i + 1] &&
4616 !regs[i + 2] && !regs[i + 3])
4617 continue;
4619 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4620 i * 4,
4621 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4624 kfree(regs);
4626 for (i = 0; i < tp->irq_cnt; i++) {
4627 struct tg3_napi *tnapi = &tp->napi[i];
4629 /* SW status block */
4630 netdev_err(tp->dev,
4631 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4633 tnapi->hw_status->status,
4634 tnapi->hw_status->status_tag,
4635 tnapi->hw_status->rx_jumbo_consumer,
4636 tnapi->hw_status->rx_consumer,
4637 tnapi->hw_status->rx_mini_consumer,
4638 tnapi->hw_status->idx[0].rx_producer,
4639 tnapi->hw_status->idx[0].tx_consumer);
4641 netdev_err(tp->dev,
4642 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4644 tnapi->last_tag, tnapi->last_irq_tag,
4645 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4646 tnapi->rx_rcb_ptr,
4647 tnapi->prodring.rx_std_prod_idx,
4648 tnapi->prodring.rx_std_cons_idx,
4649 tnapi->prodring.rx_jmb_prod_idx,
4650 tnapi->prodring.rx_jmb_cons_idx);
4654 /* This is called whenever we suspect that the system chipset is re-
4655 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4656 * is bogus tx completions. We try to recover by setting the
4657 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4658 * in the workqueue.
4659 */
4660 static void tg3_tx_recover(struct tg3 *tp)
4662 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4663 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4665 netdev_warn(tp->dev,
4666 "The system may be re-ordering memory-mapped I/O "
4667 "cycles to the network device, attempting to recover. "
4668 "Please report the problem to the driver maintainer "
4669 "and include system chipset information.\n");
4671 spin_lock(&tp->lock);
4672 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4673 spin_unlock(&tp->lock);
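/* Recovery is deliberately deferred: this can be called from the
* tx completion path, so we only flag TX_RECOVERY_PENDING here and
* let the NAPI poll path schedule tg3_reset_task(), which switches
* the mailbox writers to the flushing variants and resets the chip
* from process context.
*/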
4676 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4678 /* Tell compiler to fetch tx indices from memory. */
4679 barrier();
4680 return tnapi->tx_pending -
4681 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
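/* The subtract-and-mask computes the ring distance between producer
* and consumer even across a wrap. E.g. with a 512-entry ring
* (mask 511), tx_prod == 2 and tx_cons == 508: (2 - 508) & 511 == 6
* descriptors are still owned by the chip, leaving tx_pending - 6
* usable slots.
*/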
4684 /* Tigon3 never reports partial packet sends. So we do not
4685 * need special logic to handle SKBs that have not had all
4686 * of their frags sent yet, like SunGEM does.
4687 */
4688 static void tg3_tx(struct tg3_napi *tnapi)
4690 struct tg3 *tp = tnapi->tp;
4691 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4692 u32 sw_idx = tnapi->tx_cons;
4693 struct netdev_queue *txq;
4694 int index = tnapi - tp->napi;
4696 if (tg3_flag(tp, ENABLE_TSS))
4697 index--;
4699 txq = netdev_get_tx_queue(tp->dev, index);
4701 while (sw_idx != hw_idx) {
4702 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4703 struct sk_buff *skb = ri->skb;
4704 int i, tx_bug = 0;
4706 if (unlikely(skb == NULL)) {
4707 tg3_tx_recover(tp);
4708 return;
4711 pci_unmap_single(tp->pdev,
4712 dma_unmap_addr(ri, mapping),
4713 skb_headlen(skb),
4714 PCI_DMA_TODEVICE);
4716 ri->skb = NULL;
4718 sw_idx = NEXT_TX(sw_idx);
4720 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4721 ri = &tnapi->tx_buffers[sw_idx];
4722 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4723 tx_bug = 1;
4725 pci_unmap_page(tp->pdev,
4726 dma_unmap_addr(ri, mapping),
4727 skb_shinfo(skb)->frags[i].size,
4728 PCI_DMA_TODEVICE);
4729 sw_idx = NEXT_TX(sw_idx);
4732 dev_kfree_skb(skb);
4734 if (unlikely(tx_bug)) {
4735 tg3_tx_recover(tp);
4736 return;
4740 tnapi->tx_cons = sw_idx;
4742 /* Need to make the tx_cons update visible to tg3_start_xmit()
4743 * before checking for netif_queue_stopped(). Without the
4744 * memory barrier, there is a small possibility that tg3_start_xmit()
4745 * will miss it and cause the queue to be stopped forever.
4746 */
4747 smp_mb();
4749 if (unlikely(netif_tx_queue_stopped(txq) &&
4750 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4751 __netif_tx_lock(txq, smp_processor_id());
4752 if (netif_tx_queue_stopped(txq) &&
4753 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4754 netif_tx_wake_queue(txq);
4755 __netif_tx_unlock(txq);
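/* The smp_mb() above pairs with the one in tg3_start_xmit(): the
* producer stops the queue and then re-checks tg3_tx_avail(), while
* we publish tx_cons and then re-check the stopped state under the
* tx queue lock. Whichever side runs second sees the other's
* update, so the queue cannot stay stopped while slots are free.
*/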
4759 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4761 if (!ri->skb)
4762 return;
4764 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4765 map_sz, PCI_DMA_FROMDEVICE);
4766 dev_kfree_skb_any(ri->skb);
4767 ri->skb = NULL;
4770 /* Returns size of skb allocated or < 0 on error.
4772 * We only need to fill in the address because the other members
4773 * of the RX descriptor are invariant, see tg3_init_rings.
4775 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4776 * posting buffers we only dirty the first cache line of the RX
4777 * descriptor (containing the address). Whereas for the RX status
4778 * buffers the cpu only reads the last cacheline of the RX descriptor
4779 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4780 */
4781 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4782 u32 opaque_key, u32 dest_idx_unmasked)
4784 struct tg3_rx_buffer_desc *desc;
4785 struct ring_info *map;
4786 struct sk_buff *skb;
4787 dma_addr_t mapping;
4788 int skb_size, dest_idx;
4790 switch (opaque_key) {
4791 case RXD_OPAQUE_RING_STD:
4792 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4793 desc = &tpr->rx_std[dest_idx];
4794 map = &tpr->rx_std_buffers[dest_idx];
4795 skb_size = tp->rx_pkt_map_sz;
4796 break;
4798 case RXD_OPAQUE_RING_JUMBO:
4799 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4800 desc = &tpr->rx_jmb[dest_idx].std;
4801 map = &tpr->rx_jmb_buffers[dest_idx];
4802 skb_size = TG3_RX_JMB_MAP_SZ;
4803 break;
4805 default:
4806 return -EINVAL;
4809 /* Do not overwrite any of the map or rp information
4810 * until we are sure we can commit to a new buffer.
4812 * Callers depend upon this behavior and assume that
4813 * we leave everything unchanged if we fail.
4814 */
4815 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4816 if (skb == NULL)
4817 return -ENOMEM;
4819 skb_reserve(skb, tp->rx_offset);
4821 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4822 PCI_DMA_FROMDEVICE);
4823 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4824 dev_kfree_skb(skb);
4825 return -EIO;
4828 map->skb = skb;
4829 dma_unmap_addr_set(map, mapping, mapping);
4831 desc->addr_hi = ((u64)mapping >> 32);
4832 desc->addr_lo = ((u64)mapping & 0xffffffff);
4834 return skb_size;
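/* The returned skb_size is also the length the caller later hands
* to pci_unmap_single() in tg3_rx(), so it must match the size
* used for the pci_map_single() call above.
*/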
4837 /* We only need to move over in the address because the other
4838 * members of the RX descriptor are invariant. See notes above
4839 * tg3_alloc_rx_skb for full details.
4840 */
4841 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4842 struct tg3_rx_prodring_set *dpr,
4843 u32 opaque_key, int src_idx,
4844 u32 dest_idx_unmasked)
4846 struct tg3 *tp = tnapi->tp;
4847 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4848 struct ring_info *src_map, *dest_map;
4849 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4850 int dest_idx;
4852 switch (opaque_key) {
4853 case RXD_OPAQUE_RING_STD:
4854 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4855 dest_desc = &dpr->rx_std[dest_idx];
4856 dest_map = &dpr->rx_std_buffers[dest_idx];
4857 src_desc = &spr->rx_std[src_idx];
4858 src_map = &spr->rx_std_buffers[src_idx];
4859 break;
4861 case RXD_OPAQUE_RING_JUMBO:
4862 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4863 dest_desc = &dpr->rx_jmb[dest_idx].std;
4864 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4865 src_desc = &spr->rx_jmb[src_idx].std;
4866 src_map = &spr->rx_jmb_buffers[src_idx];
4867 break;
4869 default:
4870 return;
4873 dest_map->skb = src_map->skb;
4874 dma_unmap_addr_set(dest_map, mapping,
4875 dma_unmap_addr(src_map, mapping));
4876 dest_desc->addr_hi = src_desc->addr_hi;
4877 dest_desc->addr_lo = src_desc->addr_lo;
4879 /* Ensure that the update to the skb happens after the physical
4880 * addresses have been transferred to the new BD location.
4881 */
4882 smp_wmb();
4884 src_map->skb = NULL;
4887 /* The RX ring scheme is composed of multiple rings which post fresh
4888 * buffers to the chip, and one special ring the chip uses to report
4889 * status back to the host.
4891 * The special ring reports the status of received packets to the
4892 * host. The chip does not write into the original descriptor the
4893 * RX buffer was obtained from. The chip simply takes the original
4894 * descriptor as provided by the host, updates the status and length
4895 * field, then writes this into the next status ring entry.
4897 * Each ring the host uses to post buffers to the chip is described
4898 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
4899 * it is first placed into the on-chip ram. When the packet's length
4900 * is known, it walks down the TG3_BDINFO entries to select the ring.
4901 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4902 * which is within the range of the new packet's length is chosen.
4904 * The "separate ring for rx status" scheme may sound queer, but it makes
4905 * sense from a cache coherency perspective. If only the host writes
4906 * to the buffer post rings, and only the chip writes to the rx status
4907 * rings, then cache lines never move beyond shared-modified state.
4908 * If both the host and chip were to write into the same ring, cache line
4909 * eviction could occur since both entities want it in an exclusive state.
4910 */
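/* A rough sketch of the flow described above:
*
*   host                                 chip
*   std/jumbo producer rings  ------->   DMA engine picks a BD,
*   (host posts fresh BDs)               receives a frame, then
*                                        writes status + length
*   rx return (status) ring   <-------   into the return ring
*   (host reads, then ACKs via the consumer mailbox)
*/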
4911 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4913 struct tg3 *tp = tnapi->tp;
4914 u32 work_mask, rx_std_posted = 0;
4915 u32 std_prod_idx, jmb_prod_idx;
4916 u32 sw_idx = tnapi->rx_rcb_ptr;
4917 u16 hw_idx;
4918 int received;
4919 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4921 hw_idx = *(tnapi->rx_rcb_prod_idx);
4922 /*
4923 * We need to order the read of hw_idx and the read of
4924 * the opaque cookie.
4925 */
4926 rmb();
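/* Without this barrier the CPU could read a return ring entry
* before reading hw_idx and see a stale descriptor at an index
* that hw_idx claims is valid.
*/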
4927 work_mask = 0;
4928 received = 0;
4929 std_prod_idx = tpr->rx_std_prod_idx;
4930 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4931 while (sw_idx != hw_idx && budget > 0) {
4932 struct ring_info *ri;
4933 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4934 unsigned int len;
4935 struct sk_buff *skb;
4936 dma_addr_t dma_addr;
4937 u32 opaque_key, desc_idx, *post_ptr;
4939 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4940 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4941 if (opaque_key == RXD_OPAQUE_RING_STD) {
4942 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4943 dma_addr = dma_unmap_addr(ri, mapping);
4944 skb = ri->skb;
4945 post_ptr = &std_prod_idx;
4946 rx_std_posted++;
4947 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4948 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4949 dma_addr = dma_unmap_addr(ri, mapping);
4950 skb = ri->skb;
4951 post_ptr = &jmb_prod_idx;
4952 } else
4953 goto next_pkt_nopost;
4955 work_mask |= opaque_key;
4957 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4958 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4959 drop_it:
4960 tg3_recycle_rx(tnapi, tpr, opaque_key,
4961 desc_idx, *post_ptr);
4962 drop_it_no_recycle:
4963 /* Other statistics kept track of by card. */
4964 tp->rx_dropped++;
4965 goto next_pkt;
4968 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4969 ETH_FCS_LEN;
4971 if (len > TG3_RX_COPY_THRESH(tp)) {
4972 int skb_size;
4974 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4975 *post_ptr);
4976 if (skb_size < 0)
4977 goto drop_it;
4979 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4980 PCI_DMA_FROMDEVICE);
4982 /* Ensure that the update to the skb happens
4983 * after the usage of the old DMA mapping.
4984 */
4985 smp_wmb();
4987 ri->skb = NULL;
4989 skb_put(skb, len);
4990 } else {
4991 struct sk_buff *copy_skb;
4993 tg3_recycle_rx(tnapi, tpr, opaque_key,
4994 desc_idx, *post_ptr);
4996 copy_skb = netdev_alloc_skb(tp->dev, len +
4997 TG3_RAW_IP_ALIGN);
4998 if (copy_skb == NULL)
4999 goto drop_it_no_recycle;
5001 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5002 skb_put(copy_skb, len);
5003 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5004 skb_copy_from_linear_data(skb, copy_skb->data, len);
5005 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5007 /* We'll reuse the original ring buffer. */
5008 skb = copy_skb;
5011 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5012 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5013 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5014 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5015 skb->ip_summed = CHECKSUM_UNNECESSARY;
5016 else
5017 skb_checksum_none_assert(skb);
5019 skb->protocol = eth_type_trans(skb, tp->dev);
5021 if (len > (tp->dev->mtu + ETH_HLEN) &&
5022 skb->protocol != htons(ETH_P_8021Q)) {
5023 dev_kfree_skb(skb);
5024 goto drop_it_no_recycle;
5027 if (desc->type_flags & RXD_FLAG_VLAN &&
5028 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5029 __vlan_hwaccel_put_tag(skb,
5030 desc->err_vlan & RXD_VLAN_MASK);
5032 napi_gro_receive(&tnapi->napi, skb);
5034 received++;
5035 budget--;
5037 next_pkt:
5038 (*post_ptr)++;
5040 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5041 tpr->rx_std_prod_idx = std_prod_idx &
5042 tp->rx_std_ring_mask;
5043 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5044 tpr->rx_std_prod_idx);
5045 work_mask &= ~RXD_OPAQUE_RING_STD;
5046 rx_std_posted = 0;
5048 next_pkt_nopost:
5049 sw_idx++;
5050 sw_idx &= tp->rx_ret_ring_mask;
5052 /* Refresh hw_idx to see if there is new work */
5053 if (sw_idx == hw_idx) {
5054 hw_idx = *(tnapi->rx_rcb_prod_idx);
5055 rmb();
5059 /* ACK the status ring. */
5060 tnapi->rx_rcb_ptr = sw_idx;
5061 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5063 /* Refill RX ring(s). */
5064 if (!tg3_flag(tp, ENABLE_RSS)) {
5065 if (work_mask & RXD_OPAQUE_RING_STD) {
5066 tpr->rx_std_prod_idx = std_prod_idx &
5067 tp->rx_std_ring_mask;
5068 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5069 tpr->rx_std_prod_idx);
5071 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5072 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5073 tp->rx_jmb_ring_mask;
5074 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5075 tpr->rx_jmb_prod_idx);
5077 mmiowb();
5078 } else if (work_mask) {
5079 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5080 * updated before the producer indices can be updated.
5081 */
5082 smp_wmb();
5084 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5085 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5087 if (tnapi != &tp->napi[1])
5088 napi_schedule(&tp->napi[1].napi);
5091 return received;
5094 static void tg3_poll_link(struct tg3 *tp)
5096 /* handle link change and other phy events */
5097 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5098 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5100 if (sblk->status & SD_STATUS_LINK_CHG) {
5101 sblk->status = SD_STATUS_UPDATED |
5102 (sblk->status & ~SD_STATUS_LINK_CHG);
5103 spin_lock(&tp->lock);
5104 if (tg3_flag(tp, USE_PHYLIB)) {
5105 tw32_f(MAC_STATUS,
5106 (MAC_STATUS_SYNC_CHANGED |
5107 MAC_STATUS_CFG_CHANGED |
5108 MAC_STATUS_MI_COMPLETION |
5109 MAC_STATUS_LNKSTATE_CHANGED));
5110 udelay(40);
5111 } else
5112 tg3_setup_phy(tp, 0);
5113 spin_unlock(&tp->lock);
5118 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5119 struct tg3_rx_prodring_set *dpr,
5120 struct tg3_rx_prodring_set *spr)
5122 u32 si, di, cpycnt, src_prod_idx;
5123 int i, err = 0;
5125 while (1) {
5126 src_prod_idx = spr->rx_std_prod_idx;
5128 /* Make sure updates to the rx_std_buffers[] entries and the
5129 * standard producer index are seen in the correct order.
5130 */
5131 smp_rmb();
5133 if (spr->rx_std_cons_idx == src_prod_idx)
5134 break;
5136 if (spr->rx_std_cons_idx < src_prod_idx)
5137 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5138 else
5139 cpycnt = tp->rx_std_ring_mask + 1 -
5140 spr->rx_std_cons_idx;
5142 cpycnt = min(cpycnt,
5143 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5145 si = spr->rx_std_cons_idx;
5146 di = dpr->rx_std_prod_idx;
5148 for (i = di; i < di + cpycnt; i++) {
5149 if (dpr->rx_std_buffers[i].skb) {
5150 cpycnt = i - di;
5151 err = -ENOSPC;
5152 break;
5156 if (!cpycnt)
5157 break;
5159 /* Ensure that updates to the rx_std_buffers ring and the
5160 * shadowed hardware producer ring from tg3_recycle_skb() are
5161 * ordered correctly WRT the skb check above.
5162 */
5163 smp_rmb();
5165 memcpy(&dpr->rx_std_buffers[di],
5166 &spr->rx_std_buffers[si],
5167 cpycnt * sizeof(struct ring_info));
5169 for (i = 0; i < cpycnt; i++, di++, si++) {
5170 struct tg3_rx_buffer_desc *sbd, *dbd;
5171 sbd = &spr->rx_std[si];
5172 dbd = &dpr->rx_std[di];
5173 dbd->addr_hi = sbd->addr_hi;
5174 dbd->addr_lo = sbd->addr_lo;
5177 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5178 tp->rx_std_ring_mask;
5179 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5180 tp->rx_std_ring_mask;
5183 while (1) {
5184 src_prod_idx = spr->rx_jmb_prod_idx;
5186 /* Make sure updates to the rx_jmb_buffers[] entries and
5187 * the jumbo producer index are seen in the correct order.
5188 */
5189 smp_rmb();
5191 if (spr->rx_jmb_cons_idx == src_prod_idx)
5192 break;
5194 if (spr->rx_jmb_cons_idx < src_prod_idx)
5195 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5196 else
5197 cpycnt = tp->rx_jmb_ring_mask + 1 -
5198 spr->rx_jmb_cons_idx;
5200 cpycnt = min(cpycnt,
5201 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5203 si = spr->rx_jmb_cons_idx;
5204 di = dpr->rx_jmb_prod_idx;
5206 for (i = di; i < di + cpycnt; i++) {
5207 if (dpr->rx_jmb_buffers[i].skb) {
5208 cpycnt = i - di;
5209 err = -ENOSPC;
5210 break;
5214 if (!cpycnt)
5215 break;
5217 /* Ensure that updates to the rx_jmb_buffers ring and the
5218 * shadowed hardware producer ring from tg3_recycle_skb() are
5219 * ordered correctly WRT the skb check above.
5220 */
5221 smp_rmb();
5223 memcpy(&dpr->rx_jmb_buffers[di],
5224 &spr->rx_jmb_buffers[si],
5225 cpycnt * sizeof(struct ring_info));
5227 for (i = 0; i < cpycnt; i++, di++, si++) {
5228 struct tg3_rx_buffer_desc *sbd, *dbd;
5229 sbd = &spr->rx_jmb[si].std;
5230 dbd = &dpr->rx_jmb[di].std;
5231 dbd->addr_hi = sbd->addr_hi;
5232 dbd->addr_lo = sbd->addr_lo;
5235 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5236 tp->rx_jmb_ring_mask;
5237 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5238 tp->rx_jmb_ring_mask;
5241 return err;
5244 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5246 struct tg3 *tp = tnapi->tp;
5248 /* run TX completion thread */
5249 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5250 tg3_tx(tnapi);
5251 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5252 return work_done;
5255 /* run RX thread, within the bounds set by NAPI.
5256 * All RX "locking" is done by ensuring outside
5257 * code synchronizes with tg3->napi.poll()
5258 */
5259 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5260 work_done += tg3_rx(tnapi, budget - work_done);
5262 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5263 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5264 int i, err = 0;
5265 u32 std_prod_idx = dpr->rx_std_prod_idx;
5266 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5268 for (i = 1; i < tp->irq_cnt; i++)
5269 err |= tg3_rx_prodring_xfer(tp, dpr,
5270 &tp->napi[i].prodring);
5272 wmb();
5274 if (std_prod_idx != dpr->rx_std_prod_idx)
5275 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5276 dpr->rx_std_prod_idx);
5278 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5279 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5280 dpr->rx_jmb_prod_idx);
5282 mmiowb();
5284 if (err)
5285 tw32_f(HOSTCC_MODE, tp->coal_now);
5288 return work_done;
5291 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5293 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5294 struct tg3 *tp = tnapi->tp;
5295 int work_done = 0;
5296 struct tg3_hw_status *sblk = tnapi->hw_status;
5298 while (1) {
5299 work_done = tg3_poll_work(tnapi, work_done, budget);
5301 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5302 goto tx_recovery;
5304 if (unlikely(work_done >= budget))
5305 break;
5307 /* tp->last_tag is used in tg3_int_reenable() below
5308 * to tell the hw how much work has been processed,
5309 * so we must read it before checking for more work.
5310 */
5311 tnapi->last_tag = sblk->status_tag;
5312 tnapi->last_irq_tag = tnapi->last_tag;
5313 rmb();
5315 /* check for RX/TX work to do */
5316 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5317 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5318 napi_complete(napi);
5319 /* Reenable interrupts. */
5320 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5321 mmiowb();
5322 break;
5326 return work_done;
5328 tx_recovery:
5329 /* work_done is guaranteed to be less than budget. */
5330 napi_complete(napi);
5331 schedule_work(&tp->reset_task);
5332 return work_done;
5335 static void tg3_process_error(struct tg3 *tp)
5337 u32 val;
5338 bool real_error = false;
5340 if (tg3_flag(tp, ERROR_PROCESSED))
5341 return;
5343 /* Check Flow Attention register */
5344 val = tr32(HOSTCC_FLOW_ATTN);
5345 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5346 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5347 real_error = true;
5350 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5351 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5352 real_error = true;
5355 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5356 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5357 real_error = true;
5360 if (!real_error)
5361 return;
5363 tg3_dump_state(tp);
5365 tg3_flag_set(tp, ERROR_PROCESSED);
5366 schedule_work(&tp->reset_task);
5369 static int tg3_poll(struct napi_struct *napi, int budget)
5371 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5372 struct tg3 *tp = tnapi->tp;
5373 int work_done = 0;
5374 struct tg3_hw_status *sblk = tnapi->hw_status;
5376 while (1) {
5377 if (sblk->status & SD_STATUS_ERROR)
5378 tg3_process_error(tp);
5380 tg3_poll_link(tp);
5382 work_done = tg3_poll_work(tnapi, work_done, budget);
5384 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5385 goto tx_recovery;
5387 if (unlikely(work_done >= budget))
5388 break;
5390 if (tg3_flag(tp, TAGGED_STATUS)) {
5391 /* tp->last_tag is used in tg3_int_reenable() below
5392 * to tell the hw how much work has been processed,
5393 * so we must read it before checking for more work.
5394 */
5395 tnapi->last_tag = sblk->status_tag;
5396 tnapi->last_irq_tag = tnapi->last_tag;
5397 rmb();
5398 } else
5399 sblk->status &= ~SD_STATUS_UPDATED;
5401 if (likely(!tg3_has_work(tnapi))) {
5402 napi_complete(napi);
5403 tg3_int_reenable(tnapi);
5404 break;
5408 return work_done;
5410 tx_recovery:
5411 /* work_done is guaranteed to be less than budget. */
5412 napi_complete(napi);
5413 schedule_work(&tp->reset_task);
5414 return work_done;
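/* NAPI contract for both poll routines above: keep processing until
* either the budget is exhausted (return work_done == budget and
* stay scheduled) or no work remains (napi_complete(), re-enable
* chip interrupts, and return work_done < budget).
*/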
5417 static void tg3_napi_disable(struct tg3 *tp)
5419 int i;
5421 for (i = tp->irq_cnt - 1; i >= 0; i--)
5422 napi_disable(&tp->napi[i].napi);
5425 static void tg3_napi_enable(struct tg3 *tp)
5427 int i;
5429 for (i = 0; i < tp->irq_cnt; i++)
5430 napi_enable(&tp->napi[i].napi);
5433 static void tg3_napi_init(struct tg3 *tp)
5435 int i;
5437 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5438 for (i = 1; i < tp->irq_cnt; i++)
5439 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5442 static void tg3_napi_fini(struct tg3 *tp)
5444 int i;
5446 for (i = 0; i < tp->irq_cnt; i++)
5447 netif_napi_del(&tp->napi[i].napi);
5450 static inline void tg3_netif_stop(struct tg3 *tp)
5452 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5453 tg3_napi_disable(tp);
5454 netif_tx_disable(tp->dev);
5457 static inline void tg3_netif_start(struct tg3 *tp)
5459 /* NOTE: unconditional netif_tx_wake_all_queues is only
5460 * appropriate so long as all callers are assured to
5461 * have free tx slots (such as after tg3_init_hw)
5462 */
5463 netif_tx_wake_all_queues(tp->dev);
5465 tg3_napi_enable(tp);
5466 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5467 tg3_enable_ints(tp);
5470 static void tg3_irq_quiesce(struct tg3 *tp)
5472 int i;
5474 BUG_ON(tp->irq_sync);
5476 tp->irq_sync = 1;
5477 smp_mb();
5479 for (i = 0; i < tp->irq_cnt; i++)
5480 synchronize_irq(tp->napi[i].irq_vec);
5483 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5484 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5485 * with as well. Most of the time, this is not necessary except when
5486 * shutting down the device.
5487 */
5488 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5490 spin_lock_bh(&tp->lock);
5491 if (irq_sync)
5492 tg3_irq_quiesce(tp);
5495 static inline void tg3_full_unlock(struct tg3 *tp)
5497 spin_unlock_bh(&tp->lock);
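/* Typical usage, roughly as in tg3_reset_task() below: pass
* irq_sync == 1 when the hardware is about to be reconfigured so
* that in-flight interrupt handlers are drained first:
*
*   tg3_full_lock(tp, 1);
*   tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
*   err = tg3_init_hw(tp, 1);
*   tg3_full_unlock(tp);
*/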
5500 /* One-shot MSI handler - Chip automatically disables interrupt
5501 * after sending MSI so driver doesn't have to do it.
5502 */
5503 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5505 struct tg3_napi *tnapi = dev_id;
5506 struct tg3 *tp = tnapi->tp;
5508 prefetch(tnapi->hw_status);
5509 if (tnapi->rx_rcb)
5510 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5512 if (likely(!tg3_irq_sync(tp)))
5513 napi_schedule(&tnapi->napi);
5515 return IRQ_HANDLED;
5518 /* MSI ISR - No need to check for interrupt sharing and no need to
5519 * flush status block and interrupt mailbox. PCI ordering rules
5520 * guarantee that MSI will arrive after the status block.
5521 */
5522 static irqreturn_t tg3_msi(int irq, void *dev_id)
5524 struct tg3_napi *tnapi = dev_id;
5525 struct tg3 *tp = tnapi->tp;
5527 prefetch(tnapi->hw_status);
5528 if (tnapi->rx_rcb)
5529 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5530 /*
5531 * Writing any value to intr-mbox-0 clears PCI INTA# and
5532 * chip-internal interrupt pending events.
5533 * Writing non-zero to intr-mbox-0 additionally tells the
5534 * NIC to stop sending us irqs, engaging "in-intr-handler"
5535 * event coalescing.
5536 */
5537 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5538 if (likely(!tg3_irq_sync(tp)))
5539 napi_schedule(&tnapi->napi);
5541 return IRQ_RETVAL(1);
5544 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5546 struct tg3_napi *tnapi = dev_id;
5547 struct tg3 *tp = tnapi->tp;
5548 struct tg3_hw_status *sblk = tnapi->hw_status;
5549 unsigned int handled = 1;
5551 /* In INTx mode, it is possible for the interrupt to arrive at
5552 * the CPU before the status block posted prior to the interrupt.
5553 * Reading the PCI State register will confirm whether the
5554 * interrupt is ours and will flush the status block.
5555 */
5556 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5557 if (tg3_flag(tp, CHIP_RESETTING) ||
5558 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5559 handled = 0;
5560 goto out;
5564 /*
5565 * Writing any value to intr-mbox-0 clears PCI INTA# and
5566 * chip-internal interrupt pending events.
5567 * Writing non-zero to intr-mbox-0 additionally tells the
5568 * NIC to stop sending us irqs, engaging "in-intr-handler"
5569 * event coalescing.
5570 *
5571 * Flush the mailbox to de-assert the IRQ immediately to prevent
5572 * spurious interrupts. The flush impacts performance but
5573 * excessive spurious interrupts can be worse in some cases.
5574 */
5575 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5576 if (tg3_irq_sync(tp))
5577 goto out;
5578 sblk->status &= ~SD_STATUS_UPDATED;
5579 if (likely(tg3_has_work(tnapi))) {
5580 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5581 napi_schedule(&tnapi->napi);
5582 } else {
5583 /* No work, shared interrupt perhaps? re-enable
5584 * interrupts, and flush that PCI write
5585 */
5586 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5587 0x00000000);
5589 out:
5590 return IRQ_RETVAL(handled);
5593 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5595 struct tg3_napi *tnapi = dev_id;
5596 struct tg3 *tp = tnapi->tp;
5597 struct tg3_hw_status *sblk = tnapi->hw_status;
5598 unsigned int handled = 1;
5600 /* In INTx mode, it is possible for the interrupt to arrive at
5601 * the CPU before the status block posted prior to the interrupt.
5602 * Reading the PCI State register will confirm whether the
5603 * interrupt is ours and will flush the status block.
5604 */
5605 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5606 if (tg3_flag(tp, CHIP_RESETTING) ||
5607 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5608 handled = 0;
5609 goto out;
5613 /*
5614 * Writing any value to intr-mbox-0 clears PCI INTA# and
5615 * chip-internal interrupt pending events.
5616 * Writing non-zero to intr-mbox-0 additionally tells the
5617 * NIC to stop sending us irqs, engaging "in-intr-handler"
5618 * event coalescing.
5619 *
5620 * Flush the mailbox to de-assert the IRQ immediately to prevent
5621 * spurious interrupts. The flush impacts performance but
5622 * excessive spurious interrupts can be worse in some cases.
5623 */
5624 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5626 /*
5627 * In a shared interrupt configuration, sometimes other devices'
5628 * interrupts will scream. We record the current status tag here
5629 * so that the above check can report that the screaming interrupts
5630 * are unhandled. Eventually they will be silenced.
5631 */
5632 tnapi->last_irq_tag = sblk->status_tag;
5634 if (tg3_irq_sync(tp))
5635 goto out;
5637 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5639 napi_schedule(&tnapi->napi);
5641 out:
5642 return IRQ_RETVAL(handled);
5645 /* ISR for interrupt test */
5646 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5648 struct tg3_napi *tnapi = dev_id;
5649 struct tg3 *tp = tnapi->tp;
5650 struct tg3_hw_status *sblk = tnapi->hw_status;
5652 if ((sblk->status & SD_STATUS_UPDATED) ||
5653 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5654 tg3_disable_ints(tp);
5655 return IRQ_RETVAL(1);
5657 return IRQ_RETVAL(0);
5660 static int tg3_init_hw(struct tg3 *, int);
5661 static int tg3_halt(struct tg3 *, int, int);
5663 /* Restart hardware after configuration changes, self-test, etc.
5664 * Invoked with tp->lock held.
5665 */
5666 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5667 __releases(tp->lock)
5668 __acquires(tp->lock)
5670 int err;
5672 err = tg3_init_hw(tp, reset_phy);
5673 if (err) {
5674 netdev_err(tp->dev,
5675 "Failed to re-initialize device, aborting\n");
5676 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5677 tg3_full_unlock(tp);
5678 del_timer_sync(&tp->timer);
5679 tp->irq_sync = 0;
5680 tg3_napi_enable(tp);
5681 dev_close(tp->dev);
5682 tg3_full_lock(tp, 0);
5684 return err;
5687 #ifdef CONFIG_NET_POLL_CONTROLLER
5688 static void tg3_poll_controller(struct net_device *dev)
5690 int i;
5691 struct tg3 *tp = netdev_priv(dev);
5693 for (i = 0; i < tp->irq_cnt; i++)
5694 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5696 #endif
5698 static void tg3_reset_task(struct work_struct *work)
5700 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5701 int err;
5702 unsigned int restart_timer;
5704 tg3_full_lock(tp, 0);
5706 if (!netif_running(tp->dev)) {
5707 tg3_full_unlock(tp);
5708 return;
5711 tg3_full_unlock(tp);
5713 tg3_phy_stop(tp);
5715 tg3_netif_stop(tp);
5717 tg3_full_lock(tp, 1);
5719 restart_timer = tg3_flag(tp, RESTART_TIMER);
5720 tg3_flag_clear(tp, RESTART_TIMER);
5722 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5723 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5724 tp->write32_rx_mbox = tg3_write_flush_reg32;
5725 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5726 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5729 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5730 err = tg3_init_hw(tp, 1);
5731 if (err)
5732 goto out;
5734 tg3_netif_start(tp);
5736 if (restart_timer)
5737 mod_timer(&tp->timer, jiffies + 1);
5739 out:
5740 tg3_full_unlock(tp);
5742 if (!err)
5743 tg3_phy_start(tp);
5746 static void tg3_tx_timeout(struct net_device *dev)
5748 struct tg3 *tp = netdev_priv(dev);
5750 if (netif_msg_tx_err(tp)) {
5751 netdev_err(dev, "transmit timed out, resetting\n");
5752 tg3_dump_state(tp);
5755 schedule_work(&tp->reset_task);
5758 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5759 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5761 u32 base = (u32) mapping & 0xffffffff;
5763 return (base > 0xffffdcc0) && (base + len + 8 < base);
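/* Worked example: a 100-byte buffer mapped at 0xffffffd0 gives
* base + len + 8 == 0x10000003c, which truncates to 0x3c < base,
* so the test fires: the DMA would straddle a 4GB boundary. The
* base > 0xffffdcc0 pre-check short-circuits the sum for buffers
* starting more than ~9KB below a boundary, which presumably no
* single DMA segment the driver maps can span.
*/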
5766 /* Test for DMA addresses > 40-bit */
5767 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5768 int len)
5770 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5771 if (tg3_flag(tp, 40BIT_DMA_BUG))
5772 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5773 return 0;
5774 #else
5775 return 0;
5776 #endif
5779 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5780 dma_addr_t mapping, int len, u32 flags,
5781 u32 mss_and_is_end)
5783 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5784 int is_end = (mss_and_is_end & 0x1);
5785 u32 mss = (mss_and_is_end >> 1);
5786 u32 vlan_tag = 0;
5788 if (is_end)
5789 flags |= TXD_FLAG_END;
5790 if (flags & TXD_FLAG_VLAN) {
5791 vlan_tag = flags >> 16;
5792 flags &= 0xffff;
5794 vlan_tag |= (mss << TXD_MSS_SHIFT);
5796 txd->addr_hi = ((u64) mapping >> 32);
5797 txd->addr_lo = ((u64) mapping & 0xffffffff);
5798 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5799 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
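/* Each tx BD is four 32-bit words: the 64-bit DMA address split
* across addr_hi/addr_lo, len_flags carrying the length in its
* upper half and the TXD_FLAG_* bits in the lower (note the
* flags &= 0xffff above), and a final word packing the MSS and
* VLAN tag via the shifts above.
*/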
5802 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5803 struct sk_buff *skb, int last)
5805 int i;
5806 u32 entry = tnapi->tx_prod;
5807 struct ring_info *txb = &tnapi->tx_buffers[entry];
5809 pci_unmap_single(tnapi->tp->pdev,
5810 dma_unmap_addr(txb, mapping),
5811 skb_headlen(skb),
5812 PCI_DMA_TODEVICE);
5813 for (i = 0; i < last; i++) {
5814 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5816 entry = NEXT_TX(entry);
5817 txb = &tnapi->tx_buffers[entry];
5819 pci_unmap_page(tnapi->tp->pdev,
5820 dma_unmap_addr(txb, mapping),
5821 frag->size, PCI_DMA_TODEVICE);
5825 /* Work around 4GB and 40-bit hardware DMA bugs. */
5826 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5827 struct sk_buff *skb,
5828 u32 base_flags, u32 mss)
5830 struct tg3 *tp = tnapi->tp;
5831 struct sk_buff *new_skb;
5832 dma_addr_t new_addr = 0;
5833 u32 entry = tnapi->tx_prod;
5834 int ret = 0;
5836 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5837 new_skb = skb_copy(skb, GFP_ATOMIC);
5838 else {
5839 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5841 new_skb = skb_copy_expand(skb,
5842 skb_headroom(skb) + more_headroom,
5843 skb_tailroom(skb), GFP_ATOMIC);
5846 if (!new_skb) {
5847 ret = -1;
5848 } else {
5849 /* New SKB is guaranteed to be linear. */
5850 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5851 PCI_DMA_TODEVICE);
5852 /* Make sure the mapping succeeded */
5853 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5854 ret = -1;
5855 dev_kfree_skb(new_skb);
5857 /* Make sure new skb does not cross any 4G boundaries.
5858 * Drop the packet if it does.
5859 */
5860 } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
5861 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5862 PCI_DMA_TODEVICE);
5863 ret = -1;
5864 dev_kfree_skb(new_skb);
5865 } else {
5866 tnapi->tx_buffers[entry].skb = new_skb;
5867 dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5868 mapping, new_addr);
5870 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5871 base_flags, 1 | (mss << 1));
5875 dev_kfree_skb(skb);
5877 return ret;
5880 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5882 /* Use GSO to work around a rare TSO bug that may be triggered when the
5883 * TSO header is greater than 80 bytes.
5884 */
5885 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5887 struct sk_buff *segs, *nskb;
5888 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5890 /* Estimate the number of fragments in the worst case */
5891 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5892 netif_stop_queue(tp->dev);
5894 /* netif_tx_stop_queue() must be done before checking
5895 * tx index in tg3_tx_avail() below, because in
5896 * tg3_tx(), we update tx index before checking for
5897 * netif_tx_queue_stopped().
5898 */
5899 smp_mb();
5900 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5901 return NETDEV_TX_BUSY;
5903 netif_wake_queue(tp->dev);
5906 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5907 if (IS_ERR(segs))
5908 goto tg3_tso_bug_end;
5910 do {
5911 nskb = segs;
5912 segs = segs->next;
5913 nskb->next = NULL;
5914 tg3_start_xmit(nskb, tp->dev);
5915 } while (segs);
5917 tg3_tso_bug_end:
5918 dev_kfree_skb(skb);
5920 return NETDEV_TX_OK;
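/* This fallback trades the TSO offload win for correctness: GSO
* segments the skb in software into MSS-sized packets (TSO masked
* out of the feature flags) and each segment is requeued through
* tg3_start_xmit() as an ordinary transmit.
*/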
5923 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5924 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5925 */
5926 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5928 struct tg3 *tp = netdev_priv(dev);
5929 u32 len, entry, base_flags, mss;
5930 int i = -1, would_hit_hwbug;
5931 dma_addr_t mapping;
5932 struct tg3_napi *tnapi;
5933 struct netdev_queue *txq;
5934 unsigned int last;
5936 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5937 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5938 if (tg3_flag(tp, ENABLE_TSS))
5939 tnapi++;
5941 /* We are running in BH disabled context with netif_tx_lock
5942 * and TX reclaim runs via tp->napi.poll inside of a software
5943 * interrupt. Furthermore, IRQ processing runs lockless so we have
5944 * no IRQ context deadlocks to worry about either. Rejoice!
5945 */
5946 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5947 if (!netif_tx_queue_stopped(txq)) {
5948 netif_tx_stop_queue(txq);
5950 /* This is a hard error, log it. */
5951 netdev_err(dev,
5952 "BUG! Tx Ring full when queue awake!\n");
5954 return NETDEV_TX_BUSY;
5957 entry = tnapi->tx_prod;
5958 base_flags = 0;
5959 if (skb->ip_summed == CHECKSUM_PARTIAL)
5960 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5962 mss = skb_shinfo(skb)->gso_size;
5963 if (mss) {
5964 struct iphdr *iph;
5965 u32 tcp_opt_len, hdr_len;
5967 if (skb_header_cloned(skb) &&
5968 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5969 dev_kfree_skb(skb);
5970 goto out_unlock;
5973 iph = ip_hdr(skb);
5974 tcp_opt_len = tcp_optlen(skb);
5976 if (skb_is_gso_v6(skb)) {
5977 hdr_len = skb_headlen(skb) - ETH_HLEN;
5978 } else {
5979 u32 ip_tcp_len;
5981 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5982 hdr_len = ip_tcp_len + tcp_opt_len;
5984 iph->check = 0;
5985 iph->tot_len = htons(mss + hdr_len);
5988 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5989 tg3_flag(tp, TSO_BUG))
5990 return tg3_tso_bug(tp, skb);
5992 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5993 TXD_FLAG_CPU_POST_DMA);
5995 if (tg3_flag(tp, HW_TSO_1) ||
5996 tg3_flag(tp, HW_TSO_2) ||
5997 tg3_flag(tp, HW_TSO_3)) {
5998 tcp_hdr(skb)->check = 0;
5999 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6000 } else
6001 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6002 iph->daddr, 0,
6003 IPPROTO_TCP,
6004 0);
6006 if (tg3_flag(tp, HW_TSO_3)) {
6007 mss |= (hdr_len & 0xc) << 12;
6008 if (hdr_len & 0x10)
6009 base_flags |= 0x00000010;
6010 base_flags |= (hdr_len & 0x3e0) << 5;
6011 } else if (tg3_flag(tp, HW_TSO_2))
6012 mss |= hdr_len << 9;
6013 else if (tg3_flag(tp, HW_TSO_1) ||
6014 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6015 if (tcp_opt_len || iph->ihl > 5) {
6016 int tsflags;
6018 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6019 mss |= (tsflags << 11);
6021 } else {
6022 if (tcp_opt_len || iph->ihl > 5) {
6023 int tsflags;
6025 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6026 base_flags |= tsflags << 12;
6031 if (vlan_tx_tag_present(skb))
6032 base_flags |= (TXD_FLAG_VLAN |
6033 (vlan_tx_tag_get(skb) << 16));
6035 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6036 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6037 base_flags |= TXD_FLAG_JMB_PKT;
6039 len = skb_headlen(skb);
6041 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6042 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6043 dev_kfree_skb(skb);
6044 goto out_unlock;
6047 tnapi->tx_buffers[entry].skb = skb;
6048 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6050 would_hit_hwbug = 0;
6052 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6053 would_hit_hwbug = 1;
6055 if (tg3_4g_overflow_test(mapping, len))
6056 would_hit_hwbug = 1;
6058 if (tg3_40bit_overflow_test(tp, mapping, len))
6059 would_hit_hwbug = 1;
6061 if (tg3_flag(tp, 5701_DMA_BUG))
6062 would_hit_hwbug = 1;
6064 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6065 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6067 entry = NEXT_TX(entry);
6069 /* Now loop through additional data fragments, and queue them. */
6070 if (skb_shinfo(skb)->nr_frags > 0) {
6071 last = skb_shinfo(skb)->nr_frags - 1;
6072 for (i = 0; i <= last; i++) {
6073 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6075 len = frag->size;
6076 mapping = pci_map_page(tp->pdev,
6077 frag->page,
6078 frag->page_offset,
6079 len, PCI_DMA_TODEVICE);
6081 tnapi->tx_buffers[entry].skb = NULL;
6082 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6083 mapping);
6084 if (pci_dma_mapping_error(tp->pdev, mapping))
6085 goto dma_error;
6087 if (tg3_flag(tp, SHORT_DMA_BUG) &&
6088 len <= 8)
6089 would_hit_hwbug = 1;
6091 if (tg3_4g_overflow_test(mapping, len))
6092 would_hit_hwbug = 1;
6094 if (tg3_40bit_overflow_test(tp, mapping, len))
6095 would_hit_hwbug = 1;
6097 if (tg3_flag(tp, HW_TSO_1) ||
6098 tg3_flag(tp, HW_TSO_2) ||
6099 tg3_flag(tp, HW_TSO_3))
6100 tg3_set_txd(tnapi, entry, mapping, len,
6101 base_flags, (i == last)|(mss << 1));
6102 else
6103 tg3_set_txd(tnapi, entry, mapping, len,
6104 base_flags, (i == last));
6106 entry = NEXT_TX(entry);
6110 if (would_hit_hwbug) {
6111 tg3_skb_error_unmap(tnapi, skb, i);
6113 /* If the workaround fails due to memory/mapping
6114 * failure, silently drop this packet.
6115 */
6116 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6117 goto out_unlock;
6119 entry = NEXT_TX(tnapi->tx_prod);
6122 skb_tx_timestamp(skb);
6124 /* Packets are ready, update Tx producer idx locally and on card. */
6125 tw32_tx_mbox(tnapi->prodmbox, entry);
6127 tnapi->tx_prod = entry;
6128 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6129 netif_tx_stop_queue(txq);
6131 /* netif_tx_stop_queue() must be done before checking
6132 * tx index in tg3_tx_avail() below, because in
6133 * tg3_tx(), we update tx index before checking for
6134 * netif_tx_queue_stopped().
6135 */
6136 smp_mb();
6137 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6138 netif_tx_wake_queue(txq);
6141 out_unlock:
6142 mmiowb();
6144 return NETDEV_TX_OK;
6146 dma_error:
6147 tg3_skb_error_unmap(tnapi, skb, i);
6148 dev_kfree_skb(skb);
6149 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6150 return NETDEV_TX_OK;
6153 static void tg3_set_loopback(struct net_device *dev, u32 features)
6155 struct tg3 *tp = netdev_priv(dev);
6157 if (features & NETIF_F_LOOPBACK) {
6158 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6159 return;
6161 /*
6162 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6163 * loopback mode if Half-Duplex mode was negotiated earlier.
6164 */
6165 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6167 /* Enable internal MAC loopback mode */
6168 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6169 spin_lock_bh(&tp->lock);
6170 tw32(MAC_MODE, tp->mac_mode);
6171 netif_carrier_on(tp->dev);
6172 spin_unlock_bh(&tp->lock);
6173 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6174 } else {
6175 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6176 return;
6178 /* Disable internal MAC loopback mode */
6179 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6180 spin_lock_bh(&tp->lock);
6181 tw32(MAC_MODE, tp->mac_mode);
6182 /* Force link status check */
6183 tg3_setup_phy(tp, 1);
6184 spin_unlock_bh(&tp->lock);
6185 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6189 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6191 struct tg3 *tp = netdev_priv(dev);
6193 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6194 features &= ~NETIF_F_ALL_TSO;
6196 return features;
6199 static int tg3_set_features(struct net_device *dev, u32 features)
6201 u32 changed = dev->features ^ features;
6203 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6204 tg3_set_loopback(dev, features);
6206 return 0;
6209 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6210 int new_mtu)
6212 dev->mtu = new_mtu;
6214 if (new_mtu > ETH_DATA_LEN) {
6215 if (tg3_flag(tp, 5780_CLASS)) {
6216 netdev_update_features(dev);
6217 tg3_flag_clear(tp, TSO_CAPABLE);
6218 } else {
6219 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6221 } else {
6222 if (tg3_flag(tp, 5780_CLASS)) {
6223 tg3_flag_set(tp, TSO_CAPABLE);
6224 netdev_update_features(dev);
6226 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6230 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6232 struct tg3 *tp = netdev_priv(dev);
6233 int err;
6235 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6236 return -EINVAL;
6238 if (!netif_running(dev)) {
6239 /* We'll just catch it later when the
6240 * device is brought up.
6241 */
6242 tg3_set_mtu(dev, tp, new_mtu);
6243 return 0;
6246 tg3_phy_stop(tp);
6248 tg3_netif_stop(tp);
6250 tg3_full_lock(tp, 1);
6252 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6254 tg3_set_mtu(dev, tp, new_mtu);
6256 err = tg3_restart_hw(tp, 0);
6258 if (!err)
6259 tg3_netif_start(tp);
6261 tg3_full_unlock(tp);
6263 if (!err)
6264 tg3_phy_start(tp);
6266 return err;
6269 static void tg3_rx_prodring_free(struct tg3 *tp,
6270 struct tg3_rx_prodring_set *tpr)
6272 int i;
6274 if (tpr != &tp->napi[0].prodring) {
6275 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6276 i = (i + 1) & tp->rx_std_ring_mask)
6277 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6278 tp->rx_pkt_map_sz);
6280 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6281 for (i = tpr->rx_jmb_cons_idx;
6282 i != tpr->rx_jmb_prod_idx;
6283 i = (i + 1) & tp->rx_jmb_ring_mask) {
6284 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6285 TG3_RX_JMB_MAP_SZ);
6289 return;
6292 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6293 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6294 tp->rx_pkt_map_sz);
6296 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6297 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6298 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6299 TG3_RX_JMB_MAP_SZ);
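/* Editor's note: the partial-free path above walks a power-of-two ring
 * from consumer to producer by masking the incremented index, so the
 * walk wraps without a modulo. A standalone sketch of the pattern
 * (hypothetical names):
 */
static void example_ring_walk(u32 cons, u32 prod, u32 ring_mask)
{
	u32 i;

	/* ring_mask == ring_size - 1, with ring_size a power of two */
	for (i = cons; i != prod; i = (i + 1) & ring_mask) {
		/* visit slot i; (i + 1) & ring_mask wraps size-1 to 0 */
	}
}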
6303 /* Initialize rx rings for packet processing.
6304 *
6305 * The chip has been shut down and the driver detached from
6306 * the networking, so no interrupts or new tx packets will
6307 * end up in the driver. tp->{tx,}lock are held and thus
6308 * we may not sleep.
6309 */
6310 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6311 struct tg3_rx_prodring_set *tpr)
6313 u32 i, rx_pkt_dma_sz;
6315 tpr->rx_std_cons_idx = 0;
6316 tpr->rx_std_prod_idx = 0;
6317 tpr->rx_jmb_cons_idx = 0;
6318 tpr->rx_jmb_prod_idx = 0;
6320 if (tpr != &tp->napi[0].prodring) {
6321 memset(&tpr->rx_std_buffers[0], 0,
6322 TG3_RX_STD_BUFF_RING_SIZE(tp));
6323 if (tpr->rx_jmb_buffers)
6324 memset(&tpr->rx_jmb_buffers[0], 0,
6325 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6326 goto done;
6329 /* Zero out all descriptors. */
6330 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6332 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6333 if (tg3_flag(tp, 5780_CLASS) &&
6334 tp->dev->mtu > ETH_DATA_LEN)
6335 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6336 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6338 /* Initialize invariants of the rings; we only set this
6339 * stuff once. This works because the card does not
6340 * write into the rx buffer posting rings.
6341 */
6342 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6343 struct tg3_rx_buffer_desc *rxd;
6345 rxd = &tpr->rx_std[i];
6346 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6347 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6348 rxd->opaque = (RXD_OPAQUE_RING_STD |
6349 (i << RXD_OPAQUE_INDEX_SHIFT));
6352 /* Now allocate fresh SKBs for each rx ring. */
6353 for (i = 0; i < tp->rx_pending; i++) {
6354 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6355 netdev_warn(tp->dev,
6356 "Using a smaller RX standard ring. Only "
6357 "%d out of %d buffers were allocated "
6358 "successfully\n", i, tp->rx_pending);
6359 if (i == 0)
6360 goto initfail;
6361 tp->rx_pending = i;
6362 break;
6366 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6367 goto done;
6369 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6371 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6372 goto done;
6374 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6375 struct tg3_rx_buffer_desc *rxd;
6377 rxd = &tpr->rx_jmb[i].std;
6378 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6379 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6380 RXD_FLAG_JUMBO;
6381 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6382 (i << RXD_OPAQUE_INDEX_SHIFT));
6385 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6386 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6387 netdev_warn(tp->dev,
6388 "Using a smaller RX jumbo ring. Only %d "
6389 "out of %d buffers were allocated "
6390 "successfully\n", i, tp->rx_jumbo_pending);
6391 if (i == 0)
6392 goto initfail;
6393 tp->rx_jumbo_pending = i;
6394 break;
6398 done:
6399 return 0;
6401 initfail:
6402 tg3_rx_prodring_free(tp, tpr);
6403 return -ENOMEM;
6406 static void tg3_rx_prodring_fini(struct tg3 *tp,
6407 struct tg3_rx_prodring_set *tpr)
6409 kfree(tpr->rx_std_buffers);
6410 tpr->rx_std_buffers = NULL;
6411 kfree(tpr->rx_jmb_buffers);
6412 tpr->rx_jmb_buffers = NULL;
6413 if (tpr->rx_std) {
6414 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6415 tpr->rx_std, tpr->rx_std_mapping);
6416 tpr->rx_std = NULL;
6418 if (tpr->rx_jmb) {
6419 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6420 tpr->rx_jmb, tpr->rx_jmb_mapping);
6421 tpr->rx_jmb = NULL;
6425 static int tg3_rx_prodring_init(struct tg3 *tp,
6426 struct tg3_rx_prodring_set *tpr)
6428 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6429 GFP_KERNEL);
6430 if (!tpr->rx_std_buffers)
6431 return -ENOMEM;
6433 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6434 TG3_RX_STD_RING_BYTES(tp),
6435 &tpr->rx_std_mapping,
6436 GFP_KERNEL);
6437 if (!tpr->rx_std)
6438 goto err_out;
6440 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6441 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6442 GFP_KERNEL);
6443 if (!tpr->rx_jmb_buffers)
6444 goto err_out;
6446 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6447 TG3_RX_JMB_RING_BYTES(tp),
6448 &tpr->rx_jmb_mapping,
6449 GFP_KERNEL);
6450 if (!tpr->rx_jmb)
6451 goto err_out;
6454 return 0;
6456 err_out:
6457 tg3_rx_prodring_fini(tp, tpr);
6458 return -ENOMEM;
6461 /* Free up pending packets in all rx/tx rings.
6462 *
6463 * The chip has been shut down and the driver detached from
6464 * the networking, so no interrupts or new tx packets will
6465 * end up in the driver. tp->{tx,}lock is not held and we are not
6466 * in an interrupt context and thus may sleep.
6467 */
6468 static void tg3_free_rings(struct tg3 *tp)
6470 int i, j;
6472 for (j = 0; j < tp->irq_cnt; j++) {
6473 struct tg3_napi *tnapi = &tp->napi[j];
6475 tg3_rx_prodring_free(tp, &tnapi->prodring);
6477 if (!tnapi->tx_buffers)
6478 continue;
6480 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6481 struct ring_info *txp;
6482 struct sk_buff *skb;
6483 unsigned int k;
6485 txp = &tnapi->tx_buffers[i];
6486 skb = txp->skb;
6488 if (skb == NULL) {
6489 i++;
6490 continue;
6493 pci_unmap_single(tp->pdev,
6494 dma_unmap_addr(txp, mapping),
6495 skb_headlen(skb),
6496 PCI_DMA_TODEVICE);
6497 txp->skb = NULL;
6499 i++;
6501 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6502 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6503 pci_unmap_page(tp->pdev,
6504 dma_unmap_addr(txp, mapping),
6505 skb_shinfo(skb)->frags[k].size,
6506 PCI_DMA_TODEVICE);
6507 i++;
6510 dev_kfree_skb_any(skb);
6515 /* Initialize tx/rx rings for packet processing.
6516 *
6517 * The chip has been shut down and the driver detached from
6518 * the networking, so no interrupts or new tx packets will
6519 * end up in the driver. tp->{tx,}lock are held and thus
6520 * we may not sleep.
6521 */
6522 static int tg3_init_rings(struct tg3 *tp)
6524 int i;
6526 /* Free up all the SKBs. */
6527 tg3_free_rings(tp);
6529 for (i = 0; i < tp->irq_cnt; i++) {
6530 struct tg3_napi *tnapi = &tp->napi[i];
6532 tnapi->last_tag = 0;
6533 tnapi->last_irq_tag = 0;
6534 tnapi->hw_status->status = 0;
6535 tnapi->hw_status->status_tag = 0;
6536 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6538 tnapi->tx_prod = 0;
6539 tnapi->tx_cons = 0;
6540 if (tnapi->tx_ring)
6541 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6543 tnapi->rx_rcb_ptr = 0;
6544 if (tnapi->rx_rcb)
6545 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6547 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6548 tg3_free_rings(tp);
6549 return -ENOMEM;
6553 return 0;
6554 }
6556 /*
6557 * Must not be invoked with interrupt sources disabled and
6558 * the hardware shut down.
6559 */
6560 static void tg3_free_consistent(struct tg3 *tp)
6562 int i;
6564 for (i = 0; i < tp->irq_cnt; i++) {
6565 struct tg3_napi *tnapi = &tp->napi[i];
6567 if (tnapi->tx_ring) {
6568 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6569 tnapi->tx_ring, tnapi->tx_desc_mapping);
6570 tnapi->tx_ring = NULL;
6573 kfree(tnapi->tx_buffers);
6574 tnapi->tx_buffers = NULL;
6576 if (tnapi->rx_rcb) {
6577 dma_free_coherent(&tp->pdev->dev,
6578 TG3_RX_RCB_RING_BYTES(tp),
6579 tnapi->rx_rcb,
6580 tnapi->rx_rcb_mapping);
6581 tnapi->rx_rcb = NULL;
6584 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6586 if (tnapi->hw_status) {
6587 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6588 tnapi->hw_status,
6589 tnapi->status_mapping);
6590 tnapi->hw_status = NULL;
6594 if (tp->hw_stats) {
6595 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6596 tp->hw_stats, tp->stats_mapping);
6597 tp->hw_stats = NULL;
6598 }
6599 }
6601 /*
6602 * Must not be invoked with interrupt sources disabled and
6603 * the hardware shut down. Can sleep.
6604 */
6605 static int tg3_alloc_consistent(struct tg3 *tp)
6607 int i;
6609 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6610 sizeof(struct tg3_hw_stats),
6611 &tp->stats_mapping,
6612 GFP_KERNEL);
6613 if (!tp->hw_stats)
6614 goto err_out;
6616 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6618 for (i = 0; i < tp->irq_cnt; i++) {
6619 struct tg3_napi *tnapi = &tp->napi[i];
6620 struct tg3_hw_status *sblk;
6622 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6623 TG3_HW_STATUS_SIZE,
6624 &tnapi->status_mapping,
6625 GFP_KERNEL);
6626 if (!tnapi->hw_status)
6627 goto err_out;
6629 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6630 sblk = tnapi->hw_status;
6632 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6633 goto err_out;
6635 /* If multivector TSS is enabled, vector 0 does not handle
6636 * tx interrupts. Don't allocate any resources for it.
6637 */
6638 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6639 (i && tg3_flag(tp, ENABLE_TSS))) {
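/* Editor's note: the condition above reduces to "vector 0 when TSS
 * is disabled, or any non-zero vector when TSS is enabled", i.e.
 * tx buffers and a tx ring are allocated exactly for the vectors
 * that will service tx completions.
 */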
6640 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6641 TG3_TX_RING_SIZE,
6642 GFP_KERNEL);
6643 if (!tnapi->tx_buffers)
6644 goto err_out;
6646 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6647 TG3_TX_RING_BYTES,
6648 &tnapi->tx_desc_mapping,
6649 GFP_KERNEL);
6650 if (!tnapi->tx_ring)
6651 goto err_out;
6652 }
6654 /*
6655 * When RSS is enabled, the status block format changes
6656 * slightly. The "rx_jumbo_consumer", "reserved",
6657 * and "rx_mini_consumer" members get mapped to the
6658 * other three rx return ring producer indexes.
6659 */
6660 switch (i) {
6661 default:
6662 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6663 break;
6664 case 2:
6665 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6666 break;
6667 case 3:
6668 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6669 break;
6670 case 4:
6671 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6672 break;
6673 }
6675 /*
6676 * If multivector RSS is enabled, vector 0 does not handle
6677 * rx or tx interrupts. Don't allocate any resources for it.
6678 */
6679 if (!i && tg3_flag(tp, ENABLE_RSS))
6680 continue;
6682 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6683 TG3_RX_RCB_RING_BYTES(tp),
6684 &tnapi->rx_rcb_mapping,
6685 GFP_KERNEL);
6686 if (!tnapi->rx_rcb)
6687 goto err_out;
6689 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6692 return 0;
6694 err_out:
6695 tg3_free_consistent(tp);
6696 return -ENOMEM;
6699 #define MAX_WAIT_CNT 1000
6701 /* To stop a block, clear the enable bit and poll till it
6702 * clears. tp->lock is held.
6703 */
6704 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6706 unsigned int i;
6707 u32 val;
6709 if (tg3_flag(tp, 5705_PLUS)) {
6710 switch (ofs) {
6711 case RCVLSC_MODE:
6712 case DMAC_MODE:
6713 case MBFREE_MODE:
6714 case BUFMGR_MODE:
6715 case MEMARB_MODE:
6716 /* We can't enable/disable these bits of the
6717 * 5705/5750, just say success.
6718 */
6719 return 0;
6721 default:
6722 break;
6726 val = tr32(ofs);
6727 val &= ~enable_bit;
6728 tw32_f(ofs, val);
6730 for (i = 0; i < MAX_WAIT_CNT; i++) {
6731 udelay(100);
6732 val = tr32(ofs);
6733 if ((val & enable_bit) == 0)
6734 break;
6737 if (i == MAX_WAIT_CNT && !silent) {
6738 dev_err(&tp->pdev->dev,
6739 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6740 ofs, enable_bit);
6741 return -ENODEV;
6744 return 0;
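/* Editor's sketch (not driver code): tg3_stop_block() above is an
 * instance of the common "clear the enable bit, then poll until the
 * hardware reports it clear" idiom. A generic, hypothetical form:
 */
static int example_clear_and_poll(void __iomem *reg, u32 enable_bit,
				  unsigned int max_tries)
{
	unsigned int i;

	writel(readl(reg) & ~enable_bit, reg);

	for (i = 0; i < max_tries; i++) {
		udelay(100);
		if (!(readl(reg) & enable_bit))
			return 0;	/* block acknowledged the stop */
	}

	return -ENODEV;			/* hardware never cleared the bit */
}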
6747 /* tp->lock is held. */
6748 static int tg3_abort_hw(struct tg3 *tp, int silent)
6750 int i, err;
6752 tg3_disable_ints(tp);
6754 tp->rx_mode &= ~RX_MODE_ENABLE;
6755 tw32_f(MAC_RX_MODE, tp->rx_mode);
6756 udelay(10);
6758 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6759 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6760 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6761 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6762 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6763 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6765 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6766 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6767 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6768 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6769 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6770 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6771 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6773 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6774 tw32_f(MAC_MODE, tp->mac_mode);
6775 udelay(40);
6777 tp->tx_mode &= ~TX_MODE_ENABLE;
6778 tw32_f(MAC_TX_MODE, tp->tx_mode);
6780 for (i = 0; i < MAX_WAIT_CNT; i++) {
6781 udelay(100);
6782 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6783 break;
6785 if (i >= MAX_WAIT_CNT) {
6786 dev_err(&tp->pdev->dev,
6787 "%s timed out, TX_MODE_ENABLE will not clear "
6788 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6789 err |= -ENODEV;
6792 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6793 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6794 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6796 tw32(FTQ_RESET, 0xffffffff);
6797 tw32(FTQ_RESET, 0x00000000);
6799 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6800 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6802 for (i = 0; i < tp->irq_cnt; i++) {
6803 struct tg3_napi *tnapi = &tp->napi[i];
6804 if (tnapi->hw_status)
6805 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6807 if (tp->hw_stats)
6808 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6810 return err;
6813 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6815 int i;
6816 u32 apedata;
6818 /* NCSI does not support APE events */
6819 if (tg3_flag(tp, APE_HAS_NCSI))
6820 return;
6822 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6823 if (apedata != APE_SEG_SIG_MAGIC)
6824 return;
6826 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6827 if (!(apedata & APE_FW_STATUS_READY))
6828 return;
6830 /* Wait for up to 1 millisecond for APE to service previous event. */
6831 for (i = 0; i < 10; i++) {
6832 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6833 return;
6835 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6837 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6838 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6839 event | APE_EVENT_STATUS_EVENT_PENDING);
6841 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6843 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6844 break;
6846 udelay(100);
6849 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6850 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6853 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6855 u32 event;
6856 u32 apedata;
6858 if (!tg3_flag(tp, ENABLE_APE))
6859 return;
6861 switch (kind) {
6862 case RESET_KIND_INIT:
6863 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6864 APE_HOST_SEG_SIG_MAGIC);
6865 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6866 APE_HOST_SEG_LEN_MAGIC);
6867 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6868 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6869 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6870 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6871 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6872 APE_HOST_BEHAV_NO_PHYLOCK);
6873 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6874 TG3_APE_HOST_DRVR_STATE_START);
6876 event = APE_EVENT_STATUS_STATE_START;
6877 break;
6878 case RESET_KIND_SHUTDOWN:
6879 /* With the interface we are currently using,
6880 * APE does not track driver state. Wiping
6881 * out the HOST SEGMENT SIGNATURE forces
6882 * the APE to assume OS absent status.
6883 */
6884 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6886 if (device_may_wakeup(&tp->pdev->dev) &&
6887 tg3_flag(tp, WOL_ENABLE)) {
6888 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6889 TG3_APE_HOST_WOL_SPEED_AUTO);
6890 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6891 } else
6892 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6894 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6896 event = APE_EVENT_STATUS_STATE_UNLOAD;
6897 break;
6898 case RESET_KIND_SUSPEND:
6899 event = APE_EVENT_STATUS_STATE_SUSPEND;
6900 break;
6901 default:
6902 return;
6905 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6907 tg3_ape_send_event(tp, event);
6910 /* tp->lock is held. */
6911 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6913 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6914 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6916 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6917 switch (kind) {
6918 case RESET_KIND_INIT:
6919 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6920 DRV_STATE_START);
6921 break;
6923 case RESET_KIND_SHUTDOWN:
6924 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6925 DRV_STATE_UNLOAD);
6926 break;
6928 case RESET_KIND_SUSPEND:
6929 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6930 DRV_STATE_SUSPEND);
6931 break;
6933 default:
6934 break;
6938 if (kind == RESET_KIND_INIT ||
6939 kind == RESET_KIND_SUSPEND)
6940 tg3_ape_driver_state_change(tp, kind);
6943 /* tp->lock is held. */
6944 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6946 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6947 switch (kind) {
6948 case RESET_KIND_INIT:
6949 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6950 DRV_STATE_START_DONE);
6951 break;
6953 case RESET_KIND_SHUTDOWN:
6954 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6955 DRV_STATE_UNLOAD_DONE);
6956 break;
6958 default:
6959 break;
6963 if (kind == RESET_KIND_SHUTDOWN)
6964 tg3_ape_driver_state_change(tp, kind);
6967 /* tp->lock is held. */
6968 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6970 if (tg3_flag(tp, ENABLE_ASF)) {
6971 switch (kind) {
6972 case RESET_KIND_INIT:
6973 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6974 DRV_STATE_START);
6975 break;
6977 case RESET_KIND_SHUTDOWN:
6978 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6979 DRV_STATE_UNLOAD);
6980 break;
6982 case RESET_KIND_SUSPEND:
6983 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6984 DRV_STATE_SUSPEND);
6985 break;
6987 default:
6988 break;
6993 static int tg3_poll_fw(struct tg3 *tp)
6995 int i;
6996 u32 val;
6998 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6999 /* Wait up to 20ms for init done. */
7000 for (i = 0; i < 200; i++) {
7001 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7002 return 0;
7003 udelay(100);
7005 return -ENODEV;
7008 /* Wait for firmware initialization to complete. */
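/* Editor's note: bootcode acknowledges completion by writing back the
 * one's complement of MAGIC1, hence the ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
 * comparison in the poll below.
 */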
7009 for (i = 0; i < 100000; i++) {
7010 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7011 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7012 break;
7013 udelay(10);
7016 /* Chip might not be fitted with firmware. Some Sun onboard
7017 * parts are configured like that. So don't signal the timeout
7018 * of the above loop as an error, but do report the lack of
7019 * running firmware once.
7020 */
7021 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7022 tg3_flag_set(tp, NO_FWARE_REPORTED);
7024 netdev_info(tp->dev, "No firmware running\n");
7027 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7028 /* The 57765 A0 needs a little more
7029 * time to do some important work.
7030 */
7031 mdelay(10);
7034 return 0;
7037 /* Save PCI command register before chip reset */
7038 static void tg3_save_pci_state(struct tg3 *tp)
7040 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7043 /* Restore PCI state after chip reset */
7044 static void tg3_restore_pci_state(struct tg3 *tp)
7046 u32 val;
7048 /* Re-enable indirect register accesses. */
7049 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7050 tp->misc_host_ctrl);
7052 /* Set MAX PCI retry to zero. */
7053 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7054 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7055 tg3_flag(tp, PCIX_MODE))
7056 val |= PCISTATE_RETRY_SAME_DMA;
7057 /* Allow reads and writes to the APE register and memory space. */
7058 if (tg3_flag(tp, ENABLE_APE))
7059 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7060 PCISTATE_ALLOW_APE_SHMEM_WR |
7061 PCISTATE_ALLOW_APE_PSPACE_WR;
7062 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7064 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7066 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7067 if (tg3_flag(tp, PCI_EXPRESS))
7068 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7069 else {
7070 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7071 tp->pci_cacheline_sz);
7072 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7073 tp->pci_lat_timer);
7077 /* Make sure PCI-X relaxed ordering bit is clear. */
7078 if (tg3_flag(tp, PCIX_MODE)) {
7079 u16 pcix_cmd;
7081 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7082 &pcix_cmd);
7083 pcix_cmd &= ~PCI_X_CMD_ERO;
7084 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7085 pcix_cmd);
7088 if (tg3_flag(tp, 5780_CLASS)) {
7090 /* Chip reset on 5780 will reset MSI enable bit,
7091 * so we need to restore it.
7092 */
7093 if (tg3_flag(tp, USING_MSI)) {
7094 u16 ctrl;
7096 pci_read_config_word(tp->pdev,
7097 tp->msi_cap + PCI_MSI_FLAGS,
7098 &ctrl);
7099 pci_write_config_word(tp->pdev,
7100 tp->msi_cap + PCI_MSI_FLAGS,
7101 ctrl | PCI_MSI_FLAGS_ENABLE);
7102 val = tr32(MSGINT_MODE);
7103 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7108 static void tg3_stop_fw(struct tg3 *);
7110 /* tp->lock is held. */
7111 static int tg3_chip_reset(struct tg3 *tp)
7113 u32 val;
7114 void (*write_op)(struct tg3 *, u32, u32);
7115 int i, err;
7117 tg3_nvram_lock(tp);
7119 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7121 /* No matching tg3_nvram_unlock() after this because
7122 * chip reset below will undo the nvram lock.
7123 */
7124 tp->nvram_lock_cnt = 0;
7126 /* GRC_MISC_CFG core clock reset will clear the memory
7127 * enable bit in PCI register 4 and the MSI enable bit
7128 * on some chips, so we save relevant registers here.
7129 */
7130 tg3_save_pci_state(tp);
7132 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7133 tg3_flag(tp, 5755_PLUS))
7134 tw32(GRC_FASTBOOT_PC, 0);
7136 /*
7137 * We must avoid the readl() that normally takes place.
7138 * It locks machines, causes machine checks, and other
7139 * fun things. So, temporarily disable the 5701
7140 * hardware workaround, while we do the reset.
7141 */
7142 write_op = tp->write32;
7143 if (write_op == tg3_write_flush_reg32)
7144 tp->write32 = tg3_write32;
7146 /* Prevent the irq handler from reading or writing PCI registers
7147 * during chip reset when the memory enable bit in the PCI command
7148 * register may be cleared. The chip does not generate interrupt
7149 * at this time, but the irq handler may still be called due to irq
7150 * sharing or irqpoll.
7151 */
7152 tg3_flag_set(tp, CHIP_RESETTING);
7153 for (i = 0; i < tp->irq_cnt; i++) {
7154 struct tg3_napi *tnapi = &tp->napi[i];
7155 if (tnapi->hw_status) {
7156 tnapi->hw_status->status = 0;
7157 tnapi->hw_status->status_tag = 0;
7159 tnapi->last_tag = 0;
7160 tnapi->last_irq_tag = 0;
7162 smp_mb();
7164 for (i = 0; i < tp->irq_cnt; i++)
7165 synchronize_irq(tp->napi[i].irq_vec);
7167 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7168 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7169 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7172 /* do the reset */
7173 val = GRC_MISC_CFG_CORECLK_RESET;
7175 if (tg3_flag(tp, PCI_EXPRESS)) {
7176 /* Force PCIe 1.0a mode */
7177 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7178 !tg3_flag(tp, 57765_PLUS) &&
7179 tr32(TG3_PCIE_PHY_TSTCTL) ==
7180 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7181 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7183 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7184 tw32(GRC_MISC_CFG, (1 << 29));
7185 val |= (1 << 29);
7189 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7190 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7191 tw32(GRC_VCPU_EXT_CTRL,
7192 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7195 /* Manage gphy power for all CPMU absent PCIe devices. */
7196 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7197 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7199 tw32(GRC_MISC_CFG, val);
7201 /* restore 5701 hardware bug workaround write method */
7202 tp->write32 = write_op;
7204 /* Unfortunately, we have to delay before the PCI read back.
7205 * Some 575X chips even will not respond to a PCI cfg access
7206 * when the reset command is given to the chip.
7208 * How do these hardware designers expect things to work
7209 * properly if the PCI write is posted for a long period
7210 * of time? It is always necessary to have some method by
7211 * which a register read back can occur to push out the
7212 * write that does the reset.
7213 *
7214 * For most tg3 variants the trick below was working.
7215 * Ho hum...
7216 */
7217 udelay(120);
7219 /* Flush PCI posted writes. The normal MMIO registers
7220 * are inaccessible at this time so this is the only
7221 * way to do this reliably (actually, this is no longer
7222 * the case, see above). I tried to use indirect
7223 * register read/write but this upset some 5701 variants.
7224 */
7225 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7227 udelay(120);
7229 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7230 u16 val16;
7232 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7233 int i;
7234 u32 cfg_val;
7236 /* Wait for link training to complete. */
7237 for (i = 0; i < 5000; i++)
7238 udelay(100);
7240 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7241 pci_write_config_dword(tp->pdev, 0xc4,
7242 cfg_val | (1 << 15));
7245 /* Clear the "no snoop" and "relaxed ordering" bits. */
7246 pci_read_config_word(tp->pdev,
7247 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7248 &val16);
7249 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7250 PCI_EXP_DEVCTL_NOSNOOP_EN);
7251 /*
7252 * Older PCIe devices only support the 128 byte
7253 * MPS setting. Enforce the restriction.
7254 */
7255 if (!tg3_flag(tp, CPMU_PRESENT))
7256 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7257 pci_write_config_word(tp->pdev,
7258 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7259 val16);
7261 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7263 /* Clear error status */
7264 pci_write_config_word(tp->pdev,
7265 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7266 PCI_EXP_DEVSTA_CED |
7267 PCI_EXP_DEVSTA_NFED |
7268 PCI_EXP_DEVSTA_FED |
7269 PCI_EXP_DEVSTA_URD);
7272 tg3_restore_pci_state(tp);
7274 tg3_flag_clear(tp, CHIP_RESETTING);
7275 tg3_flag_clear(tp, ERROR_PROCESSED);
7277 val = 0;
7278 if (tg3_flag(tp, 5780_CLASS))
7279 val = tr32(MEMARB_MODE);
7280 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7282 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7283 tg3_stop_fw(tp);
7284 tw32(0x5000, 0x400);
7287 tw32(GRC_MODE, tp->grc_mode);
7289 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7290 val = tr32(0xc4);
7292 tw32(0xc4, val | (1 << 15));
7295 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7296 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7297 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7298 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7299 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7300 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7303 if (tg3_flag(tp, ENABLE_APE))
7304 tp->mac_mode = MAC_MODE_APE_TX_EN |
7305 MAC_MODE_APE_RX_EN |
7306 MAC_MODE_TDE_ENABLE;
7308 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7309 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7310 val = tp->mac_mode;
7311 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7312 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7313 val = tp->mac_mode;
7314 } else
7315 val = 0;
7317 tw32_f(MAC_MODE, val);
7318 udelay(40);
7320 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7322 err = tg3_poll_fw(tp);
7323 if (err)
7324 return err;
7326 tg3_mdio_start(tp);
7328 if (tg3_flag(tp, PCI_EXPRESS) &&
7329 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7330 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7331 !tg3_flag(tp, 57765_PLUS)) {
7332 val = tr32(0x7c00);
7334 tw32(0x7c00, val | (1 << 25));
7337 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7338 val = tr32(TG3_CPMU_CLCK_ORIDE);
7339 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7342 /* Reprobe ASF enable state. */
7343 tg3_flag_clear(tp, ENABLE_ASF);
7344 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7345 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7346 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7347 u32 nic_cfg;
7349 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7350 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7351 tg3_flag_set(tp, ENABLE_ASF);
7352 tp->last_event_jiffies = jiffies;
7353 if (tg3_flag(tp, 5750_PLUS))
7354 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7358 return 0;
7361 /* tp->lock is held. */
7362 static void tg3_stop_fw(struct tg3 *tp)
7364 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7365 /* Wait for RX cpu to ACK the previous event. */
7366 tg3_wait_for_event_ack(tp);
7368 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7370 tg3_generate_fw_event(tp);
7372 /* Wait for RX cpu to ACK this event. */
7373 tg3_wait_for_event_ack(tp);
7377 /* tp->lock is held. */
7378 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7380 int err;
7382 tg3_stop_fw(tp);
7384 tg3_write_sig_pre_reset(tp, kind);
7386 tg3_abort_hw(tp, silent);
7387 err = tg3_chip_reset(tp);
7389 __tg3_set_mac_addr(tp, 0);
7391 tg3_write_sig_legacy(tp, kind);
7392 tg3_write_sig_post_reset(tp, kind);
7394 if (err)
7395 return err;
7397 return 0;
7400 #define RX_CPU_SCRATCH_BASE 0x30000
7401 #define RX_CPU_SCRATCH_SIZE 0x04000
7402 #define TX_CPU_SCRATCH_BASE 0x34000
7403 #define TX_CPU_SCRATCH_SIZE 0x04000
7405 /* tp->lock is held. */
7406 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7408 int i;
7410 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7412 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7413 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7415 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7416 return 0;
7418 if (offset == RX_CPU_BASE) {
7419 for (i = 0; i < 10000; i++) {
7420 tw32(offset + CPU_STATE, 0xffffffff);
7421 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7422 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7423 break;
7426 tw32(offset + CPU_STATE, 0xffffffff);
7427 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7428 udelay(10);
7429 } else {
7430 for (i = 0; i < 10000; i++) {
7431 tw32(offset + CPU_STATE, 0xffffffff);
7432 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7433 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7434 break;
7438 if (i >= 10000) {
7439 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7440 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7441 return -ENODEV;
7444 /* Clear firmware's nvram arbitration. */
7445 if (tg3_flag(tp, NVRAM))
7446 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7447 return 0;
7450 struct fw_info {
7451 unsigned int fw_base;
7452 unsigned int fw_len;
7453 const __be32 *fw_data;
7456 /* tp->lock is held. */
7457 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7458 int cpu_scratch_size, struct fw_info *info)
7460 int err, lock_err, i;
7461 void (*write_op)(struct tg3 *, u32, u32);
7463 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7464 netdev_err(tp->dev,
7465 "%s: Trying to load TX cpu firmware which is 5705\n",
7466 __func__);
7467 return -EINVAL;
7470 if (tg3_flag(tp, 5705_PLUS))
7471 write_op = tg3_write_mem;
7472 else
7473 write_op = tg3_write_indirect_reg32;
7475 /* It is possible that bootcode is still loading at this point.
7476 * Get the nvram lock first before halting the cpu.
7477 */
7478 lock_err = tg3_nvram_lock(tp);
7479 err = tg3_halt_cpu(tp, cpu_base);
7480 if (!lock_err)
7481 tg3_nvram_unlock(tp);
7482 if (err)
7483 goto out;
7485 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7486 write_op(tp, cpu_scratch_base + i, 0);
7487 tw32(cpu_base + CPU_STATE, 0xffffffff);
7488 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7489 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7490 write_op(tp, (cpu_scratch_base +
7491 (info->fw_base & 0xffff) +
7492 (i * sizeof(u32))),
7493 be32_to_cpu(info->fw_data[i]));
7495 err = 0;
7497 out:
7498 return err;
7501 /* tp->lock is held. */
7502 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7504 struct fw_info info;
7505 const __be32 *fw_data;
7506 int err, i;
7508 fw_data = (void *)tp->fw->data;
7510 /* Firmware blob starts with version numbers, followed by
7511 start address and length. We are setting complete length.
7512 length = end_address_of_bss - start_address_of_text.
7513 Remainder is the blob to be loaded contiguously
7514 from start address. */
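/* Editor's note: per the comment above, the header is presumably three
 * 32-bit words (12 bytes), which is why fw_len is computed as
 * tp->fw->size - 12 and the image starts at fw_data[3]:
 *   fw_data[0]   version
 *   fw_data[1]   load/start address (fw_base)
 *   fw_data[2]   length field (the driver derives the length from the
 *                file size instead)
 *   fw_data[3..] image, loaded contiguously at fw_base
 */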
7516 info.fw_base = be32_to_cpu(fw_data[1]);
7517 info.fw_len = tp->fw->size - 12;
7518 info.fw_data = &fw_data[3];
7520 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7521 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7522 &info);
7523 if (err)
7524 return err;
7526 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7527 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7528 &info);
7529 if (err)
7530 return err;
7532 /* Now startup only the RX cpu. */
7533 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7534 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7536 for (i = 0; i < 5; i++) {
7537 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7538 break;
7539 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7540 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7541 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7542 udelay(1000);
7544 if (i >= 5) {
7545 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7546 "should be %08x\n", __func__,
7547 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7548 return -ENODEV;
7550 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7551 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7553 return 0;
7556 /* tp->lock is held. */
7557 static int tg3_load_tso_firmware(struct tg3 *tp)
7559 struct fw_info info;
7560 const __be32 *fw_data;
7561 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7562 int err, i;
7564 if (tg3_flag(tp, HW_TSO_1) ||
7565 tg3_flag(tp, HW_TSO_2) ||
7566 tg3_flag(tp, HW_TSO_3))
7567 return 0;
7569 fw_data = (void *)tp->fw->data;
7571 /* Firmware blob starts with version numbers, followed by
7572 start address and length. We are setting complete length.
7573 length = end_address_of_bss - start_address_of_text.
7574 Remainder is the blob to be loaded contiguously
7575 from start address. */
7577 info.fw_base = be32_to_cpu(fw_data[1]);
7578 cpu_scratch_size = tp->fw_len;
7579 info.fw_len = tp->fw->size - 12;
7580 info.fw_data = &fw_data[3];
7582 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7583 cpu_base = RX_CPU_BASE;
7584 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7585 } else {
7586 cpu_base = TX_CPU_BASE;
7587 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7588 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7591 err = tg3_load_firmware_cpu(tp, cpu_base,
7592 cpu_scratch_base, cpu_scratch_size,
7593 &info);
7594 if (err)
7595 return err;
7597 /* Now startup the cpu. */
7598 tw32(cpu_base + CPU_STATE, 0xffffffff);
7599 tw32_f(cpu_base + CPU_PC, info.fw_base);
7601 for (i = 0; i < 5; i++) {
7602 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7603 break;
7604 tw32(cpu_base + CPU_STATE, 0xffffffff);
7605 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7606 tw32_f(cpu_base + CPU_PC, info.fw_base);
7607 udelay(1000);
7609 if (i >= 5) {
7610 netdev_err(tp->dev,
7611 "%s fails to set CPU PC, is %08x should be %08x\n",
7612 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7613 return -ENODEV;
7615 tw32(cpu_base + CPU_STATE, 0xffffffff);
7616 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7617 return 0;
7621 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7623 struct tg3 *tp = netdev_priv(dev);
7624 struct sockaddr *addr = p;
7625 int err = 0, skip_mac_1 = 0;
7627 if (!is_valid_ether_addr(addr->sa_data))
7628 return -EINVAL;
7630 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7632 if (!netif_running(dev))
7633 return 0;
7635 if (tg3_flag(tp, ENABLE_ASF)) {
7636 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7638 addr0_high = tr32(MAC_ADDR_0_HIGH);
7639 addr0_low = tr32(MAC_ADDR_0_LOW);
7640 addr1_high = tr32(MAC_ADDR_1_HIGH);
7641 addr1_low = tr32(MAC_ADDR_1_LOW);
7643 /* Skip MAC addr 1 if ASF is using it. */
7644 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7645 !(addr1_high == 0 && addr1_low == 0))
7646 skip_mac_1 = 1;
7648 spin_lock_bh(&tp->lock);
7649 __tg3_set_mac_addr(tp, skip_mac_1);
7650 spin_unlock_bh(&tp->lock);
7652 return err;
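/* Editor's sketch (not driver code): __tg3_set_mac_addr() is not shown
 * in this listing. The MAC_ADDR_*_HIGH/LOW register pairs read above
 * suggest a 16-bit "high" half and a 32-bit "low" half; a hypothetical
 * packing of a 6-byte station address might look like this:
 */
static void example_pack_mac(const u8 *addr, u32 *hi, u32 *lo)
{
	/* Assumed layout: two most significant bytes in the high
	 * register, the remaining four bytes in the low register.
	 */
	*hi = (addr[0] << 8) | addr[1];
	*lo = (addr[2] << 24) | (addr[3] << 16) |
	      (addr[4] << 8) | addr[5];
}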
7655 /* tp->lock is held. */
7656 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7657 dma_addr_t mapping, u32 maxlen_flags,
7658 u32 nic_addr)
7660 tg3_write_mem(tp,
7661 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7662 ((u64) mapping >> 32));
7663 tg3_write_mem(tp,
7664 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7665 ((u64) mapping & 0xffffffff));
7666 tg3_write_mem(tp,
7667 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7668 maxlen_flags);
7670 if (!tg3_flag(tp, 5705_PLUS))
7671 tg3_write_mem(tp,
7672 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7673 nic_addr);
7676 static void __tg3_set_rx_mode(struct net_device *);
7677 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7679 int i;
7681 if (!tg3_flag(tp, ENABLE_TSS)) {
7682 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7683 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7684 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7685 } else {
7686 tw32(HOSTCC_TXCOL_TICKS, 0);
7687 tw32(HOSTCC_TXMAX_FRAMES, 0);
7688 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7691 if (!tg3_flag(tp, ENABLE_RSS)) {
7692 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7693 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7694 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7695 } else {
7696 tw32(HOSTCC_RXCOL_TICKS, 0);
7697 tw32(HOSTCC_RXMAX_FRAMES, 0);
7698 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7701 if (!tg3_flag(tp, 5705_PLUS)) {
7702 u32 val = ec->stats_block_coalesce_usecs;
7704 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7705 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7707 if (!netif_carrier_ok(tp->dev))
7708 val = 0;
7710 tw32(HOSTCC_STAT_COAL_TICKS, val);
7713 for (i = 0; i < tp->irq_cnt - 1; i++) {
7714 u32 reg;
7716 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7717 tw32(reg, ec->rx_coalesce_usecs);
7718 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7719 tw32(reg, ec->rx_max_coalesced_frames);
7720 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7721 tw32(reg, ec->rx_max_coalesced_frames_irq);
7723 if (tg3_flag(tp, ENABLE_TSS)) {
7724 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7725 tw32(reg, ec->tx_coalesce_usecs);
7726 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7727 tw32(reg, ec->tx_max_coalesced_frames);
7728 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7729 tw32(reg, ec->tx_max_coalesced_frames_irq);
7733 for (; i < tp->irq_max - 1; i++) {
7734 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7735 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7736 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7738 if (tg3_flag(tp, ENABLE_TSS)) {
7739 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7740 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7741 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
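/* Editor's note: the VEC1 coalescing registers written above appear to
 * be laid out in per-vector blocks 0x18 bytes apart. A sketch of the
 * address arithmetic (hypothetical helper):
 */
static u32 example_vec_reg(u32 base_vec1, unsigned int vec)
{
	/* vec is zero-based from vector 1, i.e. vec == i for napi[i + 1] */
	return base_vec1 + vec * 0x18;
}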
7746 /* tp->lock is held. */
7747 static void tg3_rings_reset(struct tg3 *tp)
7749 int i;
7750 u32 stblk, txrcb, rxrcb, limit;
7751 struct tg3_napi *tnapi = &tp->napi[0];
7753 /* Disable all transmit rings but the first. */
7754 if (!tg3_flag(tp, 5705_PLUS))
7755 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7756 else if (tg3_flag(tp, 5717_PLUS))
7757 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7758 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7759 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7760 else
7761 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7763 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7764 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7765 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7766 BDINFO_FLAGS_DISABLED);
7769 /* Disable all receive return rings but the first. */
7770 if (tg3_flag(tp, 5717_PLUS))
7771 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7772 else if (!tg3_flag(tp, 5705_PLUS))
7773 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7774 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7775 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7776 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7777 else
7778 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7780 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7781 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7782 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7783 BDINFO_FLAGS_DISABLED);
7785 /* Disable interrupts */
7786 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7787 tp->napi[0].chk_msi_cnt = 0;
7788 tp->napi[0].last_rx_cons = 0;
7789 tp->napi[0].last_tx_cons = 0;
7791 /* Zero mailbox registers. */
7792 if (tg3_flag(tp, SUPPORT_MSIX)) {
7793 for (i = 1; i < tp->irq_max; i++) {
7794 tp->napi[i].tx_prod = 0;
7795 tp->napi[i].tx_cons = 0;
7796 if (tg3_flag(tp, ENABLE_TSS))
7797 tw32_mailbox(tp->napi[i].prodmbox, 0);
7798 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7799 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7800 tp->napi[i].chk_msi_cnt = 0;
7801 tp->napi[i].last_rx_cons = 0;
7802 tp->napi[i].last_tx_cons = 0;
7804 if (!tg3_flag(tp, ENABLE_TSS))
7805 tw32_mailbox(tp->napi[0].prodmbox, 0);
7806 } else {
7807 tp->napi[0].tx_prod = 0;
7808 tp->napi[0].tx_cons = 0;
7809 tw32_mailbox(tp->napi[0].prodmbox, 0);
7810 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7813 /* Make sure the NIC-based send BD rings are disabled. */
7814 if (!tg3_flag(tp, 5705_PLUS)) {
7815 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7816 for (i = 0; i < 16; i++)
7817 tw32_tx_mbox(mbox + i * 8, 0);
7820 txrcb = NIC_SRAM_SEND_RCB;
7821 rxrcb = NIC_SRAM_RCV_RET_RCB;
7823 /* Clear status block in ram. */
7824 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7826 /* Set status block DMA address */
7827 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7828 ((u64) tnapi->status_mapping >> 32));
7829 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7830 ((u64) tnapi->status_mapping & 0xffffffff));
7832 if (tnapi->tx_ring) {
7833 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7834 (TG3_TX_RING_SIZE <<
7835 BDINFO_FLAGS_MAXLEN_SHIFT),
7836 NIC_SRAM_TX_BUFFER_DESC);
7837 txrcb += TG3_BDINFO_SIZE;
7840 if (tnapi->rx_rcb) {
7841 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7842 (tp->rx_ret_ring_mask + 1) <<
7843 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7844 rxrcb += TG3_BDINFO_SIZE;
7847 stblk = HOSTCC_STATBLCK_RING1;
7849 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7850 u64 mapping = (u64)tnapi->status_mapping;
7851 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7852 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7854 /* Clear status block in ram. */
7855 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7857 if (tnapi->tx_ring) {
7858 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7859 (TG3_TX_RING_SIZE <<
7860 BDINFO_FLAGS_MAXLEN_SHIFT),
7861 NIC_SRAM_TX_BUFFER_DESC);
7862 txrcb += TG3_BDINFO_SIZE;
7865 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7866 ((tp->rx_ret_ring_mask + 1) <<
7867 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7869 stblk += 8;
7870 rxrcb += TG3_BDINFO_SIZE;
7874 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7876 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7878 if (!tg3_flag(tp, 5750_PLUS) ||
7879 tg3_flag(tp, 5780_CLASS) ||
7880 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7881 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7882 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7883 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7884 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7885 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7886 else
7887 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7889 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7890 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7892 val = min(nic_rep_thresh, host_rep_thresh);
7893 tw32(RCVBDI_STD_THRESH, val);
7895 if (tg3_flag(tp, 57765_PLUS))
7896 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7898 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7899 return;
7901 if (!tg3_flag(tp, 5705_PLUS))
7902 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7903 else
7904 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7906 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7908 val = min(bdcache_maxcnt / 2, host_rep_thresh);
7909 tw32(RCVBDI_JUMBO_THRESH, val);
7911 if (tg3_flag(tp, 57765_PLUS))
7912 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
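/* Editor's note: a worked example of the standard-ring threshold math
 * above, with hypothetical values. Suppose bdcache_maxcnt = 32 and
 * tp->rx_pending = 200:
 *   nic_rep_thresh  = min(32 / 2, tp->rx_std_max_post)
 *   host_rep_thresh = max(200 / 8, 1) = 25
 *   RCVBDI_STD_THRESH gets min(nic_rep_thresh, host_rep_thresh),
 * so replenishment triggers on whichever side runs low first.
 */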
7915 /* tp->lock is held. */
7916 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7918 u32 val, rdmac_mode;
7919 int i, err, limit;
7920 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7922 tg3_disable_ints(tp);
7924 tg3_stop_fw(tp);
7926 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7928 if (tg3_flag(tp, INIT_COMPLETE))
7929 tg3_abort_hw(tp, 1);
7931 /* Enable MAC control of LPI */
7932 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7933 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7934 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7935 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7937 tw32_f(TG3_CPMU_EEE_CTRL,
7938 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7940 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7941 TG3_CPMU_EEEMD_LPI_IN_TX |
7942 TG3_CPMU_EEEMD_LPI_IN_RX |
7943 TG3_CPMU_EEEMD_EEE_ENABLE;
7945 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7946 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7948 if (tg3_flag(tp, ENABLE_APE))
7949 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7951 tw32_f(TG3_CPMU_EEE_MODE, val);
7953 tw32_f(TG3_CPMU_EEE_DBTMR1,
7954 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7955 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7957 tw32_f(TG3_CPMU_EEE_DBTMR2,
7958 TG3_CPMU_DBTMR2_APE_TX_2047US |
7959 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7962 if (reset_phy)
7963 tg3_phy_reset(tp);
7965 err = tg3_chip_reset(tp);
7966 if (err)
7967 return err;
7969 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7971 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7972 val = tr32(TG3_CPMU_CTRL);
7973 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7974 tw32(TG3_CPMU_CTRL, val);
7976 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7977 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7978 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7979 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7981 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7982 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7983 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7984 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7986 val = tr32(TG3_CPMU_HST_ACC);
7987 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7988 val |= CPMU_HST_ACC_MACCLK_6_25;
7989 tw32(TG3_CPMU_HST_ACC, val);
7992 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7993 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7994 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7995 PCIE_PWR_MGMT_L1_THRESH_4MS;
7996 tw32(PCIE_PWR_MGMT_THRESH, val);
7998 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7999 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8001 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8003 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8004 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8007 if (tg3_flag(tp, L1PLLPD_EN)) {
8008 u32 grc_mode = tr32(GRC_MODE);
8010 /* Access the lower 1K of PL PCIE block registers. */
8011 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8012 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8014 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8015 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8016 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8018 tw32(GRC_MODE, grc_mode);
8021 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8022 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8023 u32 grc_mode = tr32(GRC_MODE);
8025 /* Access the lower 1K of PL PCIE block registers. */
8026 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8027 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8029 val = tr32(TG3_PCIE_TLDLPL_PORT +
8030 TG3_PCIE_PL_LO_PHYCTL5);
8031 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8032 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8034 tw32(GRC_MODE, grc_mode);
8037 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8038 u32 grc_mode = tr32(GRC_MODE);
8040 /* Access the lower 1K of DL PCIE block registers. */
8041 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8042 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8044 val = tr32(TG3_PCIE_TLDLPL_PORT +
8045 TG3_PCIE_DL_LO_FTSMAX);
8046 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8047 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8048 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8050 tw32(GRC_MODE, grc_mode);
8053 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8054 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8055 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8056 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8059 /* This works around an issue with Athlon chipsets on
8060 * B3 tigon3 silicon. This bit has no effect on any
8061 * other revision. But do not set this on PCI Express
8062 * chips and don't even touch the clocks if the CPMU is present.
8063 */
8064 if (!tg3_flag(tp, CPMU_PRESENT)) {
8065 if (!tg3_flag(tp, PCI_EXPRESS))
8066 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8067 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8070 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8071 tg3_flag(tp, PCIX_MODE)) {
8072 val = tr32(TG3PCI_PCISTATE);
8073 val |= PCISTATE_RETRY_SAME_DMA;
8074 tw32(TG3PCI_PCISTATE, val);
8077 if (tg3_flag(tp, ENABLE_APE)) {
8078 /* Allow reads and writes to the
8079 * APE register and memory space.
8080 */
8081 val = tr32(TG3PCI_PCISTATE);
8082 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8083 PCISTATE_ALLOW_APE_SHMEM_WR |
8084 PCISTATE_ALLOW_APE_PSPACE_WR;
8085 tw32(TG3PCI_PCISTATE, val);
8088 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8089 /* Enable some hw fixes. */
8090 val = tr32(TG3PCI_MSI_DATA);
8091 val |= (1 << 26) | (1 << 28) | (1 << 29);
8092 tw32(TG3PCI_MSI_DATA, val);
8095 /* Descriptor ring init may make accesses to the
8096 * NIC SRAM area to setup the TX descriptors, so we
8097 * can only do this after the hardware has been
8098 * successfully reset.
8099 */
8100 err = tg3_init_rings(tp);
8101 if (err)
8102 return err;
8104 if (tg3_flag(tp, 57765_PLUS)) {
8105 val = tr32(TG3PCI_DMA_RW_CTRL) &
8106 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8107 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8108 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8109 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8110 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8111 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8112 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8113 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8114 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8115 /* This value is determined during the probe time DMA
8116 * engine test, tg3_test_dma.
8117 */
8118 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8121 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8122 GRC_MODE_4X_NIC_SEND_RINGS |
8123 GRC_MODE_NO_TX_PHDR_CSUM |
8124 GRC_MODE_NO_RX_PHDR_CSUM);
8125 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8127 /* Pseudo-header checksum is done by hardware logic and not
8128 * the offload processors, so make the chip do the pseudo-
8129 * header checksums on receive. For transmit it is more
8130 * convenient to do the pseudo-header checksum in software
8131 * as Linux does that on transmit for us in all cases.
8132 */
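/* Editor's note: for IPv4 the pseudo-header folded here covers the
 * source and destination addresses, the protocol number and the L4
 * length, i.e. roughly csum = fold16(saddr + daddr + proto + len);
 * on transmit the stack seeds this via csum_tcpudp_magic() and
 * friends, which is why the chip is told to skip it.
 */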
8133 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8135 tw32(GRC_MODE,
8136 tp->grc_mode |
8137 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8139 /* Setup the timer prescaler register. Clock is always 66 MHz. */
8140 val = tr32(GRC_MISC_CFG);
8141 val &= ~0xff;
8142 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8143 tw32(GRC_MISC_CFG, val);
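/* Editor's note: a prescaler value of 65 presumably divides the fixed
 * 66 MHz clock by (65 + 1) = 66, yielding a 1 MHz tick, i.e. the
 * 1 usec granularity the coalescing timers use.
 */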
8145 /* Initialize MBUF/DESC pool. */
8146 if (tg3_flag(tp, 5750_PLUS)) {
8147 /* Do nothing. */
8148 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8149 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8150 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8151 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8152 else
8153 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8154 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8155 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8156 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8157 int fw_len;
8159 fw_len = tp->fw_len;
8160 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8161 tw32(BUFMGR_MB_POOL_ADDR,
8162 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8163 tw32(BUFMGR_MB_POOL_SIZE,
8164 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8167 if (tp->dev->mtu <= ETH_DATA_LEN) {
8168 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8169 tp->bufmgr_config.mbuf_read_dma_low_water);
8170 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8171 tp->bufmgr_config.mbuf_mac_rx_low_water);
8172 tw32(BUFMGR_MB_HIGH_WATER,
8173 tp->bufmgr_config.mbuf_high_water);
8174 } else {
8175 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8176 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8177 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8178 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8179 tw32(BUFMGR_MB_HIGH_WATER,
8180 tp->bufmgr_config.mbuf_high_water_jumbo);
8182 tw32(BUFMGR_DMA_LOW_WATER,
8183 tp->bufmgr_config.dma_low_water);
8184 tw32(BUFMGR_DMA_HIGH_WATER,
8185 tp->bufmgr_config.dma_high_water);
8187 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8188 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8189 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8190 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8191 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8192 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8193 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8194 tw32(BUFMGR_MODE, val);
8195 for (i = 0; i < 2000; i++) {
8196 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8197 break;
8198 udelay(10);
8200 if (i >= 2000) {
8201 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8202 return -ENODEV;
8205 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8206 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8208 tg3_setup_rxbd_thresholds(tp);
8210 /* Initialize TG3_BDINFO's at:
8211 * RCVDBDI_STD_BD: standard eth size rx ring
8212 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8213 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8215 * like so:
8216 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8217 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8218 * ring attribute flags
8219 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8221 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8222 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8224 * The size of each ring is fixed in the firmware, but the location is
8225 * configurable.
8226 */
8227 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8228 ((u64) tpr->rx_std_mapping >> 32));
8229 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8230 ((u64) tpr->rx_std_mapping & 0xffffffff));
8231 if (!tg3_flag(tp, 5717_PLUS))
8232 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8233 NIC_SRAM_RX_BUFFER_DESC);
8235 /* Disable the mini ring */
8236 if (!tg3_flag(tp, 5705_PLUS))
8237 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8238 BDINFO_FLAGS_DISABLED);
8240 /* Program the jumbo buffer descriptor ring control
8241 * blocks on those devices that have them.
8242 */
8243 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8244 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8246 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8247 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8248 ((u64) tpr->rx_jmb_mapping >> 32));
8249 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8250 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8251 val = TG3_RX_JMB_RING_SIZE(tp) <<
8252 BDINFO_FLAGS_MAXLEN_SHIFT;
8253 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8254 val | BDINFO_FLAGS_USE_EXT_RECV);
8255 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8256 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8257 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8258 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8259 } else {
8260 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8261 BDINFO_FLAGS_DISABLED);
8264 if (tg3_flag(tp, 57765_PLUS)) {
8265 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8266 val = TG3_RX_STD_MAX_SIZE_5700;
8267 else
8268 val = TG3_RX_STD_MAX_SIZE_5717;
8269 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8270 val |= (TG3_RX_STD_DMA_SZ << 2);
8271 } else
8272 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8273 } else
8274 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8276 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
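/* Publish the initial producer indices. Writing these mailboxes tells
 * the hardware how many rx buffers the host has posted; a zero jumbo
 * index effectively leaves that ring empty.
 */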
8278 tpr->rx_std_prod_idx = tp->rx_pending;
8279 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8281 tpr->rx_jmb_prod_idx =
8282 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8283 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8285 tg3_rings_reset(tp);
8287 /* Initialize MAC address and backoff seed. */
8288 __tg3_set_mac_addr(tp, 0);
8290 /* MTU + ethernet header + FCS + optional VLAN tag */
8291 tw32(MAC_RX_MTU_SIZE,
8292 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
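/* For a standard 1500-byte MTU this works out to 1500 + 14 + 4 + 4 =
 * 1522 bytes, the classic VLAN-tagged maximum frame size.
 */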
8294 /* The slot time is changed by tg3_setup_phy if we
8295 * run at gigabit with half duplex.
8296 */
8297 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8298 (6 << TX_LENGTHS_IPG_SHIFT) |
8299 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8301 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8302 val |= tr32(MAC_TX_LENGTHS) &
8303 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8304 TX_LENGTHS_CNT_DWN_VAL_MSK);
8306 tw32(MAC_TX_LENGTHS, val);
8308 /* Receive rules. */
8309 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8310 tw32(RCVLPC_CONFIG, 0x0181);
8312 /* Calculate RDMAC_MODE setting early, we need it to determine
8313 * the RCVLPC_STATE_ENABLE mask.
8314 */
8315 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8316 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8317 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8318 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8319 RDMAC_MODE_LNGREAD_ENAB);
8321 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8322 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8324 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8325 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8326 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8327 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8328 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8329 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8331 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8332 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8333 if (tg3_flag(tp, TSO_CAPABLE) &&
8334 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8335 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8336 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8337 !tg3_flag(tp, IS_5788)) {
8338 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8342 if (tg3_flag(tp, PCI_EXPRESS))
8343 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8345 if (tg3_flag(tp, HW_TSO_1) ||
8346 tg3_flag(tp, HW_TSO_2) ||
8347 tg3_flag(tp, HW_TSO_3))
8348 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8350 if (tg3_flag(tp, 57765_PLUS) ||
8351 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8352 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8353 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8355 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8356 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8358 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8359 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8360 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8361 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8362 tg3_flag(tp, 57765_PLUS)) {
8363 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8364 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8365 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8366 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8367 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8368 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8369 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8370 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8371 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8373 tw32(TG3_RDMA_RSRVCTRL_REG,
8374 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8377 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8378 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8379 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8380 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8381 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8382 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8385 /* Receive/send statistics. */
8386 if (tg3_flag(tp, 5750_PLUS)) {
8387 val = tr32(RCVLPC_STATS_ENABLE);
8388 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8389 tw32(RCVLPC_STATS_ENABLE, val);
8390 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8391 tg3_flag(tp, TSO_CAPABLE)) {
8392 val = tr32(RCVLPC_STATS_ENABLE);
8393 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8394 tw32(RCVLPC_STATS_ENABLE, val);
8395 } else {
8396 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8398 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8399 tw32(SNDDATAI_STATSENAB, 0xffffff);
8400 tw32(SNDDATAI_STATSCTRL,
8401 (SNDDATAI_SCTRL_ENABLE |
8402 SNDDATAI_SCTRL_FASTUPD));
8404 /* Setup host coalescing engine. */
8405 tw32(HOSTCC_MODE, 0);
8406 for (i = 0; i < 2000; i++) {
8407 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8408 break;
8409 udelay(10);
8412 __tg3_set_coalesce(tp, &tp->coal);
8414 if (!tg3_flag(tp, 5705_PLUS)) {
8415 /* Status/statistics block address. See tg3_timer,
8416 * the tg3_periodic_fetch_stats call there, and
8417 * tg3_get_stats to see how this works for 5705/5750 chips.
8418 */
8419 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8420 ((u64) tp->stats_mapping >> 32));
8421 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8422 ((u64) tp->stats_mapping & 0xffffffff));
8423 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8425 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8427 /* Clear statistics and status block memory areas */
8428 for (i = NIC_SRAM_STATS_BLK;
8429 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8430 i += sizeof(u32)) {
8431 tg3_write_mem(tp, i, 0);
8432 udelay(40);
8436 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8438 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8439 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8440 if (!tg3_flag(tp, 5705_PLUS))
8441 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8443 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8444 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8445 /* reset to prevent losing 1st rx packet intermittently */
8446 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8447 udelay(10);
8450 if (tg3_flag(tp, ENABLE_APE))
8451 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8452 else
8453 tp->mac_mode = 0;
8454 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8455 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8456 if (!tg3_flag(tp, 5705_PLUS) &&
8457 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8458 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8459 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8460 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8461 udelay(40);
8463 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8464 * If TG3_FLAG_IS_NIC is zero, we should read the
8465 * register to preserve the GPIO settings for LOMs. The GPIOs,
8466 * whether used as inputs or outputs, are set by boot code after
8467 * reset.
8468 */
8469 if (!tg3_flag(tp, IS_NIC)) {
8470 u32 gpio_mask;
8472 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8473 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8474 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8476 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8477 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8478 GRC_LCLCTRL_GPIO_OUTPUT3;
8480 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8481 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8483 tp->grc_local_ctrl &= ~gpio_mask;
8484 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8486 /* GPIO1 must be driven high for eeprom write protect */
8487 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8488 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8489 GRC_LCLCTRL_GPIO_OUTPUT1);
8491 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8492 udelay(100);
8494 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8495 val = tr32(MSGINT_MODE);
8496 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8497 tw32(MSGINT_MODE, val);
8500 if (!tg3_flag(tp, 5705_PLUS)) {
8501 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8502 udelay(40);
8505 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8506 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8507 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8508 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8509 WDMAC_MODE_LNGREAD_ENAB);
8511 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8512 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8513 if (tg3_flag(tp, TSO_CAPABLE) &&
8514 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8515 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8516 /* nothing */
8517 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8518 !tg3_flag(tp, IS_5788)) {
8519 val |= WDMAC_MODE_RX_ACCEL;
8523 /* Enable host coalescing bug fix */
8524 if (tg3_flag(tp, 5755_PLUS))
8525 val |= WDMAC_MODE_STATUS_TAG_FIX;
8527 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8528 val |= WDMAC_MODE_BURST_ALL_DATA;
8530 tw32_f(WDMAC_MODE, val);
8531 udelay(40);
8533 if (tg3_flag(tp, PCIX_MODE)) {
8534 u16 pcix_cmd;
8536 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8537 &pcix_cmd);
8538 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8539 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8540 pcix_cmd |= PCI_X_CMD_READ_2K;
8541 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8542 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8543 pcix_cmd |= PCI_X_CMD_READ_2K;
8545 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8546 pcix_cmd);
8549 tw32_f(RDMAC_MODE, rdmac_mode);
8550 udelay(40);
8552 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8553 if (!tg3_flag(tp, 5705_PLUS))
8554 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8556 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8557 tw32(SNDDATAC_MODE,
8558 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8559 else
8560 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8562 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8563 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8564 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8565 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8566 val |= RCVDBDI_MODE_LRG_RING_SZ;
8567 tw32(RCVDBDI_MODE, val);
8568 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8569 if (tg3_flag(tp, HW_TSO_1) ||
8570 tg3_flag(tp, HW_TSO_2) ||
8571 tg3_flag(tp, HW_TSO_3))
8572 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8573 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8574 if (tg3_flag(tp, ENABLE_TSS))
8575 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8576 tw32(SNDBDI_MODE, val);
8577 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8579 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8580 err = tg3_load_5701_a0_firmware_fix(tp);
8581 if (err)
8582 return err;
8585 if (tg3_flag(tp, TSO_CAPABLE)) {
8586 err = tg3_load_tso_firmware(tp);
8587 if (err)
8588 return err;
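/* Both loads above appear to place an image in NIC memory: a fixup
 * image for the 5701 A0 stepping, and the TSO firmware for chips that
 * offload TSO to the NIC's on-board CPU rather than dedicated silicon.
 */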
8591 tp->tx_mode = TX_MODE_ENABLE;
8593 if (tg3_flag(tp, 5755_PLUS) ||
8594 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8595 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8597 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8598 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8599 tp->tx_mode &= ~val;
8600 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8603 tw32_f(MAC_TX_MODE, tp->tx_mode);
8604 udelay(100);
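/* RSS setup below: the indirection table maps flow hash values to rx
 * rings. Vector 0 is reserved for link and error interrupts, hence the
 * i % (tp->irq_cnt - 1) spread over the remaining rings, packed four
 * 8-bit entries per 32-bit register. The hash key is a fixed default
 * rather than a per-boot random value.
 */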
8606 if (tg3_flag(tp, ENABLE_RSS)) {
8607 u32 reg = MAC_RSS_INDIR_TBL_0;
8608 u8 *ent = (u8 *)&val;
8610 /* Setup the indirection table */
8611 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8612 int idx = i % sizeof(val);
8614 ent[idx] = i % (tp->irq_cnt - 1);
8615 if (idx == sizeof(val) - 1) {
8616 tw32(reg, val);
8617 reg += 4;
8621 /* Setup the "secret" hash key. */
8622 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8623 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8624 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8625 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8626 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8627 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8628 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8629 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8630 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8631 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8634 tp->rx_mode = RX_MODE_ENABLE;
8635 if (tg3_flag(tp, 5755_PLUS))
8636 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8638 if (tg3_flag(tp, ENABLE_RSS))
8639 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8640 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8641 RX_MODE_RSS_IPV6_HASH_EN |
8642 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8643 RX_MODE_RSS_IPV4_HASH_EN |
8644 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8646 tw32_f(MAC_RX_MODE, tp->rx_mode);
8647 udelay(10);
8649 tw32(MAC_LED_CTRL, tp->led_ctrl);
8651 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8652 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8653 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8654 udelay(10);
8656 tw32_f(MAC_RX_MODE, tp->rx_mode);
8657 udelay(10);
8659 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8660 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8661 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8662 /* Set drive transmission level to 1.2V */
8663 /* only if the signal pre-emphasis bit is not set */
8664 val = tr32(MAC_SERDES_CFG);
8665 val &= 0xfffff000;
8666 val |= 0x880;
8667 tw32(MAC_SERDES_CFG, val);
8669 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8670 tw32(MAC_SERDES_CFG, 0x616000);
8673 /* Prevent chip from dropping frames when flow control
8674 * is enabled.
8675 */
8676 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8677 val = 1;
8678 else
8679 val = 2;
8680 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8682 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8683 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8684 /* Use hardware link auto-negotiation */
8685 tg3_flag_set(tp, HW_AUTONEG);
8688 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8689 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8690 u32 tmp;
8692 tmp = tr32(SERDES_RX_CTRL);
8693 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8694 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8695 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8696 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8699 if (!tg3_flag(tp, USE_PHYLIB)) {
8700 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8701 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8702 tp->link_config.speed = tp->link_config.orig_speed;
8703 tp->link_config.duplex = tp->link_config.orig_duplex;
8704 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8707 err = tg3_setup_phy(tp, 0);
8708 if (err)
8709 return err;
8711 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8712 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8713 u32 tmp;
8715 /* Clear CRC stats. */
8716 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8717 tg3_writephy(tp, MII_TG3_TEST1,
8718 tmp | MII_TG3_TEST1_CRC_EN);
8719 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8724 __tg3_set_rx_mode(tp->dev);
8726 /* Initialize receive rules. */
8727 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8728 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8729 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8730 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8732 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8733 limit = 8;
8734 else
8735 limit = 16;
8736 if (tg3_flag(tp, ENABLE_ASF))
8737 limit -= 4;
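/* The switch below falls through deliberately: entering at "limit"
 * clears receive rules limit - 1 down to 4. Rules 0 and 1 were just
 * programmed above, and the writes for rules 3 and 2 are intentionally
 * left commented out.
 */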
8738 switch (limit) {
8739 case 16:
8740 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8741 case 15:
8742 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8743 case 14:
8744 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8745 case 13:
8746 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8747 case 12:
8748 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8749 case 11:
8750 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8751 case 10:
8752 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8753 case 9:
8754 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8755 case 8:
8756 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8757 case 7:
8758 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8759 case 6:
8760 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8761 case 5:
8762 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8763 case 4:
8764 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8765 case 3:
8766 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8767 case 2:
8768 case 1:
8770 default:
8771 break;
8774 if (tg3_flag(tp, ENABLE_APE))
8775 /* Write our heartbeat update interval to APE. */
8776 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8777 APE_HOST_HEARTBEAT_INT_DISABLE);
8779 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8781 return 0;
8784 /* Called at device open time to get the chip ready for
8785 * packet processing. Invoked with tp->lock held.
8786 */
8787 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8789 tg3_switch_clocks(tp);
8791 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8793 return tg3_reset_hw(tp, reset_phy);
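/* TG3_STAT_ADD32 folds a 32-bit hardware counter sample into a 64-bit
 * {high, low} accumulator; the unsigned "low < __val" test detects wrap
 * of the low word and carries into the high word.
 */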
8796 #define TG3_STAT_ADD32(PSTAT, REG) \
8797 do { u32 __val = tr32(REG); \
8798 (PSTAT)->low += __val; \
8799 if ((PSTAT)->low < __val) \
8800 (PSTAT)->high += 1; \
8801 } while (0)
8803 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8805 struct tg3_hw_stats *sp = tp->hw_stats;
8807 if (!netif_carrier_ok(tp->dev))
8808 return;
8810 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8811 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8812 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8813 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8814 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8815 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8816 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8817 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8818 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8819 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8820 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8821 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8822 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8824 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8825 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8826 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8827 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8828 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8829 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8830 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8831 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8832 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8833 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8834 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8835 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8836 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8837 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8839 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
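/* On 5717 and the A0 steppings of 5719/5720 the RCVLPC discard counter
 * is not used; instead the mbuf low-watermark attention bit is sampled,
 * cleared, and folded into rx_discards as an approximation.
 */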
8840 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8841 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8842 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8843 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8844 } else {
8845 u32 val = tr32(HOSTCC_FLOW_ATTN);
8846 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8847 if (val) {
8848 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8849 sp->rx_discards.low += val;
8850 if (sp->rx_discards.low < val)
8851 sp->rx_discards.high += 1;
8853 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8855 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
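/* MSI interrupts can apparently be missed on some chips (the caller
 * only runs this on 5717/57765 class devices). The check below notices
 * a vector with work pending whose consumer indices have not moved for
 * two consecutive timer ticks, and re-writes the interrupt mailbox with
 * the last status tag to re-fire the interrupt.
 */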
8858 static void tg3_chk_missed_msi(struct tg3 *tp)
8860 u32 i;
8862 for (i = 0; i < tp->irq_cnt; i++) {
8863 struct tg3_napi *tnapi = &tp->napi[i];
8865 if (tg3_has_work(tnapi)) {
8866 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
8867 tnapi->last_tx_cons == tnapi->tx_cons) {
8868 if (tnapi->chk_msi_cnt < 1) {
8869 tnapi->chk_msi_cnt++;
8870 return;
8872 tw32_mailbox(tnapi->int_mbox,
8873 tnapi->last_tag << 24);
8876 tnapi->chk_msi_cnt = 0;
8877 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
8878 tnapi->last_tx_cons = tnapi->tx_cons;
8882 static void tg3_timer(unsigned long __opaque)
8884 struct tg3 *tp = (struct tg3 *) __opaque;
8886 if (tp->irq_sync)
8887 goto restart_timer;
8889 spin_lock(&tp->lock);
8891 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8892 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8893 tg3_chk_missed_msi(tp);
8895 if (!tg3_flag(tp, TAGGED_STATUS)) {
8896 /* All of this garbage is because when using non-tagged
8897 * IRQ status the mailbox/status_block protocol the chip
8898 * uses with the cpu is race prone.
8899 */
8900 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8901 tw32(GRC_LOCAL_CTRL,
8902 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8903 } else {
8904 tw32(HOSTCC_MODE, tp->coalesce_mode |
8905 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8908 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8909 tg3_flag_set(tp, RESTART_TIMER);
8910 spin_unlock(&tp->lock);
8911 schedule_work(&tp->reset_task);
8912 return;
8916 /* This part only runs once per second. */
8917 if (!--tp->timer_counter) {
8918 if (tg3_flag(tp, 5705_PLUS))
8919 tg3_periodic_fetch_stats(tp);
8921 if (tp->setlpicnt && !--tp->setlpicnt)
8922 tg3_phy_eee_enable(tp);
8924 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8925 u32 mac_stat;
8926 int phy_event;
8928 mac_stat = tr32(MAC_STATUS);
8930 phy_event = 0;
8931 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8932 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8933 phy_event = 1;
8934 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8935 phy_event = 1;
8937 if (phy_event)
8938 tg3_setup_phy(tp, 0);
8939 } else if (tg3_flag(tp, POLL_SERDES)) {
8940 u32 mac_stat = tr32(MAC_STATUS);
8941 int need_setup = 0;
8943 if (netif_carrier_ok(tp->dev) &&
8944 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8945 need_setup = 1;
8947 if (!netif_carrier_ok(tp->dev) &&
8948 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8949 MAC_STATUS_SIGNAL_DET))) {
8950 need_setup = 1;
8952 if (need_setup) {
8953 if (!tp->serdes_counter) {
8954 tw32_f(MAC_MODE,
8955 (tp->mac_mode &
8956 ~MAC_MODE_PORT_MODE_MASK));
8957 udelay(40);
8958 tw32_f(MAC_MODE, tp->mac_mode);
8959 udelay(40);
8961 tg3_setup_phy(tp, 0);
8963 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8964 tg3_flag(tp, 5780_CLASS)) {
8965 tg3_serdes_parallel_detect(tp);
8968 tp->timer_counter = tp->timer_multiplier;
8971 /* Heartbeat is only sent once every 2 seconds.
8973 * The heartbeat is to tell the ASF firmware that the host
8974 * driver is still alive. In the event that the OS crashes,
8975 * ASF needs to reset the hardware to free up the FIFO space
8976 * that may be filled with rx packets destined for the host.
8977 * If the FIFO is full, ASF will no longer function properly.
8979 * Unintended resets have been reported on real time kernels
8980 * where the timer doesn't run on time. Netpoll will also have
8981 * the same problem.
8983 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8984 * to check the ring condition when the heartbeat is expiring
8985 * before doing the reset. This will prevent most unintended
8986 * resets.
8987 */
8988 if (!--tp->asf_counter) {
8989 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8990 tg3_wait_for_event_ack(tp);
8992 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8993 FWCMD_NICDRV_ALIVE3);
8994 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8995 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8996 TG3_FW_UPDATE_TIMEOUT_SEC);
8998 tg3_generate_fw_event(tp);
9000 tp->asf_counter = tp->asf_multiplier;
9003 spin_unlock(&tp->lock);
9005 restart_timer:
9006 tp->timer.expires = jiffies + tp->timer_offset;
9007 add_timer(&tp->timer);
9010 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9012 irq_handler_t fn;
9013 unsigned long flags;
9014 char *name;
9015 struct tg3_napi *tnapi = &tp->napi[irq_num];
9017 if (tp->irq_cnt == 1)
9018 name = tp->dev->name;
9019 else {
9020 name = &tnapi->irq_lbl[0];
9021 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9022 name[IFNAMSIZ-1] = 0;
9025 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9026 fn = tg3_msi;
9027 if (tg3_flag(tp, 1SHOT_MSI))
9028 fn = tg3_msi_1shot;
9029 flags = 0;
9030 } else {
9031 fn = tg3_interrupt;
9032 if (tg3_flag(tp, TAGGED_STATUS))
9033 fn = tg3_interrupt_tagged;
9034 flags = IRQF_SHARED;
9037 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
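/* The self-test below temporarily installs tg3_test_isr, turns off MSI
 * one-shot mode so delivery is observable, kicks the coalescing engine
 * to raise an interrupt, and then polls the interrupt mailbox for up to
 * roughly 50 msec (5 x 10 msec) before declaring failure.
 */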
9040 static int tg3_test_interrupt(struct tg3 *tp)
9042 struct tg3_napi *tnapi = &tp->napi[0];
9043 struct net_device *dev = tp->dev;
9044 int err, i, intr_ok = 0;
9045 u32 val;
9047 if (!netif_running(dev))
9048 return -ENODEV;
9050 tg3_disable_ints(tp);
9052 free_irq(tnapi->irq_vec, tnapi);
9054 /*
9055 * Turn off MSI one shot mode. Otherwise this test has no
9056 * observable way to know whether the interrupt was delivered.
9057 */
9058 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9059 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9060 tw32(MSGINT_MODE, val);
9063 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9064 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9065 if (err)
9066 return err;
9068 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9069 tg3_enable_ints(tp);
9071 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9072 tnapi->coal_now);
9074 for (i = 0; i < 5; i++) {
9075 u32 int_mbox, misc_host_ctrl;
9077 int_mbox = tr32_mailbox(tnapi->int_mbox);
9078 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9080 if ((int_mbox != 0) ||
9081 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9082 intr_ok = 1;
9083 break;
9086 msleep(10);
9089 tg3_disable_ints(tp);
9091 free_irq(tnapi->irq_vec, tnapi);
9093 err = tg3_request_irq(tp, 0);
9095 if (err)
9096 return err;
9098 if (intr_ok) {
9099 /* Reenable MSI one shot mode. */
9100 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9101 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9102 tw32(MSGINT_MODE, val);
9104 return 0;
9107 return -EIO;
9110 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9111 * successfully restored.
9112 */
9113 static int tg3_test_msi(struct tg3 *tp)
9115 int err;
9116 u16 pci_cmd;
9118 if (!tg3_flag(tp, USING_MSI))
9119 return 0;
9121 /* Turn off SERR reporting in case MSI terminates with Master
9122 * Abort.
9123 */
9124 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9125 pci_write_config_word(tp->pdev, PCI_COMMAND,
9126 pci_cmd & ~PCI_COMMAND_SERR);
9128 err = tg3_test_interrupt(tp);
9130 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9132 if (!err)
9133 return 0;
9135 /* other failures */
9136 if (err != -EIO)
9137 return err;
9139 /* MSI test failed, go back to INTx mode */
9140 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9141 "to INTx mode. Please report this failure to the PCI "
9142 "maintainer and include system chipset information\n");
9144 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9146 pci_disable_msi(tp->pdev);
9148 tg3_flag_clear(tp, USING_MSI);
9149 tp->napi[0].irq_vec = tp->pdev->irq;
9151 err = tg3_request_irq(tp, 0);
9152 if (err)
9153 return err;
9155 /* Need to reset the chip because the MSI cycle may have terminated
9156 * with Master Abort.
9157 */
9158 tg3_full_lock(tp, 1);
9160 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9161 err = tg3_init_hw(tp, 1);
9163 tg3_full_unlock(tp);
9165 if (err)
9166 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9168 return err;
9171 static int tg3_request_firmware(struct tg3 *tp)
9173 const __be32 *fw_data;
9175 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9176 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9177 tp->fw_needed);
9178 return -ENOENT;
9181 fw_data = (void *)tp->fw->data;
9183 /* Firmware blob starts with version numbers, followed by
9184 * start address and _full_ length including BSS sections
9185 * (which must be longer than the actual data, of course */
9188 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9189 if (tp->fw_len < (tp->fw->size - 12)) {
9190 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9191 tp->fw_len, tp->fw_needed);
9192 release_firmware(tp->fw);
9193 tp->fw = NULL;
9194 return -EINVAL;
9197 /* We no longer need firmware; we have it. */
9198 tp->fw_needed = NULL;
9199 return 0;
9202 static bool tg3_enable_msix(struct tg3 *tp)
9204 int i, rc, cpus = num_online_cpus();
9205 struct msix_entry msix_ent[tp->irq_max];
9207 if (cpus == 1)
9208 /* Just fall back to the simpler MSI mode. */
9209 return false;
9211 /*
9212 * We want as many rx rings enabled as there are cpus.
9213 * The first MSIX vector only deals with link interrupts, etc,
9214 * so we add one to the number of vectors we are requesting.
9215 */
9216 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9218 for (i = 0; i < tp->irq_max; i++) {
9219 msix_ent[i].entry = i;
9220 msix_ent[i].vector = 0;
9223 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9224 if (rc < 0) {
9225 return false;
9226 } else if (rc != 0) {
9227 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9228 return false;
9229 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9230 tp->irq_cnt, rc);
9231 tp->irq_cnt = rc;
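/* pci_enable_msix() returns 0 on success, a negative errno on failure,
 * or, when fewer vectors are available than requested, a positive count
 * of what could be allocated; the retry above re-requests with that
 * smaller count and then trims tp->irq_cnt to match.
 */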
9234 for (i = 0; i < tp->irq_max; i++)
9235 tp->napi[i].irq_vec = msix_ent[i].vector;
9237 netif_set_real_num_tx_queues(tp->dev, 1);
9238 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9239 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9240 pci_disable_msix(tp->pdev);
9241 return false;
9244 if (tp->irq_cnt > 1) {
9245 tg3_flag_set(tp, ENABLE_RSS);
9247 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9248 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9249 tg3_flag_set(tp, ENABLE_TSS);
9250 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9254 return true;
9257 static void tg3_ints_init(struct tg3 *tp)
9259 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9260 !tg3_flag(tp, TAGGED_STATUS)) {
9261 /* All MSI supporting chips should support tagged
9262 * status. Assert that this is the case.
9263 */
9264 netdev_warn(tp->dev,
9265 "MSI without TAGGED_STATUS? Not using MSI\n");
9266 goto defcfg;
9269 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9270 tg3_flag_set(tp, USING_MSIX);
9271 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9272 tg3_flag_set(tp, USING_MSI);
9274 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9275 u32 msi_mode = tr32(MSGINT_MODE);
9276 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9277 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9278 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9280 defcfg:
9281 if (!tg3_flag(tp, USING_MSIX)) {
9282 tp->irq_cnt = 1;
9283 tp->napi[0].irq_vec = tp->pdev->irq;
9284 netif_set_real_num_tx_queues(tp->dev, 1);
9285 netif_set_real_num_rx_queues(tp->dev, 1);
9289 static void tg3_ints_fini(struct tg3 *tp)
9291 if (tg3_flag(tp, USING_MSIX))
9292 pci_disable_msix(tp->pdev);
9293 else if (tg3_flag(tp, USING_MSI))
9294 pci_disable_msi(tp->pdev);
9295 tg3_flag_clear(tp, USING_MSI);
9296 tg3_flag_clear(tp, USING_MSIX);
9297 tg3_flag_clear(tp, ENABLE_RSS);
9298 tg3_flag_clear(tp, ENABLE_TSS);
9301 static int tg3_open(struct net_device *dev)
9303 struct tg3 *tp = netdev_priv(dev);
9304 int i, err;
9306 if (tp->fw_needed) {
9307 err = tg3_request_firmware(tp);
9308 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9309 if (err)
9310 return err;
9311 } else if (err) {
9312 netdev_warn(tp->dev, "TSO capability disabled\n");
9313 tg3_flag_clear(tp, TSO_CAPABLE);
9314 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9315 netdev_notice(tp->dev, "TSO capability restored\n");
9316 tg3_flag_set(tp, TSO_CAPABLE);
9320 netif_carrier_off(tp->dev);
9322 err = tg3_power_up(tp);
9323 if (err)
9324 return err;
9326 tg3_full_lock(tp, 0);
9328 tg3_disable_ints(tp);
9329 tg3_flag_clear(tp, INIT_COMPLETE);
9331 tg3_full_unlock(tp);
9333 /*
9334 * Setup interrupts first so we know how
9335 * many NAPI resources to allocate
9336 */
9337 tg3_ints_init(tp);
9339 /* The placement of this call is tied
9340 * to the setup and use of Host TX descriptors.
9341 */
9342 err = tg3_alloc_consistent(tp);
9343 if (err)
9344 goto err_out1;
9346 tg3_napi_init(tp);
9348 tg3_napi_enable(tp);
9350 for (i = 0; i < tp->irq_cnt; i++) {
9351 struct tg3_napi *tnapi = &tp->napi[i];
9352 err = tg3_request_irq(tp, i);
9353 if (err) {
9354 for (i--; i >= 0; i--)
9355 free_irq(tnapi->irq_vec, tnapi);
9356 break;
9360 if (err)
9361 goto err_out2;
9363 tg3_full_lock(tp, 0);
9365 err = tg3_init_hw(tp, 1);
9366 if (err) {
9367 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9368 tg3_free_rings(tp);
9369 } else {
9370 if (tg3_flag(tp, TAGGED_STATUS) &&
9371 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9372 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9373 tp->timer_offset = HZ;
9374 else
9375 tp->timer_offset = HZ / 10;
9377 BUG_ON(tp->timer_offset > HZ);
9378 tp->timer_counter = tp->timer_multiplier =
9379 (HZ / tp->timer_offset);
9380 tp->asf_counter = tp->asf_multiplier =
9381 ((HZ / tp->timer_offset) * 2);
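/* timer_offset is 1 sec for tagged-status chips (other than the
 * 5717/57765 class) and 100 msec otherwise, so timer_counter makes the
 * once-per-second work in tg3_timer fire every second, and asf_counter
 * paces the ASF heartbeat at 2 seconds.
 */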
9383 init_timer(&tp->timer);
9384 tp->timer.expires = jiffies + tp->timer_offset;
9385 tp->timer.data = (unsigned long) tp;
9386 tp->timer.function = tg3_timer;
9389 tg3_full_unlock(tp);
9391 if (err)
9392 goto err_out3;
9394 if (tg3_flag(tp, USING_MSI)) {
9395 err = tg3_test_msi(tp);
9397 if (err) {
9398 tg3_full_lock(tp, 0);
9399 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9400 tg3_free_rings(tp);
9401 tg3_full_unlock(tp);
9403 goto err_out2;
9406 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9407 u32 val = tr32(PCIE_TRANSACTION_CFG);
9409 tw32(PCIE_TRANSACTION_CFG,
9410 val | PCIE_TRANS_CFG_1SHOT_MSI);
9414 tg3_phy_start(tp);
9416 tg3_full_lock(tp, 0);
9418 add_timer(&tp->timer);
9419 tg3_flag_set(tp, INIT_COMPLETE);
9420 tg3_enable_ints(tp);
9422 tg3_full_unlock(tp);
9424 netif_tx_start_all_queues(dev);
9426 /*
9427 * Reset loopback feature if it was turned on while the device was down;
9428 * make sure that it's installed properly now.
9429 */
9430 if (dev->features & NETIF_F_LOOPBACK)
9431 tg3_set_loopback(dev, dev->features);
9433 return 0;
9435 err_out3:
9436 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9437 struct tg3_napi *tnapi = &tp->napi[i];
9438 free_irq(tnapi->irq_vec, tnapi);
9441 err_out2:
9442 tg3_napi_disable(tp);
9443 tg3_napi_fini(tp);
9444 tg3_free_consistent(tp);
9446 err_out1:
9447 tg3_ints_fini(tp);
9448 return err;
9451 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9452 struct rtnl_link_stats64 *);
9453 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9455 static int tg3_close(struct net_device *dev)
9457 int i;
9458 struct tg3 *tp = netdev_priv(dev);
9460 tg3_napi_disable(tp);
9461 cancel_work_sync(&tp->reset_task);
9463 netif_tx_stop_all_queues(dev);
9465 del_timer_sync(&tp->timer);
9467 tg3_phy_stop(tp);
9469 tg3_full_lock(tp, 1);
9471 tg3_disable_ints(tp);
9473 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9474 tg3_free_rings(tp);
9475 tg3_flag_clear(tp, INIT_COMPLETE);
9477 tg3_full_unlock(tp);
9479 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9480 struct tg3_napi *tnapi = &tp->napi[i];
9481 free_irq(tnapi->irq_vec, tnapi);
9484 tg3_ints_fini(tp);
9486 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9488 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9489 sizeof(tp->estats_prev));
9491 tg3_napi_fini(tp);
9493 tg3_free_consistent(tp);
9495 tg3_power_down(tp);
9497 netif_carrier_off(tp->dev);
9499 return 0;
9502 static inline u64 get_stat64(tg3_stat64_t *val)
9504 return ((u64)val->high << 32) | ((u64)val->low);
9507 static u64 calc_crc_errors(struct tg3 *tp)
9509 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9511 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9512 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9513 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9514 u32 val;
9516 spin_lock_bh(&tp->lock);
9517 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9518 tg3_writephy(tp, MII_TG3_TEST1,
9519 val | MII_TG3_TEST1_CRC_EN);
9520 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9521 } else
9522 val = 0;
9523 spin_unlock_bh(&tp->lock);
9525 tp->phy_crc_errors += val;
9527 return tp->phy_crc_errors;
9530 return get_stat64(&hw_stats->rx_fcs_errors);
9533 #define ESTAT_ADD(member) \
9534 estats->member = old_estats->member + \
9535 get_stat64(&hw_stats->member)
9537 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9539 struct tg3_ethtool_stats *estats = &tp->estats;
9540 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9541 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9543 if (!hw_stats)
9544 return old_estats;
9546 ESTAT_ADD(rx_octets);
9547 ESTAT_ADD(rx_fragments);
9548 ESTAT_ADD(rx_ucast_packets);
9549 ESTAT_ADD(rx_mcast_packets);
9550 ESTAT_ADD(rx_bcast_packets);
9551 ESTAT_ADD(rx_fcs_errors);
9552 ESTAT_ADD(rx_align_errors);
9553 ESTAT_ADD(rx_xon_pause_rcvd);
9554 ESTAT_ADD(rx_xoff_pause_rcvd);
9555 ESTAT_ADD(rx_mac_ctrl_rcvd);
9556 ESTAT_ADD(rx_xoff_entered);
9557 ESTAT_ADD(rx_frame_too_long_errors);
9558 ESTAT_ADD(rx_jabbers);
9559 ESTAT_ADD(rx_undersize_packets);
9560 ESTAT_ADD(rx_in_length_errors);
9561 ESTAT_ADD(rx_out_length_errors);
9562 ESTAT_ADD(rx_64_or_less_octet_packets);
9563 ESTAT_ADD(rx_65_to_127_octet_packets);
9564 ESTAT_ADD(rx_128_to_255_octet_packets);
9565 ESTAT_ADD(rx_256_to_511_octet_packets);
9566 ESTAT_ADD(rx_512_to_1023_octet_packets);
9567 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9568 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9569 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9570 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9571 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9573 ESTAT_ADD(tx_octets);
9574 ESTAT_ADD(tx_collisions);
9575 ESTAT_ADD(tx_xon_sent);
9576 ESTAT_ADD(tx_xoff_sent);
9577 ESTAT_ADD(tx_flow_control);
9578 ESTAT_ADD(tx_mac_errors);
9579 ESTAT_ADD(tx_single_collisions);
9580 ESTAT_ADD(tx_mult_collisions);
9581 ESTAT_ADD(tx_deferred);
9582 ESTAT_ADD(tx_excessive_collisions);
9583 ESTAT_ADD(tx_late_collisions);
9584 ESTAT_ADD(tx_collide_2times);
9585 ESTAT_ADD(tx_collide_3times);
9586 ESTAT_ADD(tx_collide_4times);
9587 ESTAT_ADD(tx_collide_5times);
9588 ESTAT_ADD(tx_collide_6times);
9589 ESTAT_ADD(tx_collide_7times);
9590 ESTAT_ADD(tx_collide_8times);
9591 ESTAT_ADD(tx_collide_9times);
9592 ESTAT_ADD(tx_collide_10times);
9593 ESTAT_ADD(tx_collide_11times);
9594 ESTAT_ADD(tx_collide_12times);
9595 ESTAT_ADD(tx_collide_13times);
9596 ESTAT_ADD(tx_collide_14times);
9597 ESTAT_ADD(tx_collide_15times);
9598 ESTAT_ADD(tx_ucast_packets);
9599 ESTAT_ADD(tx_mcast_packets);
9600 ESTAT_ADD(tx_bcast_packets);
9601 ESTAT_ADD(tx_carrier_sense_errors);
9602 ESTAT_ADD(tx_discards);
9603 ESTAT_ADD(tx_errors);
9605 ESTAT_ADD(dma_writeq_full);
9606 ESTAT_ADD(dma_write_prioq_full);
9607 ESTAT_ADD(rxbds_empty);
9608 ESTAT_ADD(rx_discards);
9609 ESTAT_ADD(rx_errors);
9610 ESTAT_ADD(rx_threshold_hit);
9612 ESTAT_ADD(dma_readq_full);
9613 ESTAT_ADD(dma_read_prioq_full);
9614 ESTAT_ADD(tx_comp_queue_full);
9616 ESTAT_ADD(ring_set_send_prod_index);
9617 ESTAT_ADD(ring_status_update);
9618 ESTAT_ADD(nic_irqs);
9619 ESTAT_ADD(nic_avoided_irqs);
9620 ESTAT_ADD(nic_tx_threshold_hit);
9622 ESTAT_ADD(mbuf_lwm_thresh_hit);
9624 return estats;
9627 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9628 struct rtnl_link_stats64 *stats)
9630 struct tg3 *tp = netdev_priv(dev);
9631 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9632 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9634 if (!hw_stats)
9635 return old_stats;
9637 stats->rx_packets = old_stats->rx_packets +
9638 get_stat64(&hw_stats->rx_ucast_packets) +
9639 get_stat64(&hw_stats->rx_mcast_packets) +
9640 get_stat64(&hw_stats->rx_bcast_packets);
9642 stats->tx_packets = old_stats->tx_packets +
9643 get_stat64(&hw_stats->tx_ucast_packets) +
9644 get_stat64(&hw_stats->tx_mcast_packets) +
9645 get_stat64(&hw_stats->tx_bcast_packets);
9647 stats->rx_bytes = old_stats->rx_bytes +
9648 get_stat64(&hw_stats->rx_octets);
9649 stats->tx_bytes = old_stats->tx_bytes +
9650 get_stat64(&hw_stats->tx_octets);
9652 stats->rx_errors = old_stats->rx_errors +
9653 get_stat64(&hw_stats->rx_errors);
9654 stats->tx_errors = old_stats->tx_errors +
9655 get_stat64(&hw_stats->tx_errors) +
9656 get_stat64(&hw_stats->tx_mac_errors) +
9657 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9658 get_stat64(&hw_stats->tx_discards);
9660 stats->multicast = old_stats->multicast +
9661 get_stat64(&hw_stats->rx_mcast_packets);
9662 stats->collisions = old_stats->collisions +
9663 get_stat64(&hw_stats->tx_collisions);
9665 stats->rx_length_errors = old_stats->rx_length_errors +
9666 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9667 get_stat64(&hw_stats->rx_undersize_packets);
9669 stats->rx_over_errors = old_stats->rx_over_errors +
9670 get_stat64(&hw_stats->rxbds_empty);
9671 stats->rx_frame_errors = old_stats->rx_frame_errors +
9672 get_stat64(&hw_stats->rx_align_errors);
9673 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9674 get_stat64(&hw_stats->tx_discards);
9675 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9676 get_stat64(&hw_stats->tx_carrier_sense_errors);
9678 stats->rx_crc_errors = old_stats->rx_crc_errors +
9679 calc_crc_errors(tp);
9681 stats->rx_missed_errors = old_stats->rx_missed_errors +
9682 get_stat64(&hw_stats->rx_discards);
9684 stats->rx_dropped = tp->rx_dropped;
9686 return stats;
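/* calc_crc() below is a bit-reflected CRC-32 (polynomial 0xedb88320),
 * the same CRC used for the Ethernet FCS, computed LSB-first one byte
 * at a time; the multicast filter uses the low seven bits of its
 * bitwise complement.
 */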
9689 static inline u32 calc_crc(unsigned char *buf, int len)
9691 u32 reg;
9692 u32 tmp;
9693 int j, k;
9695 reg = 0xffffffff;
9697 for (j = 0; j < len; j++) {
9698 reg ^= buf[j];
9700 for (k = 0; k < 8; k++) {
9701 tmp = reg & 0x01;
9703 reg >>= 1;
9705 if (tmp)
9706 reg ^= 0xedb88320;
9710 return ~reg;
9713 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9715 /* accept or reject all multicast frames */
9716 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9717 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9718 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9719 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9722 static void __tg3_set_rx_mode(struct net_device *dev)
9724 struct tg3 *tp = netdev_priv(dev);
9725 u32 rx_mode;
9727 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9728 RX_MODE_KEEP_VLAN_TAG);
9730 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9731 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9732 * flag clear.
9733 */
9734 if (!tg3_flag(tp, ENABLE_ASF))
9735 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9736 #endif
9738 if (dev->flags & IFF_PROMISC) {
9739 /* Promiscuous mode. */
9740 rx_mode |= RX_MODE_PROMISC;
9741 } else if (dev->flags & IFF_ALLMULTI) {
9742 /* Accept all multicast. */
9743 tg3_set_multi(tp, 1);
9744 } else if (netdev_mc_empty(dev)) {
9745 /* Reject all multicast. */
9746 tg3_set_multi(tp, 0);
9747 } else {
9748 /* Accept one or more multicast(s). */
9749 struct netdev_hw_addr *ha;
9750 u32 mc_filter[4] = { 0, };
9751 u32 regidx;
9752 u32 bit;
9753 u32 crc;
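/* Each address hashes to one bit of a 128-bit table: bits 6:5 of the
 * inverted CRC pick one of the four MAC_HASH_REG registers and bits
 * 4:0 pick the bit within it.
 */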
9755 netdev_for_each_mc_addr(ha, dev) {
9756 crc = calc_crc(ha->addr, ETH_ALEN);
9757 bit = ~crc & 0x7f;
9758 regidx = (bit & 0x60) >> 5;
9759 bit &= 0x1f;
9760 mc_filter[regidx] |= (1 << bit);
9763 tw32(MAC_HASH_REG_0, mc_filter[0]);
9764 tw32(MAC_HASH_REG_1, mc_filter[1]);
9765 tw32(MAC_HASH_REG_2, mc_filter[2]);
9766 tw32(MAC_HASH_REG_3, mc_filter[3]);
9769 if (rx_mode != tp->rx_mode) {
9770 tp->rx_mode = rx_mode;
9771 tw32_f(MAC_RX_MODE, rx_mode);
9772 udelay(10);
9776 static void tg3_set_rx_mode(struct net_device *dev)
9778 struct tg3 *tp = netdev_priv(dev);
9780 if (!netif_running(dev))
9781 return;
9783 tg3_full_lock(tp, 0);
9784 __tg3_set_rx_mode(dev);
9785 tg3_full_unlock(tp);
9788 static int tg3_get_regs_len(struct net_device *dev)
9790 return TG3_REG_BLK_SIZE;
9793 static void tg3_get_regs(struct net_device *dev,
9794 struct ethtool_regs *regs, void *_p)
9796 struct tg3 *tp = netdev_priv(dev);
9798 regs->version = 0;
9800 memset(_p, 0, TG3_REG_BLK_SIZE);
9802 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9803 return;
9805 tg3_full_lock(tp, 0);
9807 tg3_dump_legacy_regs(tp, (u32 *)_p);
9809 tg3_full_unlock(tp);
9812 static int tg3_get_eeprom_len(struct net_device *dev)
9814 struct tg3 *tp = netdev_priv(dev);
9816 return tp->nvram_size;
9819 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9821 struct tg3 *tp = netdev_priv(dev);
9822 int ret;
9823 u8 *pd;
9824 u32 i, offset, len, b_offset, b_count;
9825 __be32 val;
9827 if (tg3_flag(tp, NO_NVRAM))
9828 return -EINVAL;
9830 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9831 return -EAGAIN;
9833 offset = eeprom->offset;
9834 len = eeprom->len;
9835 eeprom->len = 0;
9837 eeprom->magic = TG3_EEPROM_MAGIC;
9839 if (offset & 3) {
9840 /* adjustments to start on required 4 byte boundary */
9841 b_offset = offset & 3;
9842 b_count = 4 - b_offset;
9843 if (b_count > len) {
9844 /* i.e. offset=1 len=2 */
9845 b_count = len;
9847 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9848 if (ret)
9849 return ret;
9850 memcpy(data, ((char *)&val) + b_offset, b_count);
9851 len -= b_count;
9852 offset += b_count;
9853 eeprom->len += b_count;
9856 /* read bytes up to the last 4 byte boundary */
9857 pd = &data[eeprom->len];
9858 for (i = 0; i < (len - (len & 3)); i += 4) {
9859 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9860 if (ret) {
9861 eeprom->len += i;
9862 return ret;
9864 memcpy(pd + i, &val, 4);
9866 eeprom->len += i;
9868 if (len & 3) {
9869 /* read last bytes not ending on 4 byte boundary */
9870 pd = &data[eeprom->len];
9871 b_count = len & 3;
9872 b_offset = offset + len - b_count;
9873 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9874 if (ret)
9875 return ret;
9876 memcpy(pd, &val, b_count);
9877 eeprom->len += b_count;
9879 return 0;
9882 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9884 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9886 struct tg3 *tp = netdev_priv(dev);
9887 int ret;
9888 u32 offset, len, b_offset, odd_len;
9889 u8 *buf;
9890 __be32 start, end;
9892 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9893 return -EAGAIN;
9895 if (tg3_flag(tp, NO_NVRAM) ||
9896 eeprom->magic != TG3_EEPROM_MAGIC)
9897 return -EINVAL;
9899 offset = eeprom->offset;
9900 len = eeprom->len;
9902 if ((b_offset = (offset & 3))) {
9903 /* adjustments to start on required 4 byte boundary */
9904 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9905 if (ret)
9906 return ret;
9907 len += b_offset;
9908 offset &= ~3;
9909 if (len < 4)
9910 len = 4;
9913 odd_len = 0;
9914 if (len & 3) {
9915 /* adjustments to end on required 4 byte boundary */
9916 odd_len = 1;
9917 len = (len + 3) & ~3;
9918 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9919 if (ret)
9920 return ret;
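/* The write path works in whole 32-bit words: if the request is
 * unaligned at either end, the code below assembles a bounce buffer
 * from the preserved leading word (start), the caller's data, and the
 * preserved trailing word (end), then writes whole words to NVRAM.
 */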
9923 buf = data;
9924 if (b_offset || odd_len) {
9925 buf = kmalloc(len, GFP_KERNEL);
9926 if (!buf)
9927 return -ENOMEM;
9928 if (b_offset)
9929 memcpy(buf, &start, 4);
9930 if (odd_len)
9931 memcpy(buf+len-4, &end, 4);
9932 memcpy(buf + b_offset, data, eeprom->len);
9935 ret = tg3_nvram_write_block(tp, offset, len, buf);
9937 if (buf != data)
9938 kfree(buf);
9940 return ret;
9943 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9945 struct tg3 *tp = netdev_priv(dev);
9947 if (tg3_flag(tp, USE_PHYLIB)) {
9948 struct phy_device *phydev;
9949 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9950 return -EAGAIN;
9951 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9952 return phy_ethtool_gset(phydev, cmd);
9955 cmd->supported = (SUPPORTED_Autoneg);
9957 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9958 cmd->supported |= (SUPPORTED_1000baseT_Half |
9959 SUPPORTED_1000baseT_Full);
9961 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9962 cmd->supported |= (SUPPORTED_100baseT_Half |
9963 SUPPORTED_100baseT_Full |
9964 SUPPORTED_10baseT_Half |
9965 SUPPORTED_10baseT_Full |
9966 SUPPORTED_TP);
9967 cmd->port = PORT_TP;
9968 } else {
9969 cmd->supported |= SUPPORTED_FIBRE;
9970 cmd->port = PORT_FIBRE;
9973 cmd->advertising = tp->link_config.advertising;
9974 if (tg3_flag(tp, PAUSE_AUTONEG)) {
9975 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
9976 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
9977 cmd->advertising |= ADVERTISED_Pause;
9978 } else {
9979 cmd->advertising |= ADVERTISED_Pause |
9980 ADVERTISED_Asym_Pause;
9982 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
9983 cmd->advertising |= ADVERTISED_Asym_Pause;
9986 if (netif_running(dev)) {
9987 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9988 cmd->duplex = tp->link_config.active_duplex;
9989 } else {
9990 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9991 cmd->duplex = DUPLEX_INVALID;
9993 cmd->phy_address = tp->phy_addr;
9994 cmd->transceiver = XCVR_INTERNAL;
9995 cmd->autoneg = tp->link_config.autoneg;
9996 cmd->maxtxpkt = 0;
9997 cmd->maxrxpkt = 0;
9998 return 0;
10001 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10003 struct tg3 *tp = netdev_priv(dev);
10004 u32 speed = ethtool_cmd_speed(cmd);
10006 if (tg3_flag(tp, USE_PHYLIB)) {
10007 struct phy_device *phydev;
10008 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10009 return -EAGAIN;
10010 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10011 return phy_ethtool_sset(phydev, cmd);
10014 if (cmd->autoneg != AUTONEG_ENABLE &&
10015 cmd->autoneg != AUTONEG_DISABLE)
10016 return -EINVAL;
10018 if (cmd->autoneg == AUTONEG_DISABLE &&
10019 cmd->duplex != DUPLEX_FULL &&
10020 cmd->duplex != DUPLEX_HALF)
10021 return -EINVAL;
10023 if (cmd->autoneg == AUTONEG_ENABLE) {
10024 u32 mask = ADVERTISED_Autoneg |
10025 ADVERTISED_Pause |
10026 ADVERTISED_Asym_Pause;
10028 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10029 mask |= ADVERTISED_1000baseT_Half |
10030 ADVERTISED_1000baseT_Full;
10032 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10033 mask |= ADVERTISED_100baseT_Half |
10034 ADVERTISED_100baseT_Full |
10035 ADVERTISED_10baseT_Half |
10036 ADVERTISED_10baseT_Full |
10037 ADVERTISED_TP;
10038 else
10039 mask |= ADVERTISED_FIBRE;
10041 if (cmd->advertising & ~mask)
10042 return -EINVAL;
10044 mask &= (ADVERTISED_1000baseT_Half |
10045 ADVERTISED_1000baseT_Full |
10046 ADVERTISED_100baseT_Half |
10047 ADVERTISED_100baseT_Full |
10048 ADVERTISED_10baseT_Half |
10049 ADVERTISED_10baseT_Full);
10051 cmd->advertising &= mask;
10052 } else {
10053 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10054 if (speed != SPEED_1000)
10055 return -EINVAL;
10057 if (cmd->duplex != DUPLEX_FULL)
10058 return -EINVAL;
10059 } else {
10060 if (speed != SPEED_100 &&
10061 speed != SPEED_10)
10062 return -EINVAL;
10066 tg3_full_lock(tp, 0);
10068 tp->link_config.autoneg = cmd->autoneg;
10069 if (cmd->autoneg == AUTONEG_ENABLE) {
10070 tp->link_config.advertising = (cmd->advertising |
10071 ADVERTISED_Autoneg);
10072 tp->link_config.speed = SPEED_INVALID;
10073 tp->link_config.duplex = DUPLEX_INVALID;
10074 } else {
10075 tp->link_config.advertising = 0;
10076 tp->link_config.speed = speed;
10077 tp->link_config.duplex = cmd->duplex;
10080 tp->link_config.orig_speed = tp->link_config.speed;
10081 tp->link_config.orig_duplex = tp->link_config.duplex;
10082 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10084 if (netif_running(dev))
10085 tg3_setup_phy(tp, 1);
10087 tg3_full_unlock(tp);
10089 return 0;
10092 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10094 struct tg3 *tp = netdev_priv(dev);
10096 strcpy(info->driver, DRV_MODULE_NAME);
10097 strcpy(info->version, DRV_MODULE_VERSION);
10098 strcpy(info->fw_version, tp->fw_ver);
10099 strcpy(info->bus_info, pci_name(tp->pdev));
10102 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10104 struct tg3 *tp = netdev_priv(dev);
10106 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10107 wol->supported = WAKE_MAGIC;
10108 else
10109 wol->supported = 0;
10110 wol->wolopts = 0;
10111 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10112 wol->wolopts = WAKE_MAGIC;
10113 memset(&wol->sopass, 0, sizeof(wol->sopass));
10116 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10118 struct tg3 *tp = netdev_priv(dev);
10119 struct device *dp = &tp->pdev->dev;
10121 if (wol->wolopts & ~WAKE_MAGIC)
10122 return -EINVAL;
10123 if ((wol->wolopts & WAKE_MAGIC) &&
10124 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10125 return -EINVAL;
10127 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10129 spin_lock_bh(&tp->lock);
10130 if (device_may_wakeup(dp))
10131 tg3_flag_set(tp, WOL_ENABLE);
10132 else
10133 tg3_flag_clear(tp, WOL_ENABLE);
10134 spin_unlock_bh(&tp->lock);
10136 return 0;
10139 static u32 tg3_get_msglevel(struct net_device *dev)
10141 struct tg3 *tp = netdev_priv(dev);
10142 return tp->msg_enable;
10145 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10147 struct tg3 *tp = netdev_priv(dev);
10148 tp->msg_enable = value;
10151 static int tg3_nway_reset(struct net_device *dev)
10153 struct tg3 *tp = netdev_priv(dev);
10154 int r;
10156 if (!netif_running(dev))
10157 return -EAGAIN;
10159 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10160 return -EINVAL;
10162 if (tg3_flag(tp, USE_PHYLIB)) {
10163 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10164 return -EAGAIN;
10165 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10166 } else {
10167 u32 bmcr;
10169 spin_lock_bh(&tp->lock);
10170 r = -EINVAL;
10171 tg3_readphy(tp, MII_BMCR, &bmcr);
10172 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10173 ((bmcr & BMCR_ANENABLE) ||
10174 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10175 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10176 BMCR_ANENABLE);
10177 r = 0;
10179 spin_unlock_bh(&tp->lock);
10182 return r;
10185 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10187 struct tg3 *tp = netdev_priv(dev);
10189 ering->rx_max_pending = tp->rx_std_ring_mask;
10190 ering->rx_mini_max_pending = 0;
10191 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10192 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10193 else
10194 ering->rx_jumbo_max_pending = 0;
10196 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10198 ering->rx_pending = tp->rx_pending;
10199 ering->rx_mini_pending = 0;
10200 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10201 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10202 else
10203 ering->rx_jumbo_pending = 0;
10205 ering->tx_pending = tp->napi[0].tx_pending;
10208 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10210 struct tg3 *tp = netdev_priv(dev);
10211 int i, irq_sync = 0, err = 0;
10213 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10214 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10215 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10216 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10217 (tg3_flag(tp, TSO_BUG) &&
10218 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10219 return -EINVAL;
10221 if (netif_running(dev)) {
10222 tg3_phy_stop(tp);
10223 tg3_netif_stop(tp);
10224 irq_sync = 1;
10227 tg3_full_lock(tp, irq_sync);
10229 tp->rx_pending = ering->rx_pending;
10231 if (tg3_flag(tp, MAX_RXPEND_64) &&
10232 tp->rx_pending > 63)
10233 tp->rx_pending = 63;
10234 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10236 for (i = 0; i < tp->irq_max; i++)
10237 tp->napi[i].tx_pending = ering->tx_pending;
10239 if (netif_running(dev)) {
10240 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10241 err = tg3_restart_hw(tp, 1);
10242 if (!err)
10243 tg3_netif_start(tp);
10246 tg3_full_unlock(tp);
10248 if (irq_sync && !err)
10249 tg3_phy_start(tp);
10251 return err;
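/* ethtool -a: report pause autoneg and the active rx/tx flow control. */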
10254 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10256 struct tg3 *tp = netdev_priv(dev);
10258 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10260 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10261 epause->rx_pause = 1;
10262 else
10263 epause->rx_pause = 0;
10265 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10266 epause->tx_pause = 1;
10267 else
10268 epause->tx_pause = 0;
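/* ethtool -A, e.g. "ethtool -A eth0 autoneg on rx on tx on". With phylib,
 * flow control is changed by updating the advertised pause bits and, when
 * autoneg is active, renegotiating; otherwise the MAC is reprogrammed here.
 */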
10271 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10273 struct tg3 *tp = netdev_priv(dev);
10274 int err = 0;
10276 if (tg3_flag(tp, USE_PHYLIB)) {
10277 u32 newadv;
10278 struct phy_device *phydev;
10280 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10282 if (!(phydev->supported & SUPPORTED_Pause) ||
10283 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10284 (epause->rx_pause != epause->tx_pause)))
10285 return -EINVAL;
10287 tp->link_config.flowctrl = 0;
10288 if (epause->rx_pause) {
10289 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10291 if (epause->tx_pause) {
10292 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10293 newadv = ADVERTISED_Pause;
10294 } else
10295 newadv = ADVERTISED_Pause |
10296 ADVERTISED_Asym_Pause;
10297 } else if (epause->tx_pause) {
10298 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10299 newadv = ADVERTISED_Asym_Pause;
10300 } else
10301 newadv = 0;
10303 if (epause->autoneg)
10304 tg3_flag_set(tp, PAUSE_AUTONEG);
10305 else
10306 tg3_flag_clear(tp, PAUSE_AUTONEG);
10308 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10309 u32 oldadv = phydev->advertising &
10310 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10311 if (oldadv != newadv) {
10312 phydev->advertising &=
10313 ~(ADVERTISED_Pause |
10314 ADVERTISED_Asym_Pause);
10315 phydev->advertising |= newadv;
10316 if (phydev->autoneg) {
10318 * Always renegotiate the link to
10319 * inform our link partner of our
10320 * flow control settings, even if the
10321 * flow control is forced. Let
10322 * tg3_adjust_link() do the final
10323 * flow control setup.
10325 return phy_start_aneg(phydev);
10329 if (!epause->autoneg)
10330 tg3_setup_flow_control(tp, 0, 0);
10331 } else {
10332 tp->link_config.orig_advertising &=
10333 ~(ADVERTISED_Pause |
10334 ADVERTISED_Asym_Pause);
10335 tp->link_config.orig_advertising |= newadv;
10337 } else {
10338 int irq_sync = 0;
10340 if (netif_running(dev)) {
10341 tg3_netif_stop(tp);
10342 irq_sync = 1;
10345 tg3_full_lock(tp, irq_sync);
10347 if (epause->autoneg)
10348 tg3_flag_set(tp, PAUSE_AUTONEG);
10349 else
10350 tg3_flag_clear(tp, PAUSE_AUTONEG);
10351 if (epause->rx_pause)
10352 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10353 else
10354 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10355 if (epause->tx_pause)
10356 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10357 else
10358 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10360 if (netif_running(dev)) {
10361 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10362 err = tg3_restart_hw(tp, 1);
10363 if (!err)
10364 tg3_netif_start(tp);
10367 tg3_full_unlock(tp);
10370 return err;
10373 static int tg3_get_sset_count(struct net_device *dev, int sset)
10375 switch (sset) {
10376 case ETH_SS_TEST:
10377 return TG3_NUM_TEST;
10378 case ETH_SS_STATS:
10379 return TG3_NUM_STATS;
10380 default:
10381 return -EOPNOTSUPP;
10385 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10387 switch (stringset) {
10388 case ETH_SS_STATS:
10389 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10390 break;
10391 case ETH_SS_TEST:
10392 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10393 break;
10394 default:
10395 WARN_ON(1); /* unknown stringset - should never be reached */
10396 break;
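/* ethtool -p, e.g. "ethtool -p eth0 5": identify the port by blinking its
 * LEDs. Returning 1 for ETHTOOL_ID_ACTIVE asks the ethtool core to
 * alternate the ON/OFF states once per second.
 */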
10400 static int tg3_set_phys_id(struct net_device *dev,
10401 enum ethtool_phys_id_state state)
10403 struct tg3 *tp = netdev_priv(dev);
10405 if (!netif_running(tp->dev))
10406 return -EAGAIN;
10408 switch (state) {
10409 case ETHTOOL_ID_ACTIVE:
10410 return 1; /* cycle on/off once per second */
10412 case ETHTOOL_ID_ON:
10413 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10414 LED_CTRL_1000MBPS_ON |
10415 LED_CTRL_100MBPS_ON |
10416 LED_CTRL_10MBPS_ON |
10417 LED_CTRL_TRAFFIC_OVERRIDE |
10418 LED_CTRL_TRAFFIC_BLINK |
10419 LED_CTRL_TRAFFIC_LED);
10420 break;
10422 case ETHTOOL_ID_OFF:
10423 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10424 LED_CTRL_TRAFFIC_OVERRIDE);
10425 break;
10427 case ETHTOOL_ID_INACTIVE:
10428 tw32(MAC_LED_CTRL, tp->led_ctrl);
10429 break;
10432 return 0;
10435 static void tg3_get_ethtool_stats(struct net_device *dev,
10436 struct ethtool_stats *estats, u64 *tmp_stats)
10438 struct tg3 *tp = netdev_priv(dev);
10439 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
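/* Read the Vital Product Data block. Prefer an extended VPD image located
 * via the NVRAM directory, fall back to the fixed NVRAM offset, and use the
 * PCI VPD capability when there is no usable NVRAM. The caller must
 * kfree() the returned buffer.
 */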
10442 static __be32 *tg3_vpd_readblock(struct tg3 *tp)
10444 int i;
10445 __be32 *buf;
10446 u32 offset = 0, len = 0;
10447 u32 magic, val;
10449 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10450 return NULL;
10452 if (magic == TG3_EEPROM_MAGIC) {
10453 for (offset = TG3_NVM_DIR_START;
10454 offset < TG3_NVM_DIR_END;
10455 offset += TG3_NVM_DIRENT_SIZE) {
10456 if (tg3_nvram_read(tp, offset, &val))
10457 return NULL;
10459 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10460 TG3_NVM_DIRTYPE_EXTVPD)
10461 break;
10464 if (offset != TG3_NVM_DIR_END) {
10465 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10466 if (tg3_nvram_read(tp, offset + 4, &offset))
10467 return NULL;
10469 offset = tg3_nvram_logical_addr(tp, offset);
10473 if (!offset || !len) {
10474 offset = TG3_NVM_VPD_OFF;
10475 len = TG3_NVM_VPD_LEN;
10478 buf = kmalloc(len, GFP_KERNEL);
10479 if (buf == NULL)
10480 return NULL;
10482 if (magic == TG3_EEPROM_MAGIC) {
10483 for (i = 0; i < len; i += 4) {
10484 /* The data is in little-endian format in NVRAM.
10485 * Use the big-endian read routines to preserve
10486 * the byte order as it exists in NVRAM.
10488 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10489 goto error;
10491 } else {
10492 u8 *ptr;
10493 ssize_t cnt;
10494 unsigned int pos = 0;
10496 ptr = (u8 *)&buf[0];
10497 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10498 cnt = pci_read_vpd(tp->pdev, pos,
10499 len - pos, ptr);
10500 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10501 cnt = 0;
10502 else if (cnt < 0)
10503 goto error;
10505 if (pos != len)
10506 goto error;
10509 return buf;
10511 error:
10512 kfree(buf);
10513 return NULL;
10516 #define NVRAM_TEST_SIZE 0x100
10517 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10518 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10519 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10520 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
10521 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
10522 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x4c
10523 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10524 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
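/* ethtool self-test: NVRAM integrity. Legacy images are checked with CRCs
 * over the bootstrap and manufacturing blocks plus the VPD checksum;
 * selfboot images use a byte checksum or per-byte parity, depending on
 * format.
 */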
10526 static int tg3_test_nvram(struct tg3 *tp)
10528 u32 csum, magic;
10529 __be32 *buf;
10530 int i, j, k, err = 0, size;
10532 if (tg3_flag(tp, NO_NVRAM))
10533 return 0;
10535 if (tg3_nvram_read(tp, 0, &magic) != 0)
10536 return -EIO;
10538 if (magic == TG3_EEPROM_MAGIC)
10539 size = NVRAM_TEST_SIZE;
10540 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10541 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10542 TG3_EEPROM_SB_FORMAT_1) {
10543 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10544 case TG3_EEPROM_SB_REVISION_0:
10545 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10546 break;
10547 case TG3_EEPROM_SB_REVISION_2:
10548 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10549 break;
10550 case TG3_EEPROM_SB_REVISION_3:
10551 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10552 break;
10553 case TG3_EEPROM_SB_REVISION_4:
10554 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10555 break;
10556 case TG3_EEPROM_SB_REVISION_5:
10557 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10558 break;
10559 case TG3_EEPROM_SB_REVISION_6:
10560 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10561 break;
10562 default:
10563 return -EIO;
10565 } else
10566 return 0;
10567 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10568 size = NVRAM_SELFBOOT_HW_SIZE;
10569 else
10570 return -EIO;
10572 buf = kmalloc(size, GFP_KERNEL);
10573 if (buf == NULL)
10574 return -ENOMEM;
10576 err = -EIO;
10577 for (i = 0, j = 0; i < size; i += 4, j++) {
10578 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10579 if (err)
10580 break;
10582 if (i < size)
10583 goto out;
10585 /* Selfboot format */
10586 magic = be32_to_cpu(buf[0]);
10587 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10588 TG3_EEPROM_MAGIC_FW) {
10589 u8 *buf8 = (u8 *) buf, csum8 = 0;
10591 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10592 TG3_EEPROM_SB_REVISION_2) {
10593 /* For rev 2, the csum doesn't include the MBA. */
10594 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10595 csum8 += buf8[i];
10596 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10597 csum8 += buf8[i];
10598 } else {
10599 for (i = 0; i < size; i++)
10600 csum8 += buf8[i];
10603 if (csum8 == 0) {
10604 err = 0;
10605 goto out;
10608 err = -EIO;
10609 goto out;
10612 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10613 TG3_EEPROM_MAGIC_HW) {
10614 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10615 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10616 u8 *buf8 = (u8 *) buf;
10618 /* Separate the parity bits and the data bytes. */
10619 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10620 if ((i == 0) || (i == 8)) {
10621 int l;
10622 u8 msk;
10624 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10625 parity[k++] = buf8[i] & msk;
10626 i++;
10627 } else if (i == 16) {
10628 int l;
10629 u8 msk;
10631 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10632 parity[k++] = buf8[i] & msk;
10633 i++;
10635 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10636 parity[k++] = buf8[i] & msk;
10637 i++;
10639 data[j++] = buf8[i];
10642 err = -EIO;
10643 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10644 u8 hw8 = hweight8(data[i]);
10646 if ((hw8 & 0x1) && parity[i])
10647 goto out;
10648 else if (!(hw8 & 0x1) && !parity[i])
10649 goto out;
10651 err = 0;
10652 goto out;
10655 err = -EIO;
10657 /* Bootstrap checksum at offset 0x10 */
10658 csum = calc_crc((unsigned char *) buf, 0x10);
10659 if (csum != le32_to_cpu(buf[0x10/4]))
10660 goto out;
10662 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10663 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10664 if (csum != le32_to_cpu(buf[0xfc/4]))
10665 goto out;
10667 kfree(buf);
10669 buf = tg3_vpd_readblock(tp);
10670 if (!buf)
10671 return -ENOMEM;
10673 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10674 PCI_VPD_LRDT_RO_DATA);
10675 if (i > 0) {
10676 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10677 if (j < 0)
10678 goto out;
10680 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10681 goto out;
10683 i += PCI_VPD_LRDT_TAG_SIZE;
10684 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10685 PCI_VPD_RO_KEYWORD_CHKSUM);
10686 if (j > 0) {
10687 u8 csum8 = 0;
10689 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10691 for (i = 0; i <= j; i++)
10692 csum8 += ((u8 *)buf)[i];
10694 if (csum8)
10695 goto out;
10699 err = 0;
10701 out:
10702 kfree(buf);
10703 return err;
10706 #define TG3_SERDES_TIMEOUT_SEC 2
10707 #define TG3_COPPER_TIMEOUT_SEC 6
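/* ethtool self-test: poll for link for up to 2s (serdes) or 6s (copper). */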
10709 static int tg3_test_link(struct tg3 *tp)
10711 int i, max;
10713 if (!netif_running(tp->dev))
10714 return -ENODEV;
10716 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10717 max = TG3_SERDES_TIMEOUT_SEC;
10718 else
10719 max = TG3_COPPER_TIMEOUT_SEC;
10721 for (i = 0; i < max; i++) {
10722 if (netif_carrier_ok(tp->dev))
10723 return 0;
10725 if (msleep_interruptible(1000))
10726 break;
10729 return -EIO;
10732 /* Only test the commonly used registers */
10733 static int tg3_test_registers(struct tg3 *tp)
10735 int i, is_5705, is_5750;
10736 u32 offset, read_mask, write_mask, val, save_val, read_val;
10737 static struct {
10738 u16 offset;
10739 u16 flags;
10740 #define TG3_FL_5705 0x1
10741 #define TG3_FL_NOT_5705 0x2
10742 #define TG3_FL_NOT_5788 0x4
10743 #define TG3_FL_NOT_5750 0x8
10744 u32 read_mask;
10745 u32 write_mask;
10746 } reg_tbl[] = {
10747 /* MAC Control Registers */
10748 { MAC_MODE, TG3_FL_NOT_5705,
10749 0x00000000, 0x00ef6f8c },
10750 { MAC_MODE, TG3_FL_5705,
10751 0x00000000, 0x01ef6b8c },
10752 { MAC_STATUS, TG3_FL_NOT_5705,
10753 0x03800107, 0x00000000 },
10754 { MAC_STATUS, TG3_FL_5705,
10755 0x03800100, 0x00000000 },
10756 { MAC_ADDR_0_HIGH, 0x0000,
10757 0x00000000, 0x0000ffff },
10758 { MAC_ADDR_0_LOW, 0x0000,
10759 0x00000000, 0xffffffff },
10760 { MAC_RX_MTU_SIZE, 0x0000,
10761 0x00000000, 0x0000ffff },
10762 { MAC_TX_MODE, 0x0000,
10763 0x00000000, 0x00000070 },
10764 { MAC_TX_LENGTHS, 0x0000,
10765 0x00000000, 0x00003fff },
10766 { MAC_RX_MODE, TG3_FL_NOT_5705,
10767 0x00000000, 0x000007fc },
10768 { MAC_RX_MODE, TG3_FL_5705,
10769 0x00000000, 0x000007dc },
10770 { MAC_HASH_REG_0, 0x0000,
10771 0x00000000, 0xffffffff },
10772 { MAC_HASH_REG_1, 0x0000,
10773 0x00000000, 0xffffffff },
10774 { MAC_HASH_REG_2, 0x0000,
10775 0x00000000, 0xffffffff },
10776 { MAC_HASH_REG_3, 0x0000,
10777 0x00000000, 0xffffffff },
10779 /* Receive Data and Receive BD Initiator Control Registers. */
10780 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10781 0x00000000, 0xffffffff },
10782 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10783 0x00000000, 0xffffffff },
10784 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10785 0x00000000, 0x00000003 },
10786 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10787 0x00000000, 0xffffffff },
10788 { RCVDBDI_STD_BD+0, 0x0000,
10789 0x00000000, 0xffffffff },
10790 { RCVDBDI_STD_BD+4, 0x0000,
10791 0x00000000, 0xffffffff },
10792 { RCVDBDI_STD_BD+8, 0x0000,
10793 0x00000000, 0xffff0002 },
10794 { RCVDBDI_STD_BD+0xc, 0x0000,
10795 0x00000000, 0xffffffff },
10797 /* Receive BD Initiator Control Registers. */
10798 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10799 0x00000000, 0xffffffff },
10800 { RCVBDI_STD_THRESH, TG3_FL_5705,
10801 0x00000000, 0x000003ff },
10802 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10803 0x00000000, 0xffffffff },
10805 /* Host Coalescing Control Registers. */
10806 { HOSTCC_MODE, TG3_FL_NOT_5705,
10807 0x00000000, 0x00000004 },
10808 { HOSTCC_MODE, TG3_FL_5705,
10809 0x00000000, 0x000000f6 },
10810 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10811 0x00000000, 0xffffffff },
10812 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10813 0x00000000, 0x000003ff },
10814 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10815 0x00000000, 0xffffffff },
10816 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10817 0x00000000, 0x000003ff },
10818 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10819 0x00000000, 0xffffffff },
10820 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10821 0x00000000, 0x000000ff },
10822 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10823 0x00000000, 0xffffffff },
10824 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10825 0x00000000, 0x000000ff },
10826 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10827 0x00000000, 0xffffffff },
10828 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10829 0x00000000, 0xffffffff },
10830 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10831 0x00000000, 0xffffffff },
10832 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10833 0x00000000, 0x000000ff },
10834 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10835 0x00000000, 0xffffffff },
10836 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10837 0x00000000, 0x000000ff },
10838 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10839 0x00000000, 0xffffffff },
10840 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10841 0x00000000, 0xffffffff },
10842 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10843 0x00000000, 0xffffffff },
10844 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10845 0x00000000, 0xffffffff },
10846 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10847 0x00000000, 0xffffffff },
10848 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10849 0xffffffff, 0x00000000 },
10850 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10851 0xffffffff, 0x00000000 },
10853 /* Buffer Manager Control Registers. */
10854 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10855 0x00000000, 0x007fff80 },
10856 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10857 0x00000000, 0x007fffff },
10858 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10859 0x00000000, 0x0000003f },
10860 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10861 0x00000000, 0x000001ff },
10862 { BUFMGR_MB_HIGH_WATER, 0x0000,
10863 0x00000000, 0x000001ff },
10864 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10865 0xffffffff, 0x00000000 },
10866 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10867 0xffffffff, 0x00000000 },
10869 /* Mailbox Registers */
10870 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10871 0x00000000, 0x000001ff },
10872 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10873 0x00000000, 0x000001ff },
10874 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10875 0x00000000, 0x000007ff },
10876 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10877 0x00000000, 0x000001ff },
10879 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10882 is_5705 = is_5750 = 0;
10883 if (tg3_flag(tp, 5705_PLUS)) {
10884 is_5705 = 1;
10885 if (tg3_flag(tp, 5750_PLUS))
10886 is_5750 = 1;
10889 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10890 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10891 continue;
10893 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10894 continue;
10896 if (tg3_flag(tp, IS_5788) &&
10897 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10898 continue;
10900 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10901 continue;
10903 offset = (u32) reg_tbl[i].offset;
10904 read_mask = reg_tbl[i].read_mask;
10905 write_mask = reg_tbl[i].write_mask;
10907 /* Save the original register content */
10908 save_val = tr32(offset);
10910 /* Determine the read-only value. */
10911 read_val = save_val & read_mask;
10913 /* Write zero to the register, then make sure the read-only bits
10914 * are not changed and the read/write bits are all zeros.
10916 tw32(offset, 0);
10918 val = tr32(offset);
10920 /* Test the read-only and read/write bits. */
10921 if (((val & read_mask) != read_val) || (val & write_mask))
10922 goto out;
10924 /* Write ones to all the bits defined by RdMask and WrMask, then
10925 * make sure the read-only bits are not changed and the
10926 * read/write bits are all ones.
10928 tw32(offset, read_mask | write_mask);
10930 val = tr32(offset);
10932 /* Test the read-only bits. */
10933 if ((val & read_mask) != read_val)
10934 goto out;
10936 /* Test the read/write bits. */
10937 if ((val & write_mask) != write_mask)
10938 goto out;
10940 tw32(offset, save_val);
10943 return 0;
10945 out:
10946 if (netif_msg_hw(tp))
10947 netdev_err(tp->dev,
10948 "Register test failed at offset %x\n", offset);
10949 tw32(offset, save_val);
10950 return -EIO;
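/* Write each test pattern across [offset, offset + len) of NIC memory and
 * read it back, failing on the first mismatch.
 */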
10953 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10955 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10956 int i;
10957 u32 j;
10959 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10960 for (j = 0; j < len; j += 4) {
10961 u32 val;
10963 tg3_write_mem(tp, offset + j, test_pattern[i]);
10964 tg3_read_mem(tp, offset + j, &val);
10965 if (val != test_pattern[i])
10966 return -EIO;
10969 return 0;
10972 static int tg3_test_memory(struct tg3 *tp)
10974 static struct mem_entry {
10975 u32 offset;
10976 u32 len;
10977 } mem_tbl_570x[] = {
10978 { 0x00000000, 0x00b50},
10979 { 0x00002000, 0x1c000},
10980 { 0xffffffff, 0x00000}
10981 }, mem_tbl_5705[] = {
10982 { 0x00000100, 0x0000c},
10983 { 0x00000200, 0x00008},
10984 { 0x00004000, 0x00800},
10985 { 0x00006000, 0x01000},
10986 { 0x00008000, 0x02000},
10987 { 0x00010000, 0x0e000},
10988 { 0xffffffff, 0x00000}
10989 }, mem_tbl_5755[] = {
10990 { 0x00000200, 0x00008},
10991 { 0x00004000, 0x00800},
10992 { 0x00006000, 0x00800},
10993 { 0x00008000, 0x02000},
10994 { 0x00010000, 0x0c000},
10995 { 0xffffffff, 0x00000}
10996 }, mem_tbl_5906[] = {
10997 { 0x00000200, 0x00008},
10998 { 0x00004000, 0x00400},
10999 { 0x00006000, 0x00400},
11000 { 0x00008000, 0x01000},
11001 { 0x00010000, 0x01000},
11002 { 0xffffffff, 0x00000}
11003 }, mem_tbl_5717[] = {
11004 { 0x00000200, 0x00008},
11005 { 0x00010000, 0x0a000},
11006 { 0x00020000, 0x13c00},
11007 { 0xffffffff, 0x00000}
11008 }, mem_tbl_57765[] = {
11009 { 0x00000200, 0x00008},
11010 { 0x00004000, 0x00800},
11011 { 0x00006000, 0x09800},
11012 { 0x00010000, 0x0a000},
11013 { 0xffffffff, 0x00000}
11015 struct mem_entry *mem_tbl;
11016 int err = 0;
11017 int i;
11019 if (tg3_flag(tp, 5717_PLUS))
11020 mem_tbl = mem_tbl_5717;
11021 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11022 mem_tbl = mem_tbl_57765;
11023 else if (tg3_flag(tp, 5755_PLUS))
11024 mem_tbl = mem_tbl_5755;
11025 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11026 mem_tbl = mem_tbl_5906;
11027 else if (tg3_flag(tp, 5705_PLUS))
11028 mem_tbl = mem_tbl_5705;
11029 else
11030 mem_tbl = mem_tbl_570x;
11032 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11033 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11034 if (err)
11035 break;
11038 return err;
11041 #define TG3_MAC_LOOPBACK 0
11042 #define TG3_PHY_LOOPBACK 1
11043 #define TG3_TSO_LOOPBACK 2
11045 #define TG3_TSO_MSS 500
11047 #define TG3_TSO_IP_HDR_LEN 20
11048 #define TG3_TSO_TCP_HDR_LEN 20
11049 #define TG3_TSO_TCP_OPT_LEN 12
11051 static const u8 tg3_tso_header[] = {
11052 0x08, 0x00,
11053 0x45, 0x00, 0x00, 0x00,
11054 0x00, 0x00, 0x40, 0x00,
11055 0x40, 0x06, 0x00, 0x00,
11056 0x0a, 0x00, 0x00, 0x01,
11057 0x0a, 0x00, 0x00, 0x02,
11058 0x0d, 0x00, 0xe0, 0x00,
11059 0x00, 0x00, 0x01, 0x00,
11060 0x00, 0x00, 0x02, 0x00,
11061 0x80, 0x10, 0x10, 0x00,
11062 0x14, 0x09, 0x00, 0x00,
11063 0x01, 0x01, 0x08, 0x0a,
11064 0x11, 0x11, 0x11, 0x11,
11065 0x11, 0x11, 0x11, 0x11,
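/* Send one frame (or one fake TSO superframe) with the MAC or PHY looped
 * back on itself, then verify it arrives on the expected rx ring with its
 * payload intact.
 */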
11068 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11070 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11071 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11072 struct sk_buff *skb, *rx_skb;
11073 u8 *tx_data;
11074 dma_addr_t map;
11075 int num_pkts, tx_len, rx_len, i, err;
11076 struct tg3_rx_buffer_desc *desc;
11077 struct tg3_napi *tnapi, *rnapi;
11078 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11080 tnapi = &tp->napi[0];
11081 rnapi = &tp->napi[0];
11082 if (tp->irq_cnt > 1) {
11083 if (tg3_flag(tp, ENABLE_RSS))
11084 rnapi = &tp->napi[1];
11085 if (tg3_flag(tp, ENABLE_TSS))
11086 tnapi = &tp->napi[1];
11088 coal_now = tnapi->coal_now | rnapi->coal_now;
11090 if (loopback_mode == TG3_MAC_LOOPBACK) {
11091 /* HW errata - MAC loopback fails in some cases on the 5780.
11092 * Normal traffic and PHY loopback are not affected by the
11093 * errata. Also, the MAC loopback test is deprecated for
11094 * all newer ASIC revisions.
11096 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11097 tg3_flag(tp, CPMU_PRESENT))
11098 return 0;
11100 mac_mode = tp->mac_mode &
11101 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11102 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11103 if (!tg3_flag(tp, 5705_PLUS))
11104 mac_mode |= MAC_MODE_LINK_POLARITY;
11105 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11106 mac_mode |= MAC_MODE_PORT_MODE_MII;
11107 else
11108 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11109 tw32(MAC_MODE, mac_mode);
11110 } else {
11111 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11112 tg3_phy_fet_toggle_apd(tp, false);
11113 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11114 } else
11115 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11117 tg3_phy_toggle_automdix(tp, 0);
11119 tg3_writephy(tp, MII_BMCR, val);
11120 udelay(40);
11122 mac_mode = tp->mac_mode &
11123 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11124 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11125 tg3_writephy(tp, MII_TG3_FET_PTEST,
11126 MII_TG3_FET_PTEST_FRC_TX_LINK |
11127 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11128 /* The write needs to be flushed for the AC131 */
11129 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11130 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11131 mac_mode |= MAC_MODE_PORT_MODE_MII;
11132 } else
11133 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11135 /* Reset to avoid intermittently losing the first rx packet. */
11136 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11137 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11138 udelay(10);
11139 tw32_f(MAC_RX_MODE, tp->rx_mode);
11141 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11142 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11143 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11144 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11145 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11146 mac_mode |= MAC_MODE_LINK_POLARITY;
11147 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11148 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11150 tw32(MAC_MODE, mac_mode);
11152 /* Wait for link */
11153 for (i = 0; i < 100; i++) {
11154 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11155 break;
11156 mdelay(1);
11160 err = -EIO;
11162 tx_len = pktsz;
11163 skb = netdev_alloc_skb(tp->dev, tx_len);
11164 if (!skb)
11165 return -ENOMEM;
11167 tx_data = skb_put(skb, tx_len);
11168 memcpy(tx_data, tp->dev->dev_addr, 6);
11169 memset(tx_data + 6, 0x0, 8);
11171 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11173 if (loopback_mode == TG3_TSO_LOOPBACK) {
11174 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11176 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11177 TG3_TSO_TCP_OPT_LEN;
11179 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11180 sizeof(tg3_tso_header));
11181 mss = TG3_TSO_MSS;
11183 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11184 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11186 /* Set the total length field in the IP header */
11187 iph->tot_len = htons((u16)(mss + hdr_len));
11189 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11190 TXD_FLAG_CPU_POST_DMA);
11192 if (tg3_flag(tp, HW_TSO_1) ||
11193 tg3_flag(tp, HW_TSO_2) ||
11194 tg3_flag(tp, HW_TSO_3)) {
11195 struct tcphdr *th;
11196 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11197 th = (struct tcphdr *)&tx_data[val];
11198 th->check = 0;
11199 } else
11200 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11202 if (tg3_flag(tp, HW_TSO_3)) {
11203 mss |= (hdr_len & 0xc) << 12;
11204 if (hdr_len & 0x10)
11205 base_flags |= 0x00000010;
11206 base_flags |= (hdr_len & 0x3e0) << 5;
11207 } else if (tg3_flag(tp, HW_TSO_2))
11208 mss |= hdr_len << 9;
11209 else if (tg3_flag(tp, HW_TSO_1) ||
11210 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11211 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11212 } else {
11213 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11216 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11217 } else {
11218 num_pkts = 1;
11219 data_off = ETH_HLEN;
11222 for (i = data_off; i < tx_len; i++)
11223 tx_data[i] = (u8) (i & 0xff);
11225 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11226 if (pci_dma_mapping_error(tp->pdev, map)) {
11227 dev_kfree_skb(skb);
11228 return -EIO;
11231 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11232 rnapi->coal_now);
11234 udelay(10);
11236 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11238 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11239 base_flags, (mss << 1) | 1);
11241 tnapi->tx_prod++;
11243 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11244 tr32_mailbox(tnapi->prodmbox);
11246 udelay(10);
11248 /* Poll for up to 350 usec (35 x 10 usec) to allow enough time on some 10/100 Mbps devices. */
11249 for (i = 0; i < 35; i++) {
11250 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11251 coal_now);
11253 udelay(10);
11255 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11256 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11257 if ((tx_idx == tnapi->tx_prod) &&
11258 (rx_idx == (rx_start_idx + num_pkts)))
11259 break;
11262 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11263 dev_kfree_skb(skb);
11265 if (tx_idx != tnapi->tx_prod)
11266 goto out;
11268 if (rx_idx != rx_start_idx + num_pkts)
11269 goto out;
11271 val = data_off;
11272 while (rx_idx != rx_start_idx) {
11273 desc = &rnapi->rx_rcb[rx_start_idx++];
11274 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11275 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11277 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11278 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11279 goto out;
11281 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11282 - ETH_FCS_LEN;
11284 if (loopback_mode != TG3_TSO_LOOPBACK) {
11285 if (rx_len != tx_len)
11286 goto out;
11288 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11289 if (opaque_key != RXD_OPAQUE_RING_STD)
11290 goto out;
11291 } else {
11292 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11293 goto out;
11295 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11296 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11297 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11298 goto out;
11301 if (opaque_key == RXD_OPAQUE_RING_STD) {
11302 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11303 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11304 mapping);
11305 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11306 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11307 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11308 mapping);
11309 } else
11310 goto out;
11312 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11313 PCI_DMA_FROMDEVICE);
11315 for (i = data_off; i < rx_len; i++, val++) {
11316 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11317 goto out;
11321 err = 0;
11323 /* tg3_free_rings will unmap and free the rx_skb */
11324 out:
11325 return err;
11328 #define TG3_STD_LOOPBACK_FAILED 1
11329 #define TG3_JMB_LOOPBACK_FAILED 2
11330 #define TG3_TSO_LOOPBACK_FAILED 4
11332 #define TG3_MAC_LOOPBACK_SHIFT 0
11333 #define TG3_PHY_LOOPBACK_SHIFT 4
11334 #define TG3_LOOPBACK_FAILED 0x00000077
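/* Run the standard/jumbo/TSO loopback permutations. EEE, gphy
 * autopowerdown and CPMU link-aware power management are disabled for the
 * duration so they cannot perturb the results.
 */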
11336 static int tg3_test_loopback(struct tg3 *tp)
11338 int err = 0;
11339 u32 eee_cap, cpmuctrl = 0;
11341 if (!netif_running(tp->dev))
11342 return TG3_LOOPBACK_FAILED;
11344 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11345 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11347 err = tg3_reset_hw(tp, 1);
11348 if (err) {
11349 err = TG3_LOOPBACK_FAILED;
11350 goto done;
11353 if (tg3_flag(tp, ENABLE_RSS)) {
11354 int i;
11356 /* Reroute all rx packets to the 1st queue */
11357 for (i = MAC_RSS_INDIR_TBL_0;
11358 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11359 tw32(i, 0x0);
11362 /* Turn off gphy autopowerdown. */
11363 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11364 tg3_phy_toggle_apd(tp, false);
11366 if (tg3_flag(tp, CPMU_PRESENT)) {
11367 int i;
11368 u32 status;
11370 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11372 /* Wait for up to 40 microseconds to acquire lock. */
11373 for (i = 0; i < 4; i++) {
11374 status = tr32(TG3_CPMU_MUTEX_GNT);
11375 if (status == CPMU_MUTEX_GNT_DRIVER)
11376 break;
11377 udelay(10);
11380 if (status != CPMU_MUTEX_GNT_DRIVER) {
11381 err = TG3_LOOPBACK_FAILED;
11382 goto done;
11385 /* Turn off link-based power management. */
11386 cpmuctrl = tr32(TG3_CPMU_CTRL);
11387 tw32(TG3_CPMU_CTRL,
11388 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11389 CPMU_CTRL_LINK_AWARE_MODE));
11392 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11393 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11395 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11396 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11397 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11399 if (tg3_flag(tp, CPMU_PRESENT)) {
11400 tw32(TG3_CPMU_CTRL, cpmuctrl);
11402 /* Release the mutex */
11403 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11406 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11407 !tg3_flag(tp, USE_PHYLIB)) {
11408 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11409 err |= TG3_STD_LOOPBACK_FAILED <<
11410 TG3_PHY_LOOPBACK_SHIFT;
11411 if (tg3_flag(tp, TSO_CAPABLE) &&
11412 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11413 err |= TG3_TSO_LOOPBACK_FAILED <<
11414 TG3_PHY_LOOPBACK_SHIFT;
11415 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11416 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11417 err |= TG3_JMB_LOOPBACK_FAILED <<
11418 TG3_PHY_LOOPBACK_SHIFT;
11421 /* Re-enable gphy autopowerdown. */
11422 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11423 tg3_phy_toggle_apd(tp, true);
11425 done:
11426 tp->phy_flags |= eee_cap;
11428 return err;
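/* ethtool -t [online|offline]. Offline adds the register, memory, loopback
 * and interrupt tests, which require halting the chip. Results are reported
 * in data[0..5] in the order of ethtool_test_keys.
 */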
11431 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11432 u64 *data)
11434 struct tg3 *tp = netdev_priv(dev);
11436 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11437 tg3_power_up(tp);
11439 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11441 if (tg3_test_nvram(tp) != 0) {
11442 etest->flags |= ETH_TEST_FL_FAILED;
11443 data[0] = 1;
11445 if (tg3_test_link(tp) != 0) {
11446 etest->flags |= ETH_TEST_FL_FAILED;
11447 data[1] = 1;
11449 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11450 int err, err2 = 0, irq_sync = 0;
11452 if (netif_running(dev)) {
11453 tg3_phy_stop(tp);
11454 tg3_netif_stop(tp);
11455 irq_sync = 1;
11458 tg3_full_lock(tp, irq_sync);
11460 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11461 err = tg3_nvram_lock(tp);
11462 tg3_halt_cpu(tp, RX_CPU_BASE);
11463 if (!tg3_flag(tp, 5705_PLUS))
11464 tg3_halt_cpu(tp, TX_CPU_BASE);
11465 if (!err)
11466 tg3_nvram_unlock(tp);
11468 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11469 tg3_phy_reset(tp);
11471 if (tg3_test_registers(tp) != 0) {
11472 etest->flags |= ETH_TEST_FL_FAILED;
11473 data[2] = 1;
11475 if (tg3_test_memory(tp) != 0) {
11476 etest->flags |= ETH_TEST_FL_FAILED;
11477 data[3] = 1;
11479 if ((data[4] = tg3_test_loopback(tp)) != 0)
11480 etest->flags |= ETH_TEST_FL_FAILED;
11482 tg3_full_unlock(tp);
11484 if (tg3_test_interrupt(tp) != 0) {
11485 etest->flags |= ETH_TEST_FL_FAILED;
11486 data[5] = 1;
11489 tg3_full_lock(tp, 0);
11491 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11492 if (netif_running(dev)) {
11493 tg3_flag_set(tp, INIT_COMPLETE);
11494 err2 = tg3_restart_hw(tp, 1);
11495 if (!err2)
11496 tg3_netif_start(tp);
11499 tg3_full_unlock(tp);
11501 if (irq_sync && !err2)
11502 tg3_phy_start(tp);
11504 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11505 tg3_power_down(tp);
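/* Legacy MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG). Forwarded to
 * phylib when it manages the PHY; serdes devices have no MII PHY to access.
 */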
11509 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11511 struct mii_ioctl_data *data = if_mii(ifr);
11512 struct tg3 *tp = netdev_priv(dev);
11513 int err;
11515 if (tg3_flag(tp, USE_PHYLIB)) {
11516 struct phy_device *phydev;
11517 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11518 return -EAGAIN;
11519 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11520 return phy_mii_ioctl(phydev, ifr, cmd);
11523 switch (cmd) {
11524 case SIOCGMIIPHY:
11525 data->phy_id = tp->phy_addr;
11527 /* fall through */
11528 case SIOCGMIIREG: {
11529 u32 mii_regval;
11531 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11532 break; /* We have no PHY */
11534 if (!netif_running(dev))
11535 return -EAGAIN;
11537 spin_lock_bh(&tp->lock);
11538 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11539 spin_unlock_bh(&tp->lock);
11541 data->val_out = mii_regval;
11543 return err;
11546 case SIOCSMIIREG:
11547 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11548 break; /* We have no PHY */
11550 if (!netif_running(dev))
11551 return -EAGAIN;
11553 spin_lock_bh(&tp->lock);
11554 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11555 spin_unlock_bh(&tp->lock);
11557 return err;
11559 default:
11560 /* do nothing */
11561 break;
11563 return -EOPNOTSUPP;
11566 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11568 struct tg3 *tp = netdev_priv(dev);
11570 memcpy(ec, &tp->coal, sizeof(*ec));
11571 return 0;
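/* ethtool -C, e.g. "ethtool -C eth0 rx-usecs 20 rx-frames 5". Parameters
 * are validated against chip limits (5705+ parts lack the irq and stats
 * knobs), and each direction needs a nonzero usecs or frames value, since
 * both at zero would generate no interrupts at all.
 */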
11574 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11576 struct tg3 *tp = netdev_priv(dev);
11577 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11578 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11580 if (!tg3_flag(tp, 5705_PLUS)) {
11581 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11582 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11583 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11584 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11587 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11588 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11589 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11590 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11591 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11592 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11593 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11594 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11595 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11596 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11597 return -EINVAL;
11599 /* No rx interrupts will be generated if both are zero */
11600 if ((ec->rx_coalesce_usecs == 0) &&
11601 (ec->rx_max_coalesced_frames == 0))
11602 return -EINVAL;
11604 /* No tx interrupts will be generated if both are zero */
11605 if ((ec->tx_coalesce_usecs == 0) &&
11606 (ec->tx_max_coalesced_frames == 0))
11607 return -EINVAL;
11609 /* Only copy relevant parameters, ignore all others. */
11610 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11611 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11612 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11613 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11614 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11615 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11616 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11617 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11618 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11620 if (netif_running(dev)) {
11621 tg3_full_lock(tp, 0);
11622 __tg3_set_coalesce(tp, &tp->coal);
11623 tg3_full_unlock(tp);
11625 return 0;
11628 static const struct ethtool_ops tg3_ethtool_ops = {
11629 .get_settings = tg3_get_settings,
11630 .set_settings = tg3_set_settings,
11631 .get_drvinfo = tg3_get_drvinfo,
11632 .get_regs_len = tg3_get_regs_len,
11633 .get_regs = tg3_get_regs,
11634 .get_wol = tg3_get_wol,
11635 .set_wol = tg3_set_wol,
11636 .get_msglevel = tg3_get_msglevel,
11637 .set_msglevel = tg3_set_msglevel,
11638 .nway_reset = tg3_nway_reset,
11639 .get_link = ethtool_op_get_link,
11640 .get_eeprom_len = tg3_get_eeprom_len,
11641 .get_eeprom = tg3_get_eeprom,
11642 .set_eeprom = tg3_set_eeprom,
11643 .get_ringparam = tg3_get_ringparam,
11644 .set_ringparam = tg3_set_ringparam,
11645 .get_pauseparam = tg3_get_pauseparam,
11646 .set_pauseparam = tg3_set_pauseparam,
11647 .self_test = tg3_self_test,
11648 .get_strings = tg3_get_strings,
11649 .set_phys_id = tg3_set_phys_id,
11650 .get_ethtool_stats = tg3_get_ethtool_stats,
11651 .get_coalesce = tg3_get_coalesce,
11652 .set_coalesce = tg3_set_coalesce,
11653 .get_sset_count = tg3_get_sset_count,
11656 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11658 u32 cursize, val, magic;
11660 tp->nvram_size = EEPROM_CHIP_SIZE;
11662 if (tg3_nvram_read(tp, 0, &magic) != 0)
11663 return;
11665 if ((magic != TG3_EEPROM_MAGIC) &&
11666 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11667 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11668 return;
11671 * Size the chip by reading offsets at increasing powers of two.
11672 * When we encounter our validation signature, we know the addressing
11673 * has wrapped around, and thus have our chip size.
11675 cursize = 0x10;
11677 while (cursize < tp->nvram_size) {
11678 if (tg3_nvram_read(tp, cursize, &val) != 0)
11679 return;
11681 if (val == magic)
11682 break;
11684 cursize <<= 1;
11687 tp->nvram_size = cursize;
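/* Determine total NVRAM size: selfboot images are sized by probing; other
 * images carry a size word at offset 0xf0, and zero there means 512KB.
 */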
11690 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11692 u32 val;
11694 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11695 return;
11697 /* Selfboot format */
11698 if (val != TG3_EEPROM_MAGIC) {
11699 tg3_get_eeprom_size(tp);
11700 return;
11703 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11704 if (val != 0) {
11705 /* This is subtle. We want to operate on the
11706 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11707 * call will read from NVRAM and byteswap the data
11708 * according to the byteswapping settings for all
11709 * other register accesses. This ensures the data we
11710 * want will always reside in the lower 16-bits.
11711 * However, the data in NVRAM is in LE format, which
11712 * means the data from the NVRAM read will always be
11713 * opposite the endianness of the CPU. The 16-bit
11714 * byteswap then brings the data to CPU endianness.
11716 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11717 return;
11720 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11723 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11725 u32 nvcfg1;
11727 nvcfg1 = tr32(NVRAM_CFG1);
11728 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11729 tg3_flag_set(tp, FLASH);
11730 } else {
11731 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11732 tw32(NVRAM_CFG1, nvcfg1);
11735 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11736 tg3_flag(tp, 5780_CLASS)) {
11737 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11738 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11739 tp->nvram_jedecnum = JEDEC_ATMEL;
11740 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11741 tg3_flag_set(tp, NVRAM_BUFFERED);
11742 break;
11743 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11744 tp->nvram_jedecnum = JEDEC_ATMEL;
11745 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11746 break;
11747 case FLASH_VENDOR_ATMEL_EEPROM:
11748 tp->nvram_jedecnum = JEDEC_ATMEL;
11749 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11750 tg3_flag_set(tp, NVRAM_BUFFERED);
11751 break;
11752 case FLASH_VENDOR_ST:
11753 tp->nvram_jedecnum = JEDEC_ST;
11754 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11755 tg3_flag_set(tp, NVRAM_BUFFERED);
11756 break;
11757 case FLASH_VENDOR_SAIFUN:
11758 tp->nvram_jedecnum = JEDEC_SAIFUN;
11759 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11760 break;
11761 case FLASH_VENDOR_SST_SMALL:
11762 case FLASH_VENDOR_SST_LARGE:
11763 tp->nvram_jedecnum = JEDEC_SST;
11764 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11765 break;
11767 } else {
11768 tp->nvram_jedecnum = JEDEC_ATMEL;
11769 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11770 tg3_flag_set(tp, NVRAM_BUFFERED);
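/* Decode the 5752-family NVRAM_CFG1 page-size field into bytes. */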
11774 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11776 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11777 case FLASH_5752PAGE_SIZE_256:
11778 tp->nvram_pagesize = 256;
11779 break;
11780 case FLASH_5752PAGE_SIZE_512:
11781 tp->nvram_pagesize = 512;
11782 break;
11783 case FLASH_5752PAGE_SIZE_1K:
11784 tp->nvram_pagesize = 1024;
11785 break;
11786 case FLASH_5752PAGE_SIZE_2K:
11787 tp->nvram_pagesize = 2048;
11788 break;
11789 case FLASH_5752PAGE_SIZE_4K:
11790 tp->nvram_pagesize = 4096;
11791 break;
11792 case FLASH_5752PAGE_SIZE_264:
11793 tp->nvram_pagesize = 264;
11794 break;
11795 case FLASH_5752PAGE_SIZE_528:
11796 tp->nvram_pagesize = 528;
11797 break;
11801 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11803 u32 nvcfg1;
11805 nvcfg1 = tr32(NVRAM_CFG1);
11807 /* NVRAM protection for TPM */
11808 if (nvcfg1 & (1 << 27))
11809 tg3_flag_set(tp, PROTECTED_NVRAM);
11811 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11812 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11813 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11814 tp->nvram_jedecnum = JEDEC_ATMEL;
11815 tg3_flag_set(tp, NVRAM_BUFFERED);
11816 break;
11817 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11818 tp->nvram_jedecnum = JEDEC_ATMEL;
11819 tg3_flag_set(tp, NVRAM_BUFFERED);
11820 tg3_flag_set(tp, FLASH);
11821 break;
11822 case FLASH_5752VENDOR_ST_M45PE10:
11823 case FLASH_5752VENDOR_ST_M45PE20:
11824 case FLASH_5752VENDOR_ST_M45PE40:
11825 tp->nvram_jedecnum = JEDEC_ST;
11826 tg3_flag_set(tp, NVRAM_BUFFERED);
11827 tg3_flag_set(tp, FLASH);
11828 break;
11831 if (tg3_flag(tp, FLASH)) {
11832 tg3_nvram_get_pagesize(tp, nvcfg1);
11833 } else {
11834 /* For eeprom, set pagesize to maximum eeprom size */
11835 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11837 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11838 tw32(NVRAM_CFG1, nvcfg1);
11842 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11844 u32 nvcfg1, protect = 0;
11846 nvcfg1 = tr32(NVRAM_CFG1);
11848 /* NVRAM protection for TPM */
11849 if (nvcfg1 & (1 << 27)) {
11850 tg3_flag_set(tp, PROTECTED_NVRAM);
11851 protect = 1;
11854 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11855 switch (nvcfg1) {
11856 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11857 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11858 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11859 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11860 tp->nvram_jedecnum = JEDEC_ATMEL;
11861 tg3_flag_set(tp, NVRAM_BUFFERED);
11862 tg3_flag_set(tp, FLASH);
11863 tp->nvram_pagesize = 264;
11864 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11865 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11866 tp->nvram_size = (protect ? 0x3e200 :
11867 TG3_NVRAM_SIZE_512KB);
11868 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11869 tp->nvram_size = (protect ? 0x1f200 :
11870 TG3_NVRAM_SIZE_256KB);
11871 else
11872 tp->nvram_size = (protect ? 0x1f200 :
11873 TG3_NVRAM_SIZE_128KB);
11874 break;
11875 case FLASH_5752VENDOR_ST_M45PE10:
11876 case FLASH_5752VENDOR_ST_M45PE20:
11877 case FLASH_5752VENDOR_ST_M45PE40:
11878 tp->nvram_jedecnum = JEDEC_ST;
11879 tg3_flag_set(tp, NVRAM_BUFFERED);
11880 tg3_flag_set(tp, FLASH);
11881 tp->nvram_pagesize = 256;
11882 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11883 tp->nvram_size = (protect ?
11884 TG3_NVRAM_SIZE_64KB :
11885 TG3_NVRAM_SIZE_128KB);
11886 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11887 tp->nvram_size = (protect ?
11888 TG3_NVRAM_SIZE_64KB :
11889 TG3_NVRAM_SIZE_256KB);
11890 else
11891 tp->nvram_size = (protect ?
11892 TG3_NVRAM_SIZE_128KB :
11893 TG3_NVRAM_SIZE_512KB);
11894 break;
11898 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11900 u32 nvcfg1;
11902 nvcfg1 = tr32(NVRAM_CFG1);
11904 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11905 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11906 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11907 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11908 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11909 tp->nvram_jedecnum = JEDEC_ATMEL;
11910 tg3_flag_set(tp, NVRAM_BUFFERED);
11911 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11913 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11914 tw32(NVRAM_CFG1, nvcfg1);
11915 break;
11916 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11917 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11918 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11919 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11920 tp->nvram_jedecnum = JEDEC_ATMEL;
11921 tg3_flag_set(tp, NVRAM_BUFFERED);
11922 tg3_flag_set(tp, FLASH);
11923 tp->nvram_pagesize = 264;
11924 break;
11925 case FLASH_5752VENDOR_ST_M45PE10:
11926 case FLASH_5752VENDOR_ST_M45PE20:
11927 case FLASH_5752VENDOR_ST_M45PE40:
11928 tp->nvram_jedecnum = JEDEC_ST;
11929 tg3_flag_set(tp, NVRAM_BUFFERED);
11930 tg3_flag_set(tp, FLASH);
11931 tp->nvram_pagesize = 256;
11932 break;
11936 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11938 u32 nvcfg1, protect = 0;
11940 nvcfg1 = tr32(NVRAM_CFG1);
11942 /* NVRAM protection for TPM */
11943 if (nvcfg1 & (1 << 27)) {
11944 tg3_flag_set(tp, PROTECTED_NVRAM);
11945 protect = 1;
11948 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11949 switch (nvcfg1) {
11950 case FLASH_5761VENDOR_ATMEL_ADB021D:
11951 case FLASH_5761VENDOR_ATMEL_ADB041D:
11952 case FLASH_5761VENDOR_ATMEL_ADB081D:
11953 case FLASH_5761VENDOR_ATMEL_ADB161D:
11954 case FLASH_5761VENDOR_ATMEL_MDB021D:
11955 case FLASH_5761VENDOR_ATMEL_MDB041D:
11956 case FLASH_5761VENDOR_ATMEL_MDB081D:
11957 case FLASH_5761VENDOR_ATMEL_MDB161D:
11958 tp->nvram_jedecnum = JEDEC_ATMEL;
11959 tg3_flag_set(tp, NVRAM_BUFFERED);
11960 tg3_flag_set(tp, FLASH);
11961 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11962 tp->nvram_pagesize = 256;
11963 break;
11964 case FLASH_5761VENDOR_ST_A_M45PE20:
11965 case FLASH_5761VENDOR_ST_A_M45PE40:
11966 case FLASH_5761VENDOR_ST_A_M45PE80:
11967 case FLASH_5761VENDOR_ST_A_M45PE16:
11968 case FLASH_5761VENDOR_ST_M_M45PE20:
11969 case FLASH_5761VENDOR_ST_M_M45PE40:
11970 case FLASH_5761VENDOR_ST_M_M45PE80:
11971 case FLASH_5761VENDOR_ST_M_M45PE16:
11972 tp->nvram_jedecnum = JEDEC_ST;
11973 tg3_flag_set(tp, NVRAM_BUFFERED);
11974 tg3_flag_set(tp, FLASH);
11975 tp->nvram_pagesize = 256;
11976 break;
11979 if (protect) {
11980 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11981 } else {
11982 switch (nvcfg1) {
11983 case FLASH_5761VENDOR_ATMEL_ADB161D:
11984 case FLASH_5761VENDOR_ATMEL_MDB161D:
11985 case FLASH_5761VENDOR_ST_A_M45PE16:
11986 case FLASH_5761VENDOR_ST_M_M45PE16:
11987 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11988 break;
11989 case FLASH_5761VENDOR_ATMEL_ADB081D:
11990 case FLASH_5761VENDOR_ATMEL_MDB081D:
11991 case FLASH_5761VENDOR_ST_A_M45PE80:
11992 case FLASH_5761VENDOR_ST_M_M45PE80:
11993 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11994 break;
11995 case FLASH_5761VENDOR_ATMEL_ADB041D:
11996 case FLASH_5761VENDOR_ATMEL_MDB041D:
11997 case FLASH_5761VENDOR_ST_A_M45PE40:
11998 case FLASH_5761VENDOR_ST_M_M45PE40:
11999 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12000 break;
12001 case FLASH_5761VENDOR_ATMEL_ADB021D:
12002 case FLASH_5761VENDOR_ATMEL_MDB021D:
12003 case FLASH_5761VENDOR_ST_A_M45PE20:
12004 case FLASH_5761VENDOR_ST_M_M45PE20:
12005 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12006 break;
12011 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12013 tp->nvram_jedecnum = JEDEC_ATMEL;
12014 tg3_flag_set(tp, NVRAM_BUFFERED);
12015 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12018 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12020 u32 nvcfg1;
12022 nvcfg1 = tr32(NVRAM_CFG1);
12024 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12025 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12026 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12027 tp->nvram_jedecnum = JEDEC_ATMEL;
12028 tg3_flag_set(tp, NVRAM_BUFFERED);
12029 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12031 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12032 tw32(NVRAM_CFG1, nvcfg1);
12033 return;
12034 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12035 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12036 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12037 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12038 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12039 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12040 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12041 tp->nvram_jedecnum = JEDEC_ATMEL;
12042 tg3_flag_set(tp, NVRAM_BUFFERED);
12043 tg3_flag_set(tp, FLASH);
12045 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12046 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12047 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12048 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12049 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12050 break;
12051 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12052 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12053 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12054 break;
12055 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12056 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12057 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12058 break;
12060 break;
12061 case FLASH_5752VENDOR_ST_M45PE10:
12062 case FLASH_5752VENDOR_ST_M45PE20:
12063 case FLASH_5752VENDOR_ST_M45PE40:
12064 tp->nvram_jedecnum = JEDEC_ST;
12065 tg3_flag_set(tp, NVRAM_BUFFERED);
12066 tg3_flag_set(tp, FLASH);
12068 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12069 case FLASH_5752VENDOR_ST_M45PE10:
12070 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12071 break;
12072 case FLASH_5752VENDOR_ST_M45PE20:
12073 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12074 break;
12075 case FLASH_5752VENDOR_ST_M45PE40:
12076 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12077 break;
12079 break;
12080 default:
12081 tg3_flag_set(tp, NO_NVRAM);
12082 return;
12085 tg3_nvram_get_pagesize(tp, nvcfg1);
12086 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12087 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12091 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12093 u32 nvcfg1;
12095 nvcfg1 = tr32(NVRAM_CFG1);
12097 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12098 case FLASH_5717VENDOR_ATMEL_EEPROM:
12099 case FLASH_5717VENDOR_MICRO_EEPROM:
12100 tp->nvram_jedecnum = JEDEC_ATMEL;
12101 tg3_flag_set(tp, NVRAM_BUFFERED);
12102 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12104 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12105 tw32(NVRAM_CFG1, nvcfg1);
12106 return;
12107 case FLASH_5717VENDOR_ATMEL_MDB011D:
12108 case FLASH_5717VENDOR_ATMEL_ADB011B:
12109 case FLASH_5717VENDOR_ATMEL_ADB011D:
12110 case FLASH_5717VENDOR_ATMEL_MDB021D:
12111 case FLASH_5717VENDOR_ATMEL_ADB021B:
12112 case FLASH_5717VENDOR_ATMEL_ADB021D:
12113 case FLASH_5717VENDOR_ATMEL_45USPT:
12114 tp->nvram_jedecnum = JEDEC_ATMEL;
12115 tg3_flag_set(tp, NVRAM_BUFFERED);
12116 tg3_flag_set(tp, FLASH);
12118 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12119 case FLASH_5717VENDOR_ATMEL_MDB021D:
12120 /* Detect size with tg3_get_nvram_size() */
12121 break;
12122 case FLASH_5717VENDOR_ATMEL_ADB021B:
12123 case FLASH_5717VENDOR_ATMEL_ADB021D:
12124 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12125 break;
12126 default:
12127 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12128 break;
12130 break;
12131 case FLASH_5717VENDOR_ST_M_M25PE10:
12132 case FLASH_5717VENDOR_ST_A_M25PE10:
12133 case FLASH_5717VENDOR_ST_M_M45PE10:
12134 case FLASH_5717VENDOR_ST_A_M45PE10:
12135 case FLASH_5717VENDOR_ST_M_M25PE20:
12136 case FLASH_5717VENDOR_ST_A_M25PE20:
12137 case FLASH_5717VENDOR_ST_M_M45PE20:
12138 case FLASH_5717VENDOR_ST_A_M45PE20:
12139 case FLASH_5717VENDOR_ST_25USPT:
12140 case FLASH_5717VENDOR_ST_45USPT:
12141 tp->nvram_jedecnum = JEDEC_ST;
12142 tg3_flag_set(tp, NVRAM_BUFFERED);
12143 tg3_flag_set(tp, FLASH);
12145 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12146 case FLASH_5717VENDOR_ST_M_M25PE20:
12147 case FLASH_5717VENDOR_ST_M_M45PE20:
12148 /* Detect size with tg3_get_nvram_size() */
12149 break;
12150 case FLASH_5717VENDOR_ST_A_M25PE20:
12151 case FLASH_5717VENDOR_ST_A_M45PE20:
12152 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12153 break;
12154 default:
12155 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12156 break;
12158 break;
12159 default:
12160 tg3_flag_set(tp, NO_NVRAM);
12161 return;
12164 tg3_nvram_get_pagesize(tp, nvcfg1);
12165 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12166 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12169 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12171 u32 nvcfg1, nvmpinstrp;
12173 nvcfg1 = tr32(NVRAM_CFG1);
12174 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12176 switch (nvmpinstrp) {
12177 case FLASH_5720_EEPROM_HD:
12178 case FLASH_5720_EEPROM_LD:
12179 tp->nvram_jedecnum = JEDEC_ATMEL;
12180 tg3_flag_set(tp, NVRAM_BUFFERED);
12182 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12183 tw32(NVRAM_CFG1, nvcfg1);
12184 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12185 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12186 else
12187 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12188 return;
12189 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12190 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12191 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12192 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12193 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12194 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12195 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12196 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12197 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12198 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12199 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12200 case FLASH_5720VENDOR_ATMEL_45USPT:
12201 tp->nvram_jedecnum = JEDEC_ATMEL;
12202 tg3_flag_set(tp, NVRAM_BUFFERED);
12203 tg3_flag_set(tp, FLASH);
12205 switch (nvmpinstrp) {
12206 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12207 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12208 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12209 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12210 break;
12211 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12212 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12213 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12214 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12215 break;
12216 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12217 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12218 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12219 break;
12220 default:
12221 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12222 break;
12224 break;
12225 case FLASH_5720VENDOR_M_ST_M25PE10:
12226 case FLASH_5720VENDOR_M_ST_M45PE10:
12227 case FLASH_5720VENDOR_A_ST_M25PE10:
12228 case FLASH_5720VENDOR_A_ST_M45PE10:
12229 case FLASH_5720VENDOR_M_ST_M25PE20:
12230 case FLASH_5720VENDOR_M_ST_M45PE20:
12231 case FLASH_5720VENDOR_A_ST_M25PE20:
12232 case FLASH_5720VENDOR_A_ST_M45PE20:
12233 case FLASH_5720VENDOR_M_ST_M25PE40:
12234 case FLASH_5720VENDOR_M_ST_M45PE40:
12235 case FLASH_5720VENDOR_A_ST_M25PE40:
12236 case FLASH_5720VENDOR_A_ST_M45PE40:
12237 case FLASH_5720VENDOR_M_ST_M25PE80:
12238 case FLASH_5720VENDOR_M_ST_M45PE80:
12239 case FLASH_5720VENDOR_A_ST_M25PE80:
12240 case FLASH_5720VENDOR_A_ST_M45PE80:
12241 case FLASH_5720VENDOR_ST_25USPT:
12242 case FLASH_5720VENDOR_ST_45USPT:
12243 tp->nvram_jedecnum = JEDEC_ST;
12244 tg3_flag_set(tp, NVRAM_BUFFERED);
12245 tg3_flag_set(tp, FLASH);
12247 switch (nvmpinstrp) {
12248 case FLASH_5720VENDOR_M_ST_M25PE20:
12249 case FLASH_5720VENDOR_M_ST_M45PE20:
12250 case FLASH_5720VENDOR_A_ST_M25PE20:
12251 case FLASH_5720VENDOR_A_ST_M45PE20:
12252 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12253 break;
12254 case FLASH_5720VENDOR_M_ST_M25PE40:
12255 case FLASH_5720VENDOR_M_ST_M45PE40:
12256 case FLASH_5720VENDOR_A_ST_M25PE40:
12257 case FLASH_5720VENDOR_A_ST_M45PE40:
12258 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12259 break;
12260 case FLASH_5720VENDOR_M_ST_M25PE80:
12261 case FLASH_5720VENDOR_M_ST_M45PE80:
12262 case FLASH_5720VENDOR_A_ST_M25PE80:
12263 case FLASH_5720VENDOR_A_ST_M45PE80:
12264 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12265 break;
12266 default:
12267 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12268 break;
12270 break;
12271 default:
12272 tg3_flag_set(tp, NO_NVRAM);
12273 return;
12276 tg3_nvram_get_pagesize(tp, nvcfg1);
12277 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12278 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12281 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12282 static void __devinit tg3_nvram_init(struct tg3 *tp)
12284 tw32_f(GRC_EEPROM_ADDR,
12285 (EEPROM_ADDR_FSM_RESET |
12286 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12287 EEPROM_ADDR_CLKPERD_SHIFT)));
12289 msleep(1);
12291 /* Enable seeprom accesses. */
12292 tw32_f(GRC_LOCAL_CTRL,
12293 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12294 udelay(100);
12296 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12297 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12298 tg3_flag_set(tp, NVRAM);
12300 if (tg3_nvram_lock(tp)) {
12301 netdev_warn(tp->dev,
12302 "Cannot get nvram lock, %s failed\n",
12303 __func__);
12304 return;
12306 tg3_enable_nvram_access(tp);
12308 tp->nvram_size = 0;
12310 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12311 tg3_get_5752_nvram_info(tp);
12312 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12313 tg3_get_5755_nvram_info(tp);
12314 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12315 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12316 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12317 tg3_get_5787_nvram_info(tp);
12318 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12319 tg3_get_5761_nvram_info(tp);
12320 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12321 tg3_get_5906_nvram_info(tp);
12322 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12323 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12324 tg3_get_57780_nvram_info(tp);
12325 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12326 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12327 tg3_get_5717_nvram_info(tp);
12328 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12329 tg3_get_5720_nvram_info(tp);
12330 else
12331 tg3_get_nvram_info(tp);
12333 if (tp->nvram_size == 0)
12334 tg3_get_nvram_size(tp);
12336 tg3_disable_nvram_access(tp);
12337 tg3_nvram_unlock(tp);
12339 } else {
12340 tg3_flag_clear(tp, NVRAM);
12341 tg3_flag_clear(tp, NVRAM_BUFFERED);
12343 tg3_get_eeprom_size(tp);
12347 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12348 u32 offset, u32 len, u8 *buf)
12350 int i, j, rc = 0;
12351 u32 val;
12353 for (i = 0; i < len; i += 4) {
12354 u32 addr;
12355 __be32 data;
12357 addr = offset + i;
12359 memcpy(&data, buf + i, 4);
12362 * The SEEPROM interface expects the data to always be opposite
12363 * the native endian format. We accomplish this by reversing
12364 * all the operations that would have been performed on the
12365 * data from a call to tg3_nvram_read_be32().
12367 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
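/* Worked example: buffer bytes 0x12 0x34 0x56 0x78 become the value
 * 0x12345678 after be32_to_cpu() on any host, and swab32() then hands
 * 0x78563412 to the register, independent of host endianness.
 */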
12369 val = tr32(GRC_EEPROM_ADDR);
12370 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12372 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12373 EEPROM_ADDR_READ);
12374 tw32(GRC_EEPROM_ADDR, val |
12375 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12376 (addr & EEPROM_ADDR_ADDR_MASK) |
12377 EEPROM_ADDR_START |
12378 EEPROM_ADDR_WRITE);
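/* The transaction is now started; the loop below polls
 * EEPROM_ADDR_COMPLETE for up to roughly a second (1000 iterations
 * with msleep(1)) before giving up.
 */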
12380 for (j = 0; j < 1000; j++) {
12381 val = tr32(GRC_EEPROM_ADDR);
12383 if (val & EEPROM_ADDR_COMPLETE)
12384 break;
12385 msleep(1);
12387 if (!(val & EEPROM_ADDR_COMPLETE)) {
12388 rc = -EBUSY;
12389 break;
12393 return rc;
12396 /* offset and length are dword aligned */
12397 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12398 u8 *buf)
12400 int ret = 0;
12401 u32 pagesize = tp->nvram_pagesize;
12402 u32 pagemask = pagesize - 1;
12403 u32 nvram_cmd;
12404 u8 *tmp;
12406 tmp = kmalloc(pagesize, GFP_KERNEL);
12407 if (tmp == NULL)
12408 return -ENOMEM;
12410 while (len) {
12411 int j;
12412 u32 phy_addr, page_off, size;
12414 phy_addr = offset & ~pagemask;
12416 for (j = 0; j < pagesize; j += 4) {
12417 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12418 (__be32 *) (tmp + j));
12419 if (ret)
12420 break;
12422 if (ret)
12423 break;
12425 page_off = offset & pagemask;
12426 size = pagesize;
12427 if (len < size)
12428 size = len;
12430 len -= size;
12432 memcpy(tmp + page_off, buf, size);
12434 offset = offset + (pagesize - page_off);
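/* tmp now holds the original page contents with the caller's data
 * patched in at page_off; the whole page is erased and reprogrammed
 * below.
 */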
12436 tg3_enable_nvram_access(tp);
12439 * Before we can erase the flash page, we need
12440 * to issue a special "write enable" command.
12442 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12444 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12445 break;
12447 /* Erase the target page */
12448 tw32(NVRAM_ADDR, phy_addr);
12450 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12451 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12453 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12454 break;
12456 /* Issue another write enable to start the write. */
12457 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12459 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12460 break;
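/* Program the page one 32-bit word at a time; NVRAM_CMD_FIRST and
 * NVRAM_CMD_LAST bracket the page, apparently so the controller knows
 * when the program cycle starts and when to commit it.
 */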
12462 for (j = 0; j < pagesize; j += 4) {
12463 __be32 data;
12465 data = *((__be32 *) (tmp + j));
12467 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12469 tw32(NVRAM_ADDR, phy_addr + j);
12471 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12472 NVRAM_CMD_WR;
12474 if (j == 0)
12475 nvram_cmd |= NVRAM_CMD_FIRST;
12476 else if (j == (pagesize - 4))
12477 nvram_cmd |= NVRAM_CMD_LAST;
12479 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12480 break;
12482 if (ret)
12483 break;
12486 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12487 tg3_nvram_exec_cmd(tp, nvram_cmd);
12489 kfree(tmp);
12491 return ret;
12494 /* offset and length are dword aligned */
12495 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12496 u8 *buf)
12498 int i, ret = 0;
12500 for (i = 0; i < len; i += 4, offset += 4) {
12501 u32 page_off, phy_addr, nvram_cmd;
12502 __be32 data;
12504 memcpy(&data, buf + i, 4);
12505 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12507 page_off = offset % tp->nvram_pagesize;
12509 phy_addr = tg3_nvram_phys_addr(tp, offset);
12511 tw32(NVRAM_ADDR, phy_addr);
12513 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12515 if (page_off == 0 || i == 0)
12516 nvram_cmd |= NVRAM_CMD_FIRST;
12517 if (page_off == (tp->nvram_pagesize - 4))
12518 nvram_cmd |= NVRAM_CMD_LAST;
12520 if (i == (len - 4))
12521 nvram_cmd |= NVRAM_CMD_LAST;
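/* NVRAM_CMD_LAST must be sent both at the end of each page and on the
 * final word of the transfer.
 */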
12523 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12524 !tg3_flag(tp, 5755_PLUS) &&
12525 (tp->nvram_jedecnum == JEDEC_ST) &&
12526 (nvram_cmd & NVRAM_CMD_FIRST)) {
12528 if ((ret = tg3_nvram_exec_cmd(tp,
12529 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12530 NVRAM_CMD_DONE)))
12532 break;
12534 if (!tg3_flag(tp, FLASH)) {
12535 /* We always do complete word writes to eeprom. */
12536 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12539 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12540 break;
12542 return ret;
12545 /* offset and length are dword aligned */
12546 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12548 int ret;
12550 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12551 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12552 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12553 udelay(40);
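/* GPIO1 output is dropped here to lift the external eeprom write
 * protect for the duration of the write; tp->grc_local_ctrl is
 * restored at the end of this function.
 */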
12556 if (!tg3_flag(tp, NVRAM)) {
12557 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12558 } else {
12559 u32 grc_mode;
12561 ret = tg3_nvram_lock(tp);
12562 if (ret)
12563 return ret;
12565 tg3_enable_nvram_access(tp);
12566 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12567 tw32(NVRAM_WRITE1, 0x406);
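/* 0x406 is apparently a magic write-protect-disable value; it is not
 * otherwise documented here.
 */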
12569 grc_mode = tr32(GRC_MODE);
12570 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12572 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12573 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12574 buf);
12575 } else {
12576 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12577 buf);
12580 grc_mode = tr32(GRC_MODE);
12581 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12583 tg3_disable_nvram_access(tp);
12584 tg3_nvram_unlock(tp);
12587 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12588 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12589 udelay(40);
12592 return ret;
12595 struct subsys_tbl_ent {
12596 u16 subsys_vendor, subsys_devid;
12597 u32 phy_id;
12600 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12601 /* Broadcom boards. */
12602 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12603 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12604 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12605 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12606 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12607 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12608 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12609 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12610 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12611 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12612 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12613 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12614 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12615 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12616 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12617 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12618 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12619 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12620 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12621 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12622 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12623 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12625 /* 3com boards. */
12626 { TG3PCI_SUBVENDOR_ID_3COM,
12627 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12628 { TG3PCI_SUBVENDOR_ID_3COM,
12629 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12630 { TG3PCI_SUBVENDOR_ID_3COM,
12631 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12632 { TG3PCI_SUBVENDOR_ID_3COM,
12633 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12634 { TG3PCI_SUBVENDOR_ID_3COM,
12635 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12637 /* DELL boards. */
12638 { TG3PCI_SUBVENDOR_ID_DELL,
12639 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12640 { TG3PCI_SUBVENDOR_ID_DELL,
12641 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12642 { TG3PCI_SUBVENDOR_ID_DELL,
12643 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12644 { TG3PCI_SUBVENDOR_ID_DELL,
12645 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12647 /* Compaq boards. */
12648 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12649 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12650 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12651 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12652 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12653 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12654 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12655 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12656 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12657 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12659 /* IBM boards. */
12660 { TG3PCI_SUBVENDOR_ID_IBM,
12661 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12664 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12666 int i;
12668 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12669 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12670 tp->pdev->subsystem_vendor) &&
12671 (subsys_id_to_phy_id[i].subsys_devid ==
12672 tp->pdev->subsystem_device))
12673 return &subsys_id_to_phy_id[i];
12675 return NULL;
12678 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12680 u32 val;
12681 u16 pmcsr;
12683 /* On some early chips the SRAM cannot be accessed in D3hot state,
12684 * so we need to make sure we're in D0.
12686 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12687 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12688 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12689 msleep(1);
12691 /* Make sure register accesses (indirect or otherwise)
12692 * will function correctly.
12694 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12695 tp->misc_host_ctrl);
12697 /* The memory arbiter has to be enabled in order for SRAM accesses
12698 * to succeed. Normally on powerup the tg3 chip firmware will make
12699 * sure it is enabled, but other entities such as system netboot
12700 * code might disable it.
12702 val = tr32(MEMARB_MODE);
12703 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12705 tp->phy_id = TG3_PHY_ID_INVALID;
12706 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12708 /* Assume an onboard device and WOL capability by default. */
12709 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12710 tg3_flag_set(tp, WOL_CAP);
12712 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12713 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12714 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12715 tg3_flag_set(tp, IS_NIC);
12717 val = tr32(VCPU_CFGSHDW);
12718 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12719 tg3_flag_set(tp, ASPM_WORKAROUND);
12720 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12721 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12722 tg3_flag_set(tp, WOL_ENABLE);
12723 device_set_wakeup_enable(&tp->pdev->dev, true);
12725 goto done;
12728 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12729 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12730 u32 nic_cfg, led_cfg;
12731 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12732 int eeprom_phy_serdes = 0;
12734 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12735 tp->nic_sram_data_cfg = nic_cfg;
12737 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12738 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12739 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12740 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12741 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12742 (ver > 0) && (ver < 0x100))
12743 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12745 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12746 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12748 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12749 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12750 eeprom_phy_serdes = 1;
12752 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12753 if (nic_phy_id != 0) {
12754 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12755 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12757 eeprom_phy_id = (id1 >> 16) << 10;
12758 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12759 eeprom_phy_id |= (id2 & 0x03ff) << 0;
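/* This repacks the SRAM-resident ID into the driver's internal
 * TG3_PHY_ID layout, mirroring the MII_PHYSID1/2 packing done in
 * tg3_phy_probe().
 */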
12760 } else
12761 eeprom_phy_id = 0;
12763 tp->phy_id = eeprom_phy_id;
12764 if (eeprom_phy_serdes) {
12765 if (!tg3_flag(tp, 5705_PLUS))
12766 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12767 else
12768 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12771 if (tg3_flag(tp, 5750_PLUS))
12772 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12773 SHASTA_EXT_LED_MODE_MASK);
12774 else
12775 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12777 switch (led_cfg) {
12778 default:
12779 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12780 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12781 break;
12783 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12784 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12785 break;
12787 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12788 tp->led_ctrl = LED_CTRL_MODE_MAC;
12790 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12791 * read with some older 5700/5701 bootcode.
12793 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12794 ASIC_REV_5700 ||
12795 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12796 ASIC_REV_5701)
12797 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12799 break;
12801 case SHASTA_EXT_LED_SHARED:
12802 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12803 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12804 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12805 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12806 LED_CTRL_MODE_PHY_2);
12807 break;
12809 case SHASTA_EXT_LED_MAC:
12810 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12811 break;
12813 case SHASTA_EXT_LED_COMBO:
12814 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12815 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12816 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12817 LED_CTRL_MODE_PHY_2);
12818 break;
12822 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12823 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12824 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12825 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12827 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12828 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12830 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12831 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12832 if ((tp->pdev->subsystem_vendor ==
12833 PCI_VENDOR_ID_ARIMA) &&
12834 (tp->pdev->subsystem_device == 0x205a ||
12835 tp->pdev->subsystem_device == 0x2063))
12836 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12837 } else {
12838 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12839 tg3_flag_set(tp, IS_NIC);
12842 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12843 tg3_flag_set(tp, ENABLE_ASF);
12844 if (tg3_flag(tp, 5750_PLUS))
12845 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12848 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12849 tg3_flag(tp, 5750_PLUS))
12850 tg3_flag_set(tp, ENABLE_APE);
12852 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12853 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12854 tg3_flag_clear(tp, WOL_CAP);
12856 if (tg3_flag(tp, WOL_CAP) &&
12857 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12858 tg3_flag_set(tp, WOL_ENABLE);
12859 device_set_wakeup_enable(&tp->pdev->dev, true);
12862 if (cfg2 & (1 << 17))
12863 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12865 /* serdes signal pre-emphasis in register 0x590 is set by the */
12866 /* bootcode if bit 18 is set. */
12867 if (cfg2 & (1 << 18))
12868 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12870 if ((tg3_flag(tp, 57765_PLUS) ||
12871 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12872 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12873 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12874 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12876 if (tg3_flag(tp, PCI_EXPRESS) &&
12877 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12878 !tg3_flag(tp, 57765_PLUS)) {
12879 u32 cfg3;
12881 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12882 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12883 tg3_flag_set(tp, ASPM_WORKAROUND);
12886 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12887 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12888 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12889 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12890 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12891 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12893 done:
12894 if (tg3_flag(tp, WOL_CAP))
12895 device_set_wakeup_enable(&tp->pdev->dev,
12896 tg3_flag(tp, WOL_ENABLE));
12897 else
12898 device_set_wakeup_capable(&tp->pdev->dev, false);
12901 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12903 int i;
12904 u32 val;
12906 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12907 tw32(OTP_CTRL, cmd);
12909 /* Wait for up to 1 ms for command to execute. */
12910 for (i = 0; i < 100; i++) {
12911 val = tr32(OTP_STATUS);
12912 if (val & OTP_STATUS_CMD_DONE)
12913 break;
12914 udelay(10);
12917 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12920 /* Read the gphy configuration from the OTP region of the chip. The gphy
12921 * configuration is a 32-bit value that straddles the alignment boundary.
12922 * We do two 32-bit reads and then shift and merge the results.
12924 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12926 u32 bhalf_otp, thalf_otp;
12928 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12930 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12931 return 0;
12933 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12935 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12936 return 0;
12938 thalf_otp = tr32(OTP_READ_DATA);
12940 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12942 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12943 return 0;
12945 bhalf_otp = tr32(OTP_READ_DATA);
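/* The 32-bit config straddles two OTP words: the low half of the
 * first read supplies the upper 16 bits and the high half of the
 * second read supplies the lower 16 bits.
 */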
12947 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12950 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12952 u32 adv = ADVERTISED_Autoneg |
12953 ADVERTISED_Pause;
12955 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12956 adv |= ADVERTISED_1000baseT_Half |
12957 ADVERTISED_1000baseT_Full;
12959 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12960 adv |= ADVERTISED_100baseT_Half |
12961 ADVERTISED_100baseT_Full |
12962 ADVERTISED_10baseT_Half |
12963 ADVERTISED_10baseT_Full |
12964 ADVERTISED_TP;
12965 else
12966 adv |= ADVERTISED_FIBRE;
12968 tp->link_config.advertising = adv;
12969 tp->link_config.speed = SPEED_INVALID;
12970 tp->link_config.duplex = DUPLEX_INVALID;
12971 tp->link_config.autoneg = AUTONEG_ENABLE;
12972 tp->link_config.active_speed = SPEED_INVALID;
12973 tp->link_config.active_duplex = DUPLEX_INVALID;
12974 tp->link_config.orig_speed = SPEED_INVALID;
12975 tp->link_config.orig_duplex = DUPLEX_INVALID;
12976 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12979 static int __devinit tg3_phy_probe(struct tg3 *tp)
12981 u32 hw_phy_id_1, hw_phy_id_2;
12982 u32 hw_phy_id, hw_phy_id_masked;
12983 int err;
12985 /* flow control autonegotiation is default behavior */
12986 tg3_flag_set(tp, PAUSE_AUTONEG);
12987 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12989 if (tg3_flag(tp, USE_PHYLIB))
12990 return tg3_phy_init(tp);
12992 /* Reading the PHY ID register can conflict with ASF
12993 * firmware access to the PHY hardware.
12995 err = 0;
12996 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12997 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12998 } else {
12999 /* Now read the physical PHY_ID from the chip and verify
13000 * that it is sane. If it doesn't look good, we fall back
13001 * to either the hard-coded, table-based PHY_ID or, failing
13002 * that, the value found in the eeprom area.
13004 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13005 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13007 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13008 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13009 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13011 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13014 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13015 tp->phy_id = hw_phy_id;
13016 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13017 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13018 else
13019 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13020 } else {
13021 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13022 /* Do nothing, phy ID already set up in
13023 * tg3_get_eeprom_hw_cfg().
13025 } else {
13026 struct subsys_tbl_ent *p;
13028 /* No eeprom signature? Try the hardcoded
13029 * subsys device table.
13031 p = tg3_lookup_by_subsys(tp);
13032 if (!p)
13033 return -ENODEV;
13035 tp->phy_id = p->phy_id;
13036 if (!tp->phy_id ||
13037 tp->phy_id == TG3_PHY_ID_BCM8002)
13038 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13042 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13043 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13044 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13045 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13046 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13047 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13049 tg3_phy_init_link_config(tp);
13051 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13052 !tg3_flag(tp, ENABLE_APE) &&
13053 !tg3_flag(tp, ENABLE_ASF)) {
13054 u32 bmsr, mask;
13056 tg3_readphy(tp, MII_BMSR, &bmsr);
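/* BMSR link status is latched-low per the MII spec; the read above
 * clears any stale latched value so the read below reflects the
 * current link state.
 */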
13057 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13058 (bmsr & BMSR_LSTATUS))
13059 goto skip_phy_reset;
13061 err = tg3_phy_reset(tp);
13062 if (err)
13063 return err;
13065 tg3_phy_set_wirespeed(tp);
13067 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13068 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13069 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13070 if (!tg3_copper_is_advertising_all(tp, mask)) {
13071 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13072 tp->link_config.flowctrl);
13074 tg3_writephy(tp, MII_BMCR,
13075 BMCR_ANENABLE | BMCR_ANRESTART);
13079 skip_phy_reset:
13080 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13081 err = tg3_init_5401phy_dsp(tp);
13082 if (err)
13083 return err;
13085 err = tg3_init_5401phy_dsp(tp);
13088 return err;
13091 static void __devinit tg3_read_vpd(struct tg3 *tp)
13093 u8 *vpd_data;
13094 unsigned int block_end, rosize, len;
13095 int j, i = 0;
13097 vpd_data = (u8 *)tg3_vpd_readblock(tp);
13098 if (!vpd_data)
13099 goto out_no_vpd;
13101 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13102 PCI_VPD_LRDT_RO_DATA);
13103 if (i < 0)
13104 goto out_not_found;
13106 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13107 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13108 i += PCI_VPD_LRDT_TAG_SIZE;
13110 if (block_end > TG3_NVM_VPD_LEN)
13111 goto out_not_found;
13113 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13114 PCI_VPD_RO_KEYWORD_MFR_ID);
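/* "1028" is Dell's PCI vendor ID rendered as ASCII; the VENDOR0
 * keyword is only trusted as a firmware version string on Dell
 * boards.
 */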
13115 if (j > 0) {
13116 len = pci_vpd_info_field_size(&vpd_data[j]);
13118 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13119 if (j + len > block_end || len != 4 ||
13120 memcmp(&vpd_data[j], "1028", 4))
13121 goto partno;
13123 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13124 PCI_VPD_RO_KEYWORD_VENDOR0);
13125 if (j < 0)
13126 goto partno;
13128 len = pci_vpd_info_field_size(&vpd_data[j]);
13130 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13131 if (j + len > block_end)
13132 goto partno;
13134 memcpy(tp->fw_ver, &vpd_data[j], len);
13135 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
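/* Note: the strncat() bound tracks the VPD buffer length
 * (TG3_NVM_VPD_LEN) rather than the size of fw_ver (TG3_VER_SIZE);
 * this assumes the VENDOR0 string stays well under TG3_VER_SIZE.
 */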
13138 partno:
13139 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13140 PCI_VPD_RO_KEYWORD_PARTNO);
13141 if (i < 0)
13142 goto out_not_found;
13144 len = pci_vpd_info_field_size(&vpd_data[i]);
13146 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13147 if (len > TG3_BPN_SIZE ||
13148 (len + i) > TG3_NVM_VPD_LEN)
13149 goto out_not_found;
13151 memcpy(tp->board_part_number, &vpd_data[i], len);
13153 out_not_found:
13154 kfree(vpd_data);
13155 if (tp->board_part_number[0])
13156 return;
13158 out_no_vpd:
13159 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13160 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13161 strcpy(tp->board_part_number, "BCM5717");
13162 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13163 strcpy(tp->board_part_number, "BCM5718");
13164 else
13165 goto nomatch;
13166 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13167 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13168 strcpy(tp->board_part_number, "BCM57780");
13169 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13170 strcpy(tp->board_part_number, "BCM57760");
13171 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13172 strcpy(tp->board_part_number, "BCM57790");
13173 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13174 strcpy(tp->board_part_number, "BCM57788");
13175 else
13176 goto nomatch;
13177 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13178 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13179 strcpy(tp->board_part_number, "BCM57761");
13180 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13181 strcpy(tp->board_part_number, "BCM57765");
13182 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13183 strcpy(tp->board_part_number, "BCM57781");
13184 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13185 strcpy(tp->board_part_number, "BCM57785");
13186 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13187 strcpy(tp->board_part_number, "BCM57791");
13188 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13189 strcpy(tp->board_part_number, "BCM57795");
13190 else
13191 goto nomatch;
13192 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13193 strcpy(tp->board_part_number, "BCM95906");
13194 } else {
13195 nomatch:
13196 strcpy(tp->board_part_number, "none");
13200 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13202 u32 val;
13204 if (tg3_nvram_read(tp, offset, &val) ||
13205 (val & 0xfc000000) != 0x0c000000 ||
13206 tg3_nvram_read(tp, offset + 4, &val) ||
13207 val != 0)
13208 return 0;
13210 return 1;
13213 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13215 u32 val, offset, start, ver_offset;
13216 int i, dst_off;
13217 bool newver = false;
13219 if (tg3_nvram_read(tp, 0xc, &offset) ||
13220 tg3_nvram_read(tp, 0x4, &start))
13221 return;
13223 offset = tg3_nvram_logical_addr(tp, offset);
13225 if (tg3_nvram_read(tp, offset, &val))
13226 return;
13228 if ((val & 0xfc000000) == 0x0c000000) {
13229 if (tg3_nvram_read(tp, offset + 4, &val))
13230 return;
13232 if (val == 0)
13233 newver = true;
13236 dst_off = strlen(tp->fw_ver);
13238 if (newver) {
13239 if (TG3_VER_SIZE - dst_off < 16 ||
13240 tg3_nvram_read(tp, offset + 8, &ver_offset))
13241 return;
13243 offset = offset + ver_offset - start;
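/* ver_offset is a pointer within the bootcode image; subtracting
 * 'start' (presumably the image's load address, read above) rebases
 * it to an NVRAM offset.
 */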
13244 for (i = 0; i < 16; i += 4) {
13245 __be32 v;
13246 if (tg3_nvram_read_be32(tp, offset + i, &v))
13247 return;
13249 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13251 } else {
13252 u32 major, minor;
13254 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13255 return;
13257 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13258 TG3_NVM_BCVER_MAJSFT;
13259 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13260 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13261 "v%d.%02d", major, minor);
13265 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13267 u32 val, major, minor;
13269 /* Use native endian representation */
13270 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13271 return;
13273 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13274 TG3_NVM_HWSB_CFG1_MAJSFT;
13275 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13276 TG3_NVM_HWSB_CFG1_MINSFT;
13278 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13281 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13283 u32 offset, major, minor, build;
13285 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13287 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13288 return;
13290 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13291 case TG3_EEPROM_SB_REVISION_0:
13292 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13293 break;
13294 case TG3_EEPROM_SB_REVISION_2:
13295 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13296 break;
13297 case TG3_EEPROM_SB_REVISION_3:
13298 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13299 break;
13300 case TG3_EEPROM_SB_REVISION_4:
13301 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13302 break;
13303 case TG3_EEPROM_SB_REVISION_5:
13304 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13305 break;
13306 case TG3_EEPROM_SB_REVISION_6:
13307 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13308 break;
13309 default:
13310 return;
13313 if (tg3_nvram_read(tp, offset, &val))
13314 return;
13316 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13317 TG3_EEPROM_SB_EDH_BLD_SHFT;
13318 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13319 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13320 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13322 if (minor > 99 || build > 26)
13323 return;
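/* Builds are rendered as a single trailing letter below, so only
 * values 1-26 ('a'-'z') are representable.
 */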
13325 offset = strlen(tp->fw_ver);
13326 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13327 " v%d.%02d", major, minor);
13329 if (build > 0) {
13330 offset = strlen(tp->fw_ver);
13331 if (offset < TG3_VER_SIZE - 1)
13332 tp->fw_ver[offset] = 'a' + build - 1;
13336 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13338 u32 val, offset, start;
13339 int i, vlen;
13341 for (offset = TG3_NVM_DIR_START;
13342 offset < TG3_NVM_DIR_END;
13343 offset += TG3_NVM_DIRENT_SIZE) {
13344 if (tg3_nvram_read(tp, offset, &val))
13345 return;
13347 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13348 break;
13351 if (offset == TG3_NVM_DIR_END)
13352 return;
13354 if (!tg3_flag(tp, 5705_PLUS))
13355 start = 0x08000000;
13356 else if (tg3_nvram_read(tp, offset - 4, &start))
13357 return;
13359 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13360 !tg3_fw_img_is_valid(tp, offset) ||
13361 tg3_nvram_read(tp, offset + 8, &val))
13362 return;
13364 offset += val - start;
13366 vlen = strlen(tp->fw_ver);
13368 tp->fw_ver[vlen++] = ',';
13369 tp->fw_ver[vlen++] = ' ';
13371 for (i = 0; i < 4; i++) {
13372 __be32 v;
13373 if (tg3_nvram_read_be32(tp, offset, &v))
13374 return;
13376 offset += sizeof(v);
13378 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13379 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13380 break;
13383 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13384 vlen += sizeof(v);
13388 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13390 int vlen;
13391 u32 apedata;
13392 char *fwtype;
13394 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13395 return;
13397 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13398 if (apedata != APE_SEG_SIG_MAGIC)
13399 return;
13401 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13402 if (!(apedata & APE_FW_STATUS_READY))
13403 return;
13405 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13407 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13408 tg3_flag_set(tp, APE_HAS_NCSI);
13409 fwtype = "NCSI";
13410 } else {
13411 fwtype = "DASH";
13414 vlen = strlen(tp->fw_ver);
13416 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13417 fwtype,
13418 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13419 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13420 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13421 (apedata & APE_FW_VERSION_BLDMSK));
13424 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13426 u32 val;
13427 bool vpd_vers = false;
13429 if (tp->fw_ver[0] != 0)
13430 vpd_vers = true;
13432 if (tg3_flag(tp, NO_NVRAM)) {
13433 strcat(tp->fw_ver, "sb");
13434 return;
13437 if (tg3_nvram_read(tp, 0, &val))
13438 return;
13440 if (val == TG3_EEPROM_MAGIC)
13441 tg3_read_bc_ver(tp);
13442 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13443 tg3_read_sb_ver(tp, val);
13444 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13445 tg3_read_hwsb_ver(tp);
13446 else
13447 return;
13449 if (vpd_vers)
13450 goto done;
13452 if (tg3_flag(tp, ENABLE_APE)) {
13453 if (tg3_flag(tp, ENABLE_ASF))
13454 tg3_read_dash_ver(tp);
13455 } else if (tg3_flag(tp, ENABLE_ASF)) {
13456 tg3_read_mgmtfw_ver(tp);
13459 done:
13460 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13463 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13465 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13467 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13468 return TG3_RX_RET_MAX_SIZE_5717;
13469 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13470 return TG3_RX_RET_MAX_SIZE_5700;
13471 else
13472 return TG3_RX_RET_MAX_SIZE_5705;
13475 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13476 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13477 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13478 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13479 { },
13482 static int __devinit tg3_get_invariants(struct tg3 *tp)
13484 u32 misc_ctrl_reg;
13485 u32 pci_state_reg, grc_misc_cfg;
13486 u32 val;
13487 u16 pci_cmd;
13488 int err;
13490 /* Force memory write invalidate off. If we leave it on,
13491 * then on 5700_BX chips we have to enable a workaround.
13492 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13493 * to match the cacheline size. The Broadcom driver has this
13494 * workaround but turns MWI off all the time, so it is never
13495 * used. This seems to suggest that the workaround is insufficient.
13497 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13498 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13499 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13501 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13502 * has the register indirect write enable bit set before
13503 * we try to access any of the MMIO registers. It is also
13504 * critical that the PCI-X hw workaround situation is decided
13505 * before that as well.
13507 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13508 &misc_ctrl_reg);
13510 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13511 MISC_HOST_CTRL_CHIPREV_SHIFT);
13512 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13513 u32 prod_id_asic_rev;
13515 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13516 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13517 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13518 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13519 pci_read_config_dword(tp->pdev,
13520 TG3PCI_GEN2_PRODID_ASICREV,
13521 &prod_id_asic_rev);
13522 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13523 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13524 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13525 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13526 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13527 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13528 pci_read_config_dword(tp->pdev,
13529 TG3PCI_GEN15_PRODID_ASICREV,
13530 &prod_id_asic_rev);
13531 else
13532 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13533 &prod_id_asic_rev);
13535 tp->pci_chip_rev_id = prod_id_asic_rev;
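/* On these devices the legacy MISC_HOST_CTRL field reads as
 * ASIC_REV_USE_PROD_ID_REG, and the true revision lives in a
 * family-specific product-ID config register read above.
 */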
13538 /* Wrong chip ID in 5752 A0. This code can be removed later
13539 * as A0 is not in production.
13541 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13542 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13544 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13545 * we need to disable memory and use config. cycles
13546 * only to access all registers. The 5702/03 chips
13547 * can mistakenly decode the special cycles from the
13548 * ICH chipsets as memory write cycles, causing corruption
13549 * of register and memory space. Only certain ICH bridges
13550 * will drive special cycles with non-zero data during the
13551 * address phase which can fall within the 5703's address
13552 * range. This is not an ICH bug as the PCI spec allows
13553 * non-zero address during special cycles. However, only
13554 * these ICH bridges are known to drive non-zero addresses
13555 * during special cycles.
13557 * Since special cycles do not cross PCI bridges, we only
13558 * enable this workaround if the 5703 is on the secondary
13559 * bus of these ICH bridges.
13561 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13562 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13563 static struct tg3_dev_id {
13564 u32 vendor;
13565 u32 device;
13566 u32 rev;
13567 } ich_chipsets[] = {
13568 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13569 PCI_ANY_ID },
13570 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13571 PCI_ANY_ID },
13572 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13573 0xa },
13574 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13575 PCI_ANY_ID },
13576 { },
13578 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13579 struct pci_dev *bridge = NULL;
13581 while (pci_id->vendor != 0) {
13582 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13583 bridge);
13584 if (!bridge) {
13585 pci_id++;
13586 continue;
13588 if (pci_id->rev != PCI_ANY_ID) {
13589 if (bridge->revision > pci_id->rev)
13590 continue;
13592 if (bridge->subordinate &&
13593 (bridge->subordinate->number ==
13594 tp->pdev->bus->number)) {
13595 tg3_flag_set(tp, ICH_WORKAROUND);
13596 pci_dev_put(bridge);
13597 break;
13602 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13603 static struct tg3_dev_id {
13604 u32 vendor;
13605 u32 device;
13606 } bridge_chipsets[] = {
13607 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13608 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13609 { },
13611 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13612 struct pci_dev *bridge = NULL;
13614 while (pci_id->vendor != 0) {
13615 bridge = pci_get_device(pci_id->vendor,
13616 pci_id->device,
13617 bridge);
13618 if (!bridge) {
13619 pci_id++;
13620 continue;
13622 if (bridge->subordinate &&
13623 (bridge->subordinate->number <=
13624 tp->pdev->bus->number) &&
13625 (bridge->subordinate->subordinate >=
13626 tp->pdev->bus->number)) {
13627 tg3_flag_set(tp, 5701_DMA_BUG);
13628 pci_dev_put(bridge);
13629 break;
13634 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13635 * DMA addresses > 40-bit. This bridge may have additional
13636 * 57xx devices behind it in some 4-port NIC designs, for example.
13637 * Any tg3 device found behind the bridge will also need the 40-bit
13638 * DMA workaround.
13640 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13641 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13642 tg3_flag_set(tp, 5780_CLASS);
13643 tg3_flag_set(tp, 40BIT_DMA_BUG);
13644 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13645 } else {
13646 struct pci_dev *bridge = NULL;
13648 do {
13649 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13650 PCI_DEVICE_ID_SERVERWORKS_EPB,
13651 bridge);
13652 if (bridge && bridge->subordinate &&
13653 (bridge->subordinate->number <=
13654 tp->pdev->bus->number) &&
13655 (bridge->subordinate->subordinate >=
13656 tp->pdev->bus->number)) {
13657 tg3_flag_set(tp, 40BIT_DMA_BUG);
13658 pci_dev_put(bridge);
13659 break;
13661 } while (bridge);
13664 /* Initialize misc host control in PCI block. */
13665 tp->misc_host_ctrl |= (misc_ctrl_reg &
13666 MISC_HOST_CTRL_CHIPREV);
13667 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13668 tp->misc_host_ctrl);
13670 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13671 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13672 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13673 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13674 tp->pdev_peer = tg3_find_peer(tp);
13676 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13677 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13678 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13679 tg3_flag_set(tp, 5717_PLUS);
13681 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13682 tg3_flag(tp, 5717_PLUS))
13683 tg3_flag_set(tp, 57765_PLUS);
13685 /* Intentionally exclude ASIC_REV_5906 */
13686 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13687 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13688 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13689 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13690 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13691 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13692 tg3_flag(tp, 57765_PLUS))
13693 tg3_flag_set(tp, 5755_PLUS);
13695 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13696 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13697 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13698 tg3_flag(tp, 5755_PLUS) ||
13699 tg3_flag(tp, 5780_CLASS))
13700 tg3_flag_set(tp, 5750_PLUS);
13702 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13703 tg3_flag(tp, 5750_PLUS))
13704 tg3_flag_set(tp, 5705_PLUS);
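/* The *_PLUS flags form a superset chain: 5717_PLUS implies
 * 57765_PLUS implies 5755_PLUS implies 5750_PLUS implies 5705_PLUS,
 * with 5906 and the 5780 class joining the chain at 5750_PLUS.
 */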
13706 /* Determine TSO capabilities */
13707 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13708 ; /* Do nothing. HW bug. */
13709 else if (tg3_flag(tp, 57765_PLUS))
13710 tg3_flag_set(tp, HW_TSO_3);
13711 else if (tg3_flag(tp, 5755_PLUS) ||
13712 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13713 tg3_flag_set(tp, HW_TSO_2);
13714 else if (tg3_flag(tp, 5750_PLUS)) {
13715 tg3_flag_set(tp, HW_TSO_1);
13716 tg3_flag_set(tp, TSO_BUG);
13717 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13718 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13719 tg3_flag_clear(tp, TSO_BUG);
13720 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13721 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13722 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13723 tg3_flag_set(tp, TSO_BUG);
13724 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13725 tp->fw_needed = FIRMWARE_TG3TSO5;
13726 else
13727 tp->fw_needed = FIRMWARE_TG3TSO;
13730 /* Selectively allow TSO based on operating conditions */
13731 if (tg3_flag(tp, HW_TSO_1) ||
13732 tg3_flag(tp, HW_TSO_2) ||
13733 tg3_flag(tp, HW_TSO_3) ||
13734 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13735 tg3_flag_set(tp, TSO_CAPABLE);
13736 else {
13737 tg3_flag_clear(tp, TSO_CAPABLE);
13738 tg3_flag_clear(tp, TSO_BUG);
13739 tp->fw_needed = NULL;
13742 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13743 tp->fw_needed = FIRMWARE_TG3;
13745 tp->irq_max = 1;
13747 if (tg3_flag(tp, 5750_PLUS)) {
13748 tg3_flag_set(tp, SUPPORT_MSI);
13749 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13750 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13751 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13752 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13753 tp->pdev_peer == tp->pdev))
13754 tg3_flag_clear(tp, SUPPORT_MSI);
13756 if (tg3_flag(tp, 5755_PLUS) ||
13757 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13758 tg3_flag_set(tp, 1SHOT_MSI);
13761 if (tg3_flag(tp, 57765_PLUS)) {
13762 tg3_flag_set(tp, SUPPORT_MSIX);
13763 tp->irq_max = TG3_IRQ_MAX_VECS;
13767 if (tg3_flag(tp, 5755_PLUS))
13768 tg3_flag_set(tp, SHORT_DMA_BUG);
13770 if (tg3_flag(tp, 5717_PLUS))
13771 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13773 if (tg3_flag(tp, 57765_PLUS) &&
13774 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13775 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13777 if (!tg3_flag(tp, 5705_PLUS) ||
13778 tg3_flag(tp, 5780_CLASS) ||
13779 tg3_flag(tp, USE_JUMBO_BDFLAG))
13780 tg3_flag_set(tp, JUMBO_CAPABLE);
13782 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13783 &pci_state_reg);
13785 if (pci_is_pcie(tp->pdev)) {
13786 u16 lnkctl;
13788 tg3_flag_set(tp, PCI_EXPRESS);
13790 tp->pcie_readrq = 4096;
13791 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13792 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13793 tp->pcie_readrq = 2048;
13795 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13797 pci_read_config_word(tp->pdev,
13798 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
13799 &lnkctl);
13800 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13801 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13802 ASIC_REV_5906) {
13803 tg3_flag_clear(tp, HW_TSO_2);
13804 tg3_flag_clear(tp, TSO_CAPABLE);
13806 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13807 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13808 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13809 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13810 tg3_flag_set(tp, CLKREQ_BUG);
13811 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13812 tg3_flag_set(tp, L1PLLPD_EN);
13814 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13815 /* BCM5785 devices are effectively PCIe devices, and should
13816 * follow PCIe codepaths, but do not have a PCIe capabilities
13817 * section.
13819 tg3_flag_set(tp, PCI_EXPRESS);
13820 } else if (!tg3_flag(tp, 5705_PLUS) ||
13821 tg3_flag(tp, 5780_CLASS)) {
13822 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13823 if (!tp->pcix_cap) {
13824 dev_err(&tp->pdev->dev,
13825 "Cannot find PCI-X capability, aborting\n");
13826 return -EIO;
13829 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13830 tg3_flag_set(tp, PCIX_MODE);
13833 /* If we have an AMD 762 or VIA K8T800 chipset, write
13834 * reordering to the mailbox registers done by the host
13835 * controller can cause major troubles. We read back from
13836 * every mailbox register write to force the writes to be
13837 * posted to the chip in order.
13839 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13840 !tg3_flag(tp, PCI_EXPRESS))
13841 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13843 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13844 &tp->pci_cacheline_sz);
13845 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13846 &tp->pci_lat_timer);
13847 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13848 tp->pci_lat_timer < 64) {
13849 tp->pci_lat_timer = 64;
13850 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13851 tp->pci_lat_timer);
13854 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13855 /* 5700 BX chips need to have their TX producer index
13856 * mailboxes written twice to work around a bug.
13858 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13860 /* If we are in PCI-X mode, enable the register write workaround.
13862 * The workaround is to use indirect register accesses
13863 * for all chip writes not to mailbox registers.
13865 if (tg3_flag(tp, PCIX_MODE)) {
13866 u32 pm_reg;
13868 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13870 /* The chip can have its power management PCI config
13871 * space registers clobbered due to this bug.
13872 * So explicitly force the chip into D0 here.
13874 pci_read_config_dword(tp->pdev,
13875 tp->pm_cap + PCI_PM_CTRL,
13876 &pm_reg);
13877 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13878 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13879 pci_write_config_dword(tp->pdev,
13880 tp->pm_cap + PCI_PM_CTRL,
13881 pm_reg);
13883 /* Also, force SERR#/PERR# in PCI command. */
13884 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13885 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13886 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13890 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13891 tg3_flag_set(tp, PCI_HIGH_SPEED);
13892 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13893 tg3_flag_set(tp, PCI_32BIT);
13895 /* Chip-specific fixup from Broadcom driver */
13896 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13897 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13898 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13899 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13902 /* Default fast path register access methods */
13903 tp->read32 = tg3_read32;
13904 tp->write32 = tg3_write32;
13905 tp->read32_mbox = tg3_read32;
13906 tp->write32_mbox = tg3_write32;
13907 tp->write32_tx_mbox = tg3_write32;
13908 tp->write32_rx_mbox = tg3_write32;
13910 /* Various workaround register access methods */
13911 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13912 tp->write32 = tg3_write_indirect_reg32;
13913 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13914 (tg3_flag(tp, PCI_EXPRESS) &&
13915 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13917 * Back-to-back register writes can cause problems on these
13918 * chips; the workaround is to read back all reg writes
13919 * except those to mailbox regs.
13921 * See tg3_write_indirect_reg32().
13923 tp->write32 = tg3_write_flush_reg32;
13926 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13927 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13928 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13929 tp->write32_rx_mbox = tg3_write_flush_reg32;
13932 if (tg3_flag(tp, ICH_WORKAROUND)) {
13933 tp->read32 = tg3_read_indirect_reg32;
13934 tp->write32 = tg3_write_indirect_reg32;
13935 tp->read32_mbox = tg3_read_indirect_mbox;
13936 tp->write32_mbox = tg3_write_indirect_mbox;
13937 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13938 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13940 iounmap(tp->regs);
13941 tp->regs = NULL;
13943 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13944 pci_cmd &= ~PCI_COMMAND_MEMORY;
13945 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13947 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13948 tp->read32_mbox = tg3_read32_mbox_5906;
13949 tp->write32_mbox = tg3_write32_mbox_5906;
13950 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13951 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13954 if (tp->write32 == tg3_write_indirect_reg32 ||
13955 (tg3_flag(tp, PCIX_MODE) &&
13956 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13957 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13958 tg3_flag_set(tp, SRAM_USE_CONFIG);
13960 /* Get eeprom hw config before calling tg3_set_power_state().
13961 * In particular, the TG3_FLAG_IS_NIC flag must be
13962 * determined before calling tg3_set_power_state() so that
13963 * we know whether or not to switch out of Vaux power.
13964 * When the flag is set, it means that GPIO1 is used for eeprom
13965 * write protect and also implies that it is a LOM where GPIOs
13966 * are not used to switch power.
13968 tg3_get_eeprom_hw_cfg(tp);
13970 if (tg3_flag(tp, ENABLE_APE)) {
13971 /* Allow reads and writes to the
13972 * APE register and memory space.
13974 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13975 PCISTATE_ALLOW_APE_SHMEM_WR |
13976 PCISTATE_ALLOW_APE_PSPACE_WR;
13977 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13978 pci_state_reg);
13980 tg3_ape_lock_init(tp);
13983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13984 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13985 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13986 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13987 tg3_flag(tp, 57765_PLUS))
13988 tg3_flag_set(tp, CPMU_PRESENT);
13990 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13991 * GPIO1 driven high will bring 5700's external PHY out of reset.
13992 * It is also used as eeprom write protect on LOMs.
13994 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13995 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13996 tg3_flag(tp, EEPROM_WRITE_PROT))
13997 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13998 GRC_LCLCTRL_GPIO_OUTPUT1);
13999 /* Unused GPIO3 must be driven as output on 5752 because there
14000 * are no pull-up resistors on unused GPIO pins.
14002 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14003 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14005 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14006 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14007 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14008 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14010 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14011 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14012 /* Turn off the debug UART. */
14013 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14014 if (tg3_flag(tp, IS_NIC))
14015 /* Keep VMain power. */
14016 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14017 GRC_LCLCTRL_GPIO_OUTPUT0;
14020 /* Force the chip into D0. */
14021 err = tg3_power_up(tp);
14022 if (err) {
14023 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
14024 return err;
14027 /* Derive initial jumbo mode from MTU assigned in
14028 * ether_setup() via the alloc_etherdev() call
14030 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14031 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14033 /* Determine WakeOnLan speed to use. */
14034 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14035 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14036 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14037 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14038 tg3_flag_clear(tp, WOL_SPEED_100MB);
14039 } else {
14040 tg3_flag_set(tp, WOL_SPEED_100MB);
14043 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14044 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14046 /* A few boards don't want Ethernet@WireSpeed phy feature */
14047 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14048 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14049 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14050 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14051 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14052 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14053 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14055 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14056 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14057 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14058 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14059 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14061 if (tg3_flag(tp, 5705_PLUS) &&
14062 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14063 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14064 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14065 !tg3_flag(tp, 57765_PLUS)) {
14066 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14067 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14068 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14069 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14070 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14071 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14072 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14073 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14074 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14075 } else
14076 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14077 }
14079 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14080 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14081 tp->phy_otp = tg3_read_otp_phycfg(tp);
14082 if (tp->phy_otp == 0)
14083 tp->phy_otp = TG3_OTP_DEFAULT;
14084 }
14086 if (tg3_flag(tp, CPMU_PRESENT))
14087 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14088 else
14089 tp->mi_mode = MAC_MI_MODE_BASE;
14091 tp->coalesce_mode = 0;
14092 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14093 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14094 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14096 /* Set these bits to enable statistics workaround. */
14097 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14098 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14099 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14100 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14101 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14102 }
14104 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14105 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14106 tg3_flag_set(tp, USE_PHYLIB);
14108 err = tg3_mdio_init(tp);
14109 if (err)
14110 return err;
14112 /* Initialize data/descriptor byte/word swapping. */
14113 val = tr32(GRC_MODE);
14114 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14115 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14116 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14117 GRC_MODE_B2HRX_ENABLE |
14118 GRC_MODE_HTX2B_ENABLE |
14119 GRC_MODE_HOST_STACKUP);
14120 else
14121 val &= GRC_MODE_HOST_STACKUP;
14123 tw32(GRC_MODE, val | tp->grc_mode);
14125 tg3_switch_clocks(tp);
14127 /* Clear this out for sanity. */
14128 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14130 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14131 &pci_state_reg);
14132 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14133 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14134 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14136 if (chiprevid == CHIPREV_ID_5701_A0 ||
14137 chiprevid == CHIPREV_ID_5701_B0 ||
14138 chiprevid == CHIPREV_ID_5701_B2 ||
14139 chiprevid == CHIPREV_ID_5701_B5) {
14140 void __iomem *sram_base;
14142 /* Write some dummy words into the SRAM status block
14143 * area, see if it reads back correctly. If the return
14144 * value is bad, force enable the PCIX workaround.
14145 */
14146 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14148 writel(0x00000000, sram_base);
14149 writel(0x00000000, sram_base + 4);
14150 writel(0xffffffff, sram_base + 4);
14151 if (readl(sram_base) != 0x00000000)
14152 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14153 }
14154 }
14156 udelay(50);
14157 tg3_nvram_init(tp);
14159 grc_misc_cfg = tr32(GRC_MISC_CFG);
14160 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14162 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14163 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14164 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14165 tg3_flag_set(tp, IS_5788);
14167 if (!tg3_flag(tp, IS_5788) &&
14168 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14169 tg3_flag_set(tp, TAGGED_STATUS);
14170 if (tg3_flag(tp, TAGGED_STATUS)) {
14171 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14172 HOSTCC_MODE_CLRTICK_TXBD);
14174 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14175 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14176 tp->misc_host_ctrl);
14177 }
14179 /* Preserve the APE MAC_MODE bits */
14180 if (tg3_flag(tp, ENABLE_APE))
14181 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14182 else
14183 tp->mac_mode = TG3_DEF_MAC_MODE;
14185 /* these are limited to 10/100 only */
14186 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14187 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14188 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14189 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14190 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14191 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14192 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14193 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14194 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14195 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14196 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14197 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14198 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14199 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14200 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14201 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14203 err = tg3_phy_probe(tp);
14204 if (err) {
14205 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14206 /* ... but do not return immediately ... */
14207 tg3_mdio_fini(tp);
14208 }
14210 tg3_read_vpd(tp);
14211 tg3_read_fw_ver(tp);
14213 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14214 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14215 } else {
14216 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14217 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14218 else
14219 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14220 }
14222 /* 5700 {AX,BX} chips have a broken status block link
14223 * change bit implementation, so we must use the
14224 * status register in those cases.
14225 */
14226 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14227 tg3_flag_set(tp, USE_LINKCHG_REG);
14228 else
14229 tg3_flag_clear(tp, USE_LINKCHG_REG);
14231 /* The led_ctrl is set during tg3_phy_probe, here we might
14232 * have to force the link status polling mechanism based
14233 * upon subsystem IDs.
14234 */
14235 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14236 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14237 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14238 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14239 tg3_flag_set(tp, USE_LINKCHG_REG);
14240 }
14242 /* For all SERDES we poll the MAC status register. */
14243 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14244 tg3_flag_set(tp, POLL_SERDES);
14245 else
14246 tg3_flag_clear(tp, POLL_SERDES);
14248 tp->rx_offset = NET_IP_ALIGN;
14249 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14250 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14251 tg3_flag(tp, PCIX_MODE)) {
14252 tp->rx_offset = 0;
14253 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14254 tp->rx_copy_thresh = ~(u16)0;
14255 #endif
14256 }
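/* With rx_offset forced to 0 above, the IP header lands on a misaligned
 * address; raising the copy threshold to 0xffff presumably forces every
 * received packet through the copy path, so the unaligned DMA buffer is
 * never handed directly to the stack on CPUs that lack efficient
 * unaligned access.
 */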
14258 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14259 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14260 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14262 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14264 /* Increment the rx prod index on the rx std ring by at most
14265 * 8 for these chips to work around hw errata.
14266 */
14267 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14268 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14269 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14270 tp->rx_std_max_post = 8;
14272 if (tg3_flag(tp, ASPM_WORKAROUND))
14273 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14274 PCIE_PWR_MGMT_L1_THRESH_MSK;
14276 return err;
14277 }
14279 #ifdef CONFIG_SPARC
14280 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14281 {
14282 struct net_device *dev = tp->dev;
14283 struct pci_dev *pdev = tp->pdev;
14284 struct device_node *dp = pci_device_to_OF_node(pdev);
14285 const unsigned char *addr;
14286 int len;
14288 addr = of_get_property(dp, "local-mac-address", &len);
14289 if (addr && len == 6) {
14290 memcpy(dev->dev_addr, addr, 6);
14291 memcpy(dev->perm_addr, dev->dev_addr, 6);
14292 return 0;
14293 }
14294 return -ENODEV;
14295 }
14297 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14298 {
14299 struct net_device *dev = tp->dev;
14301 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14302 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14303 return 0;
14304 }
14305 #endif
14307 static int __devinit tg3_get_device_address(struct tg3 *tp)
14308 {
14309 struct net_device *dev = tp->dev;
14310 u32 hi, lo, mac_offset;
14311 int addr_ok = 0;
14313 #ifdef CONFIG_SPARC
14314 if (!tg3_get_macaddr_sparc(tp))
14315 return 0;
14316 #endif
14318 mac_offset = 0x7c;
14319 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14320 tg3_flag(tp, 5780_CLASS)) {
14321 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14322 mac_offset = 0xcc;
14323 if (tg3_nvram_lock(tp))
14324 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14325 else
14326 tg3_nvram_unlock(tp);
14327 } else if (tg3_flag(tp, 5717_PLUS)) {
14328 if (PCI_FUNC(tp->pdev->devfn) & 1)
14329 mac_offset = 0xcc;
14330 if (PCI_FUNC(tp->pdev->devfn) > 1)
14331 mac_offset += 0x18c;
14332 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14333 mac_offset = 0x10;
14335 /* First try to get it from MAC address mailbox. */
14336 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14337 if ((hi >> 16) == 0x484b) {
14338 dev->dev_addr[0] = (hi >> 8) & 0xff;
14339 dev->dev_addr[1] = (hi >> 0) & 0xff;
14341 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14342 dev->dev_addr[2] = (lo >> 24) & 0xff;
14343 dev->dev_addr[3] = (lo >> 16) & 0xff;
14344 dev->dev_addr[4] = (lo >> 8) & 0xff;
14345 dev->dev_addr[5] = (lo >> 0) & 0xff;
14347 /* Some old bootcode may report a 0 MAC address in SRAM */
14348 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14349 }
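/* The 0x484b tag checked above is ASCII "HK", apparently the signature
 * the bootcode uses to mark a valid MAC address mailbox entry in NIC
 * SRAM.
 */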
14350 if (!addr_ok) {
14351 /* Next, try NVRAM. */
14352 if (!tg3_flag(tp, NO_NVRAM) &&
14353 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14354 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14355 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14356 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14357 }
14358 /* Finally just fetch it out of the MAC control regs. */
14359 else {
14360 hi = tr32(MAC_ADDR_0_HIGH);
14361 lo = tr32(MAC_ADDR_0_LOW);
14363 dev->dev_addr[5] = lo & 0xff;
14364 dev->dev_addr[4] = (lo >> 8) & 0xff;
14365 dev->dev_addr[3] = (lo >> 16) & 0xff;
14366 dev->dev_addr[2] = (lo >> 24) & 0xff;
14367 dev->dev_addr[1] = hi & 0xff;
14368 dev->dev_addr[0] = (hi >> 8) & 0xff;
14369 }
14370 }
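/* Worked example of the decode above: hi = 0x00001a2b and
 * lo = 0x3c4d5e6f yield the MAC address 1a:2b:3c:4d:5e:6f -- the high
 * register carries the first two octets and the low register the
 * remaining four.
 */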
14372 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14373 #ifdef CONFIG_SPARC
14374 if (!tg3_get_default_macaddr_sparc(tp))
14375 return 0;
14376 #endif
14377 return -EINVAL;
14378 }
14379 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14380 return 0;
14381 }
14383 #define BOUNDARY_SINGLE_CACHELINE 1
14384 #define BOUNDARY_MULTI_CACHELINE 2
14386 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14387 {
14388 int cacheline_size;
14389 u8 byte;
14390 int goal;
14392 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14393 if (byte == 0)
14394 cacheline_size = 1024;
14395 else
14396 cacheline_size = (int) byte * 4;
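/* PCI_CACHE_LINE_SIZE is encoded in 32-bit words, so a register value
 * of 0x10 (16 words) works out to 16 * 4 = 64 bytes; a value of 0
 * usually means firmware never programmed it, and the 1024-byte
 * fallback above is simply the most conservative assumption.
 */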
14398 /* On 5703 and later chips, the boundary bits have no
14399 * effect.
14400 */
14401 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14402 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14403 !tg3_flag(tp, PCI_EXPRESS))
14404 goto out;
14406 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14407 goal = BOUNDARY_MULTI_CACHELINE;
14408 #else
14409 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14410 goal = BOUNDARY_SINGLE_CACHELINE;
14411 #else
14412 goal = 0;
14413 #endif
14414 #endif
14416 if (tg3_flag(tp, 57765_PLUS)) {
14417 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14418 goto out;
14419 }
14421 if (!goal)
14422 goto out;
14424 /* PCI controllers on most RISC systems tend to disconnect
14425 * when a device tries to burst across a cache-line boundary.
14426 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14428 * Unfortunately, for PCI-E there are only limited
14429 * write-side controls for this, and thus for reads
14430 * we will still get the disconnects. We'll also waste
14431 * these PCI cycles for both read and write for chips
14432 * other than 5700 and 5701 which do not implement the
14433 * boundary bits.
14434 */
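/* For example, a PCI-X host with 64-byte cache lines and
 * goal == BOUNDARY_SINGLE_CACHELINE falls into the first group of the
 * switch below and gets the 128-byte read/write boundary encoding,
 * apparently the smallest PCI-X boundary offered here that still
 * covers a whole cache line.
 */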
14435 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14436 switch (cacheline_size) {
14437 case 16:
14438 case 32:
14439 case 64:
14440 case 128:
14441 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14442 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14443 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14444 } else {
14445 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14446 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14447 }
14448 break;
14450 case 256:
14451 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14452 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14453 break;
14455 default:
14456 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14457 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14458 break;
14459 }
14460 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14461 switch (cacheline_size) {
14462 case 16:
14463 case 32:
14464 case 64:
14465 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14466 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14467 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14468 break;
14469 }
14470 /* fallthrough */
14471 case 128:
14472 default:
14473 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14474 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14475 break;
14476 }
14477 } else {
14478 switch (cacheline_size) {
14479 case 16:
14480 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14481 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14482 DMA_RWCTRL_WRITE_BNDRY_16);
14483 break;
14484 }
14485 /* fallthrough */
14486 case 32:
14487 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14488 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14489 DMA_RWCTRL_WRITE_BNDRY_32);
14490 break;
14491 }
14492 /* fallthrough */
14493 case 64:
14494 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14495 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14496 DMA_RWCTRL_WRITE_BNDRY_64);
14497 break;
14498 }
14499 /* fallthrough */
14500 case 128:
14501 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14502 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14503 DMA_RWCTRL_WRITE_BNDRY_128);
14504 break;
14505 }
14506 /* fallthrough */
14507 case 256:
14508 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14509 DMA_RWCTRL_WRITE_BNDRY_256);
14510 break;
14511 case 512:
14512 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14513 DMA_RWCTRL_WRITE_BNDRY_512);
14514 break;
14515 case 1024:
14516 default:
14517 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14518 DMA_RWCTRL_WRITE_BNDRY_1024);
14519 break;
14520 }
14521 }
14523 out:
14524 return val;
14525 }
14527 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14528 {
14529 struct tg3_internal_buffer_desc test_desc;
14530 u32 sram_dma_descs;
14531 int i, ret;
14533 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14535 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14536 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14537 tw32(RDMAC_STATUS, 0);
14538 tw32(WDMAC_STATUS, 0);
14540 tw32(BUFMGR_MODE, 0);
14541 tw32(FTQ_RESET, 0);
14543 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14544 test_desc.addr_lo = buf_dma & 0xffffffff;
14545 test_desc.nic_mbuf = 0x00002100;
14546 test_desc.len = size;
14548 /*
14549 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14550 * the *second* time the tg3 driver was getting loaded after an
14551 * initial scan.
14553 * Broadcom tells me:
14554 * ...the DMA engine is connected to the GRC block and a DMA
14555 * reset may affect the GRC block in some unpredictable way...
14556 * The behavior of resets to individual blocks has not been tested.
14558 * Broadcom noted the GRC reset will also reset all sub-components.
14559 */
14560 if (to_device) {
14561 test_desc.cqid_sqid = (13 << 8) | 2;
14563 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14564 udelay(40);
14565 } else {
14566 test_desc.cqid_sqid = (16 << 8) | 7;
14568 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14569 udelay(40);
14570 }
14571 test_desc.flags = 0x00000005;
14573 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14574 u32 val;
14576 val = *(((u32 *)&test_desc) + i);
14577 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14578 sram_dma_descs + (i * sizeof(u32)));
14579 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14580 }
14581 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14583 if (to_device)
14584 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14585 else
14586 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14588 ret = -ENODEV;
14589 for (i = 0; i < 40; i++) {
14590 u32 val;
14592 if (to_device)
14593 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14594 else
14595 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14596 if ((val & 0xffff) == sram_dma_descs) {
14597 ret = 0;
14598 break;
14599 }
14601 udelay(100);
14602 }
14604 return ret;
14605 }
14607 #define TEST_BUFFER_SIZE 0x2000
14609 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14610 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14611 { },
14612 };
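/* The Apple UniNorth bridge is the known case of a chipset that can
 * pass the DMA test yet still need the 16-byte write boundary; it is
 * matched against this table near the end of tg3_test_dma() below.
 */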
14614 static int __devinit tg3_test_dma(struct tg3 *tp)
14615 {
14616 dma_addr_t buf_dma;
14617 u32 *buf, saved_dma_rwctrl;
14618 int ret = 0;
14620 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14621 &buf_dma, GFP_KERNEL);
14622 if (!buf) {
14623 ret = -ENOMEM;
14624 goto out_nofree;
14625 }
14627 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14628 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14630 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14632 if (tg3_flag(tp, 57765_PLUS))
14633 goto out;
14635 if (tg3_flag(tp, PCI_EXPRESS)) {
14636 /* DMA read watermark not used on PCIE */
14637 tp->dma_rwctrl |= 0x00180000;
14638 } else if (!tg3_flag(tp, PCIX_MODE)) {
14639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14640 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14641 tp->dma_rwctrl |= 0x003f0000;
14642 else
14643 tp->dma_rwctrl |= 0x003f000f;
14644 } else {
14645 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14646 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14647 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14648 u32 read_water = 0x7;
14650 /* If the 5704 is behind the EPB bridge, we can
14651 * do the less restrictive ONE_DMA workaround for
14652 * better performance.
14653 */
14654 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14655 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14656 tp->dma_rwctrl |= 0x8000;
14657 else if (ccval == 0x6 || ccval == 0x7)
14658 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14660 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14661 read_water = 4;
14662 /* Set bit 23 to enable PCIX hw bug fix */
14663 tp->dma_rwctrl |=
14664 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14665 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14666 (1 << 23);
14667 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14668 /* 5780 always in PCIX mode */
14669 tp->dma_rwctrl |= 0x00144000;
14670 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14671 /* 5714 always in PCIX mode */
14672 tp->dma_rwctrl |= 0x00148000;
14673 } else {
14674 tp->dma_rwctrl |= 0x001b000f;
14675 }
14676 }
14678 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14679 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14680 tp->dma_rwctrl &= 0xfffffff0;
14682 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14683 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14684 /* Remove this if it causes problems for some boards. */
14685 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14687 /* On 5700/5701 chips, we need to set this bit.
14688 * Otherwise the chip will issue cacheline transactions
14689 * to streamable DMA memory with not all the byte
14690 * enables turned on. This is an error on several
14691 * RISC PCI controllers, in particular sparc64.
14693 * On 5703/5704 chips, this bit has been reassigned
14694 * a different meaning. In particular, it is used
14695 * on those chips to enable a PCI-X workaround.
14696 */
14697 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14698 }
14700 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14702 #if 0
14703 /* Unneeded, already done by tg3_get_invariants. */
14704 tg3_switch_clocks(tp);
14705 #endif
14707 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14708 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14709 goto out;
14711 /* It is best to perform DMA test with maximum write burst size
14712 * to expose the 5700/5701 write DMA bug.
14713 */
14714 saved_dma_rwctrl = tp->dma_rwctrl;
14715 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14716 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14718 while (1) {
14719 u32 *p = buf, i;
14721 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14722 p[i] = i;
14724 /* Send the buffer to the chip. */
14725 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14726 if (ret) {
14727 dev_err(&tp->pdev->dev,
14728 "%s: Buffer write failed. err = %d\n",
14729 __func__, ret);
14730 break;
14731 }
14733 #if 0
14734 /* validate data reached card RAM correctly. */
14735 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14736 u32 val;
14737 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14738 if (le32_to_cpu(val) != p[i]) {
14739 dev_err(&tp->pdev->dev,
14740 "%s: Buffer corrupted on device! "
14741 "(%d != %d)\n", __func__, val, i);
14742 /* ret = -ENODEV here? */
14743 }
14744 p[i] = 0;
14745 }
14746 #endif
14747 /* Now read it back. */
14748 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14749 if (ret) {
14750 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14751 "err = %d\n", __func__, ret);
14752 break;
14753 }
14755 /* Verify it. */
14756 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14757 if (p[i] == i)
14758 continue;
14760 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14761 DMA_RWCTRL_WRITE_BNDRY_16) {
14762 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14763 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14764 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14765 break;
14766 } else {
14767 dev_err(&tp->pdev->dev,
14768 "%s: Buffer corrupted on read back! "
14769 "(%d != %d)\n", __func__, p[i], i);
14770 ret = -ENODEV;
14771 goto out;
14772 }
14773 }
14775 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14776 /* Success. */
14777 ret = 0;
14778 break;
14779 }
14780 }
14781 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14782 DMA_RWCTRL_WRITE_BNDRY_16) {
14783 /* DMA test passed without adjusting DMA boundary,
14784 * now look for chipsets that are known to expose the
14785 * DMA bug without failing the test.
14786 */
14787 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14788 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14789 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14790 } else {
14791 /* Safe to use the calculated DMA boundary. */
14792 tp->dma_rwctrl = saved_dma_rwctrl;
14793 }
14795 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14796 }
14798 out:
14799 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14800 out_nofree:
14801 return ret;
14802 }
14804 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14805 {
14806 if (tg3_flag(tp, 57765_PLUS)) {
14807 tp->bufmgr_config.mbuf_read_dma_low_water =
14808 DEFAULT_MB_RDMA_LOW_WATER_5705;
14809 tp->bufmgr_config.mbuf_mac_rx_low_water =
14810 DEFAULT_MB_MACRX_LOW_WATER_57765;
14811 tp->bufmgr_config.mbuf_high_water =
14812 DEFAULT_MB_HIGH_WATER_57765;
14814 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14815 DEFAULT_MB_RDMA_LOW_WATER_5705;
14816 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14817 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14818 tp->bufmgr_config.mbuf_high_water_jumbo =
14819 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14820 } else if (tg3_flag(tp, 5705_PLUS)) {
14821 tp->bufmgr_config.mbuf_read_dma_low_water =
14822 DEFAULT_MB_RDMA_LOW_WATER_5705;
14823 tp->bufmgr_config.mbuf_mac_rx_low_water =
14824 DEFAULT_MB_MACRX_LOW_WATER_5705;
14825 tp->bufmgr_config.mbuf_high_water =
14826 DEFAULT_MB_HIGH_WATER_5705;
14827 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14828 tp->bufmgr_config.mbuf_mac_rx_low_water =
14829 DEFAULT_MB_MACRX_LOW_WATER_5906;
14830 tp->bufmgr_config.mbuf_high_water =
14831 DEFAULT_MB_HIGH_WATER_5906;
14832 }
14834 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14835 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14836 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14837 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14838 tp->bufmgr_config.mbuf_high_water_jumbo =
14839 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14840 } else {
14841 tp->bufmgr_config.mbuf_read_dma_low_water =
14842 DEFAULT_MB_RDMA_LOW_WATER;
14843 tp->bufmgr_config.mbuf_mac_rx_low_water =
14844 DEFAULT_MB_MACRX_LOW_WATER;
14845 tp->bufmgr_config.mbuf_high_water =
14846 DEFAULT_MB_HIGH_WATER;
14848 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14849 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14850 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14851 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14852 tp->bufmgr_config.mbuf_high_water_jumbo =
14853 DEFAULT_MB_HIGH_WATER_JUMBO;
14854 }
14856 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14857 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14858 }
14860 static char * __devinit tg3_phy_string(struct tg3 *tp)
14861 {
14862 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14863 case TG3_PHY_ID_BCM5400: return "5400";
14864 case TG3_PHY_ID_BCM5401: return "5401";
14865 case TG3_PHY_ID_BCM5411: return "5411";
14866 case TG3_PHY_ID_BCM5701: return "5701";
14867 case TG3_PHY_ID_BCM5703: return "5703";
14868 case TG3_PHY_ID_BCM5704: return "5704";
14869 case TG3_PHY_ID_BCM5705: return "5705";
14870 case TG3_PHY_ID_BCM5750: return "5750";
14871 case TG3_PHY_ID_BCM5752: return "5752";
14872 case TG3_PHY_ID_BCM5714: return "5714";
14873 case TG3_PHY_ID_BCM5780: return "5780";
14874 case TG3_PHY_ID_BCM5755: return "5755";
14875 case TG3_PHY_ID_BCM5787: return "5787";
14876 case TG3_PHY_ID_BCM5784: return "5784";
14877 case TG3_PHY_ID_BCM5756: return "5722/5756";
14878 case TG3_PHY_ID_BCM5906: return "5906";
14879 case TG3_PHY_ID_BCM5761: return "5761";
14880 case TG3_PHY_ID_BCM5718C: return "5718C";
14881 case TG3_PHY_ID_BCM5718S: return "5718S";
14882 case TG3_PHY_ID_BCM57765: return "57765";
14883 case TG3_PHY_ID_BCM5719C: return "5719C";
14884 case TG3_PHY_ID_BCM5720C: return "5720C";
14885 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14886 case 0: return "serdes";
14887 default: return "unknown";
14888 }
14889 }
14891 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14892 {
14893 if (tg3_flag(tp, PCI_EXPRESS)) {
14894 strcpy(str, "PCI Express");
14895 return str;
14896 } else if (tg3_flag(tp, PCIX_MODE)) {
14897 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14899 strcpy(str, "PCIX:");
14901 if ((clock_ctrl == 7) ||
14902 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14903 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14904 strcat(str, "133MHz");
14905 else if (clock_ctrl == 0)
14906 strcat(str, "33MHz");
14907 else if (clock_ctrl == 2)
14908 strcat(str, "50MHz");
14909 else if (clock_ctrl == 4)
14910 strcat(str, "66MHz");
14911 else if (clock_ctrl == 6)
14912 strcat(str, "100MHz");
14913 } else {
14914 strcpy(str, "PCI:");
14915 if (tg3_flag(tp, PCI_HIGH_SPEED))
14916 strcat(str, "66MHz");
14917 else
14918 strcat(str, "33MHz");
14919 }
14920 if (tg3_flag(tp, PCI_32BIT))
14921 strcat(str, ":32-bit");
14922 else
14923 strcat(str, ":64-bit");
14924 return str;
14925 }
14927 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14928 {
14929 struct pci_dev *peer;
14930 unsigned int func, devnr = tp->pdev->devfn & ~7;
14932 for (func = 0; func < 8; func++) {
14933 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14934 if (peer && peer != tp->pdev)
14935 break;
14936 pci_dev_put(peer);
14937 }
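/* devfn & ~7 clears the three function-number bits, giving function 0
 * of the same slot; e.g. devfn 0x09 (slot 1, function 1) makes the
 * loop above probe slot 1 functions 0-7 for the other port of a
 * dual-port board.
 */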
14938 /* 5704 can be configured in single-port mode, set peer to
14939 * tp->pdev in that case.
14940 */
14941 if (!peer) {
14942 peer = tp->pdev;
14943 return peer;
14944 }
14946 /*
14947 * We don't need to keep the refcount elevated; there's no way
14948 * to remove one half of this device without removing the other
14949 */
14950 pci_dev_put(peer);
14952 return peer;
14953 }
14955 static void __devinit tg3_init_coal(struct tg3 *tp)
14956 {
14957 struct ethtool_coalesce *ec = &tp->coal;
14959 memset(ec, 0, sizeof(*ec));
14960 ec->cmd = ETHTOOL_GCOALESCE;
14961 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14962 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14963 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14964 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14965 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14966 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14967 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14968 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14969 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14971 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14972 HOSTCC_MODE_CLRTICK_TXBD)) {
14973 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14974 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14975 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14976 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14977 }
14979 if (tg3_flag(tp, 5705_PLUS)) {
14980 ec->rx_coalesce_usecs_irq = 0;
14981 ec->tx_coalesce_usecs_irq = 0;
14982 ec->stats_block_coalesce_usecs = 0;
14983 }
14984 }
14986 static const struct net_device_ops tg3_netdev_ops = {
14987 .ndo_open = tg3_open,
14988 .ndo_stop = tg3_close,
14989 .ndo_start_xmit = tg3_start_xmit,
14990 .ndo_get_stats64 = tg3_get_stats64,
14991 .ndo_validate_addr = eth_validate_addr,
14992 .ndo_set_multicast_list = tg3_set_rx_mode,
14993 .ndo_set_mac_address = tg3_set_mac_addr,
14994 .ndo_do_ioctl = tg3_ioctl,
14995 .ndo_tx_timeout = tg3_tx_timeout,
14996 .ndo_change_mtu = tg3_change_mtu,
14997 .ndo_fix_features = tg3_fix_features,
14998 .ndo_set_features = tg3_set_features,
14999 #ifdef CONFIG_NET_POLL_CONTROLLER
15000 .ndo_poll_controller = tg3_poll_controller,
15001 #endif
15002 };
15004 static int __devinit tg3_init_one(struct pci_dev *pdev,
15005 const struct pci_device_id *ent)
15006 {
15007 struct net_device *dev;
15008 struct tg3 *tp;
15009 int i, err, pm_cap;
15010 u32 sndmbx, rcvmbx, intmbx;
15011 char str[40];
15012 u64 dma_mask, persist_dma_mask;
15013 u32 features = 0;
15015 printk_once(KERN_INFO "%s\n", version);
15017 err = pci_enable_device(pdev);
15018 if (err) {
15019 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15020 return err;
15021 }
15023 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15024 if (err) {
15025 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15026 goto err_out_disable_pdev;
15027 }
15029 pci_set_master(pdev);
15031 /* Find power-management capability. */
15032 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15033 if (pm_cap == 0) {
15034 dev_err(&pdev->dev,
15035 "Cannot find Power Management capability, aborting\n");
15036 err = -EIO;
15037 goto err_out_free_res;
15038 }
15040 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15041 if (!dev) {
15042 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15043 err = -ENOMEM;
15044 goto err_out_free_res;
15045 }
15047 SET_NETDEV_DEV(dev, &pdev->dev);
15049 tp = netdev_priv(dev);
15050 tp->pdev = pdev;
15051 tp->dev = dev;
15052 tp->pm_cap = pm_cap;
15053 tp->rx_mode = TG3_DEF_RX_MODE;
15054 tp->tx_mode = TG3_DEF_TX_MODE;
15056 if (tg3_debug > 0)
15057 tp->msg_enable = tg3_debug;
15058 else
15059 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15061 /* The word/byte swap controls here control register access byte
15062 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15063 * setting below.
15064 */
15065 tp->misc_host_ctrl =
15066 MISC_HOST_CTRL_MASK_PCI_INT |
15067 MISC_HOST_CTRL_WORD_SWAP |
15068 MISC_HOST_CTRL_INDIR_ACCESS |
15069 MISC_HOST_CTRL_PCISTATE_RW;
15071 /* The NONFRM (non-frame) byte/word swap controls take effect
15072 * on descriptor entries, anything which isn't packet data.
15074 * The StrongARM chips on the board (one for tx, one for rx)
15075 * are running in big-endian mode.
15076 */
15077 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15078 GRC_MODE_WSWAP_NONFRM_DATA);
15079 #ifdef __BIG_ENDIAN
15080 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15081 #endif
15082 spin_lock_init(&tp->lock);
15083 spin_lock_init(&tp->indirect_lock);
15084 INIT_WORK(&tp->reset_task, tg3_reset_task);
15086 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15087 if (!tp->regs) {
15088 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15089 err = -ENOMEM;
15090 goto err_out_free_dev;
15091 }
15093 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15094 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15095 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15096 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15097 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15098 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15099 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15100 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15101 tg3_flag_set(tp, ENABLE_APE);
15102 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15103 if (!tp->aperegs) {
15104 dev_err(&pdev->dev,
15105 "Cannot map APE registers, aborting\n");
15106 err = -ENOMEM;
15107 goto err_out_iounmap;
15108 }
15109 }
15111 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15112 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15114 dev->ethtool_ops = &tg3_ethtool_ops;
15115 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15116 dev->netdev_ops = &tg3_netdev_ops;
15117 dev->irq = pdev->irq;
15119 err = tg3_get_invariants(tp);
15120 if (err) {
15121 dev_err(&pdev->dev,
15122 "Problem fetching invariants of chip, aborting\n");
15123 goto err_out_apeunmap;
15124 }
15126 /* The EPB bridge inside 5714, 5715, and 5780 and any
15127 * device behind the EPB cannot support DMA addresses > 40-bit.
15128 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15129 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15130 * do DMA address check in tg3_start_xmit().
15131 */
15132 if (tg3_flag(tp, IS_5788))
15133 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15134 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15135 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15136 #ifdef CONFIG_HIGHMEM
15137 dma_mask = DMA_BIT_MASK(64);
15138 #endif
15139 } else
15140 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15142 /* Configure DMA attributes. */
15143 if (dma_mask > DMA_BIT_MASK(32)) {
15144 err = pci_set_dma_mask(pdev, dma_mask);
15145 if (!err) {
15146 features |= NETIF_F_HIGHDMA;
15147 err = pci_set_consistent_dma_mask(pdev,
15148 persist_dma_mask);
15149 if (err < 0) {
15150 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15151 "DMA for consistent allocations\n");
15152 goto err_out_apeunmap;
15153 }
15154 }
15155 }
15156 if (err || dma_mask == DMA_BIT_MASK(32)) {
15157 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15158 if (err) {
15159 dev_err(&pdev->dev,
15160 "No usable DMA configuration, aborting\n");
15161 goto err_out_apeunmap;
15162 }
15163 }
15165 tg3_init_bufmgr_config(tp);
15167 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15169 /* 5700 B0 chips do not support checksumming correctly due
15170 * to hardware bugs.
15171 */
15172 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15173 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15175 if (tg3_flag(tp, 5755_PLUS))
15176 features |= NETIF_F_IPV6_CSUM;
15177 }
15179 /* TSO is on by default on chips that support hardware TSO.
15180 * Firmware TSO on older chips gives lower performance, so it
15181 * is off by default, but can be enabled using ethtool.
15182 */
15183 if ((tg3_flag(tp, HW_TSO_1) ||
15184 tg3_flag(tp, HW_TSO_2) ||
15185 tg3_flag(tp, HW_TSO_3)) &&
15186 (features & NETIF_F_IP_CSUM))
15187 features |= NETIF_F_TSO;
15188 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15189 if (features & NETIF_F_IPV6_CSUM)
15190 features |= NETIF_F_TSO6;
15191 if (tg3_flag(tp, HW_TSO_3) ||
15192 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15193 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15194 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15195 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15196 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15197 features |= NETIF_F_TSO_ECN;
15198 }
15200 dev->features |= features;
15201 dev->vlan_features |= features;
15203 /*
15204 * Add loopback capability only for a subset of devices that support
15205 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15206 * loopback for the remaining devices.
15207 */
15208 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15209 !tg3_flag(tp, CPMU_PRESENT))
15210 /* Add the loopback capability */
15211 features |= NETIF_F_LOOPBACK;
15213 dev->hw_features |= features;
15215 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15216 !tg3_flag(tp, TSO_CAPABLE) &&
15217 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15218 tg3_flag_set(tp, MAX_RXPEND_64);
15219 tp->rx_pending = 63;
15220 }
15222 err = tg3_get_device_address(tp);
15223 if (err) {
15224 dev_err(&pdev->dev,
15225 "Could not obtain valid ethernet address, aborting\n");
15226 goto err_out_apeunmap;
15227 }
15229 /*
15230 * Reset chip in case UNDI or EFI driver did not shut down DMA.
15231 * The DMA self test will enable WDMAC and we'll see (spurious)
15232 * pending DMA on the PCI bus at that point.
15233 */
15234 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15235 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15236 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15237 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15238 }
15240 err = tg3_test_dma(tp);
15241 if (err) {
15242 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15243 goto err_out_apeunmap;
15244 }
15246 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15247 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15248 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15249 for (i = 0; i < tp->irq_max; i++) {
15250 struct tg3_napi *tnapi = &tp->napi[i];
15252 tnapi->tp = tp;
15253 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15255 tnapi->int_mbox = intmbx;
15256 if (i < 4)
15257 intmbx += 0x8;
15258 else
15259 intmbx += 0x4;
15261 tnapi->consmbox = rcvmbx;
15262 tnapi->prodmbox = sndmbx;
15264 if (i)
15265 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15266 else
15267 tnapi->coal_now = HOSTCC_MODE_NOW;
15269 if (!tg3_flag(tp, SUPPORT_MSIX))
15270 break;
15272 /*
15273 * If we support MSIX, we'll be using RSS. If we're using
15274 * RSS, the first vector only handles link interrupts and the
15275 * remaining vectors handle rx and tx interrupts. Reuse the
15276 * mailbox values for the next iteration. The values we setup
15277 * above are still useful for the single vectored mode.
15278 */
15279 if (!i)
15280 continue;
15282 rcvmbx += 0x8;
15284 if (sndmbx & 0x4)
15285 sndmbx -= 0x4;
15286 else
15287 sndmbx += 0xc;
15288 }
15290 tg3_init_coal(tp);
15292 pci_set_drvdata(pdev, dev);
15294 err = register_netdev(dev);
15295 if (err) {
15296 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15297 goto err_out_apeunmap;
15298 }
15300 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15301 tp->board_part_number,
15302 tp->pci_chip_rev_id,
15303 tg3_bus_string(tp, str),
15304 dev->dev_addr);
15306 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15307 struct phy_device *phydev;
15308 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15309 netdev_info(dev,
15310 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15311 phydev->drv->name, dev_name(&phydev->dev));
15312 } else {
15313 char *ethtype;
15315 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15316 ethtype = "10/100Base-TX";
15317 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15318 ethtype = "1000Base-SX";
15319 else
15320 ethtype = "10/100/1000Base-T";
15322 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15323 "(WireSpeed[%d], EEE[%d])\n",
15324 tg3_phy_string(tp), ethtype,
15325 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15326 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15327 }
15329 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15330 (dev->features & NETIF_F_RXCSUM) != 0,
15331 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15332 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15333 tg3_flag(tp, ENABLE_ASF) != 0,
15334 tg3_flag(tp, TSO_CAPABLE) != 0);
15335 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15336 tp->dma_rwctrl,
15337 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15338 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15340 pci_save_state(pdev);
15342 return 0;
15344 err_out_apeunmap:
15345 if (tp->aperegs) {
15346 iounmap(tp->aperegs);
15347 tp->aperegs = NULL;
15348 }
15350 err_out_iounmap:
15351 if (tp->regs) {
15352 iounmap(tp->regs);
15353 tp->regs = NULL;
15354 }
15356 err_out_free_dev:
15357 free_netdev(dev);
15359 err_out_free_res:
15360 pci_release_regions(pdev);
15362 err_out_disable_pdev:
15363 pci_disable_device(pdev);
15364 pci_set_drvdata(pdev, NULL);
15365 return err;
15366 }
15368 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15369 {
15370 struct net_device *dev = pci_get_drvdata(pdev);
15372 if (dev) {
15373 struct tg3 *tp = netdev_priv(dev);
15375 if (tp->fw)
15376 release_firmware(tp->fw);
15378 cancel_work_sync(&tp->reset_task);
15380 if (!tg3_flag(tp, USE_PHYLIB)) {
15381 tg3_phy_fini(tp);
15382 tg3_mdio_fini(tp);
15383 }
15385 unregister_netdev(dev);
15386 if (tp->aperegs) {
15387 iounmap(tp->aperegs);
15388 tp->aperegs = NULL;
15389 }
15390 if (tp->regs) {
15391 iounmap(tp->regs);
15392 tp->regs = NULL;
15393 }
15394 free_netdev(dev);
15395 pci_release_regions(pdev);
15396 pci_disable_device(pdev);
15397 pci_set_drvdata(pdev, NULL);
15398 }
15399 }
15401 #ifdef CONFIG_PM_SLEEP
15402 static int tg3_suspend(struct device *device)
15403 {
15404 struct pci_dev *pdev = to_pci_dev(device);
15405 struct net_device *dev = pci_get_drvdata(pdev);
15406 struct tg3 *tp = netdev_priv(dev);
15407 int err;
15409 if (!netif_running(dev))
15410 return 0;
15412 flush_work_sync(&tp->reset_task);
15413 tg3_phy_stop(tp);
15414 tg3_netif_stop(tp);
15416 del_timer_sync(&tp->timer);
15418 tg3_full_lock(tp, 1);
15419 tg3_disable_ints(tp);
15420 tg3_full_unlock(tp);
15422 netif_device_detach(dev);
15424 tg3_full_lock(tp, 0);
15425 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15426 tg3_flag_clear(tp, INIT_COMPLETE);
15427 tg3_full_unlock(tp);
15429 err = tg3_power_down_prepare(tp);
15430 if (err) {
15431 int err2;
15433 tg3_full_lock(tp, 0);
15435 tg3_flag_set(tp, INIT_COMPLETE);
15436 err2 = tg3_restart_hw(tp, 1);
15437 if (err2)
15438 goto out;
15440 tp->timer.expires = jiffies + tp->timer_offset;
15441 add_timer(&tp->timer);
15443 netif_device_attach(dev);
15444 tg3_netif_start(tp);
15446 out:
15447 tg3_full_unlock(tp);
15449 if (!err2)
15450 tg3_phy_start(tp);
15451 }
15453 return err;
15454 }
15456 static int tg3_resume(struct device *device)
15457 {
15458 struct pci_dev *pdev = to_pci_dev(device);
15459 struct net_device *dev = pci_get_drvdata(pdev);
15460 struct tg3 *tp = netdev_priv(dev);
15461 int err;
15463 if (!netif_running(dev))
15464 return 0;
15466 netif_device_attach(dev);
15468 tg3_full_lock(tp, 0);
15470 tg3_flag_set(tp, INIT_COMPLETE);
15471 err = tg3_restart_hw(tp, 1);
15472 if (err)
15473 goto out;
15475 tp->timer.expires = jiffies + tp->timer_offset;
15476 add_timer(&tp->timer);
15478 tg3_netif_start(tp);
15480 out:
15481 tg3_full_unlock(tp);
15483 if (!err)
15484 tg3_phy_start(tp);
15486 return err;
15487 }
15489 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15490 #define TG3_PM_OPS (&tg3_pm_ops)
15492 #else
15494 #define TG3_PM_OPS NULL
15496 #endif /* CONFIG_PM_SLEEP */
15498 /**
15499 * tg3_io_error_detected - called when PCI error is detected
15500 * @pdev: Pointer to PCI device
15501 * @state: The current pci connection state
15503 * This function is called after a PCI bus error affecting
15504 * this device has been detected.
15505 */
15506 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15507 pci_channel_state_t state)
15508 {
15509 struct net_device *netdev = pci_get_drvdata(pdev);
15510 struct tg3 *tp = netdev_priv(netdev);
15511 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15513 netdev_info(netdev, "PCI I/O error detected\n");
15515 rtnl_lock();
15517 if (!netif_running(netdev))
15518 goto done;
15520 tg3_phy_stop(tp);
15522 tg3_netif_stop(tp);
15524 del_timer_sync(&tp->timer);
15525 tg3_flag_clear(tp, RESTART_TIMER);
15527 /* Want to make sure that the reset task doesn't run */
15528 cancel_work_sync(&tp->reset_task);
15529 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15530 tg3_flag_clear(tp, RESTART_TIMER);
15532 netif_device_detach(netdev);
15534 /* Clean up software state, even if MMIO is blocked */
15535 tg3_full_lock(tp, 0);
15536 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15537 tg3_full_unlock(tp);
15539 done:
15540 if (state == pci_channel_io_perm_failure)
15541 err = PCI_ERS_RESULT_DISCONNECT;
15542 else
15543 pci_disable_device(pdev);
15545 rtnl_unlock();
15547 return err;
15548 }
15550 /**
15551 * tg3_io_slot_reset - called after the pci bus has been reset.
15552 * @pdev: Pointer to PCI device
15554 * Restart the card from scratch, as if from a cold-boot.
15555 * At this point, the card has experienced a hard reset,
15556 * followed by fixups by BIOS, and has its config space
15557 * set up identically to what it was at cold boot.
15558 */
15559 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15560 {
15561 struct net_device *netdev = pci_get_drvdata(pdev);
15562 struct tg3 *tp = netdev_priv(netdev);
15563 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15564 int err;
15566 rtnl_lock();
15568 if (pci_enable_device(pdev)) {
15569 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15570 goto done;
15571 }
15573 pci_set_master(pdev);
15574 pci_restore_state(pdev);
15575 pci_save_state(pdev);
15577 if (!netif_running(netdev)) {
15578 rc = PCI_ERS_RESULT_RECOVERED;
15579 goto done;
15580 }
15582 err = tg3_power_up(tp);
15583 if (err) {
15584 netdev_err(netdev, "Failed to restore register access.\n");
15585 goto done;
15586 }
15588 rc = PCI_ERS_RESULT_RECOVERED;
15590 done:
15591 rtnl_unlock();
15593 return rc;
15594 }
15596 /**
15597 * tg3_io_resume - called when traffic can start flowing again.
15598 * @pdev: Pointer to PCI device
15600 * This callback is called when the error recovery driver tells
15601 * us that it's OK to resume normal operation.
15602 */
15603 static void tg3_io_resume(struct pci_dev *pdev)
15604 {
15605 struct net_device *netdev = pci_get_drvdata(pdev);
15606 struct tg3 *tp = netdev_priv(netdev);
15607 int err;
15609 rtnl_lock();
15611 if (!netif_running(netdev))
15612 goto done;
15614 tg3_full_lock(tp, 0);
15615 tg3_flag_set(tp, INIT_COMPLETE);
15616 err = tg3_restart_hw(tp, 1);
15617 tg3_full_unlock(tp);
15618 if (err) {
15619 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15620 goto done;
15621 }
15623 netif_device_attach(netdev);
15625 tp->timer.expires = jiffies + tp->timer_offset;
15626 add_timer(&tp->timer);
15628 tg3_netif_start(tp);
15630 tg3_phy_start(tp);
15632 done:
15633 rtnl_unlock();
15634 }
15636 static struct pci_error_handlers tg3_err_handler = {
15637 .error_detected = tg3_io_error_detected,
15638 .slot_reset = tg3_io_slot_reset,
15639 .resume = tg3_io_resume
15640 };
15642 static struct pci_driver tg3_driver = {
15643 .name = DRV_MODULE_NAME,
15644 .id_table = tg3_pci_tbl,
15645 .probe = tg3_init_one,
15646 .remove = __devexit_p(tg3_remove_one),
15647 .err_handler = &tg3_err_handler,
15648 .driver.pm = TG3_PM_OPS,
15649 };
15651 static int __init tg3_init(void)
15652 {
15653 return pci_register_driver(&tg3_driver);
15654 }
15656 static void __exit tg3_cleanup(void)
15657 {
15658 pci_unregister_driver(&tg3_driver);
15659 }
15661 module_init(tg3_init);
15662 module_exit(tg3_cleanup);