/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
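/* Usage sketch: because of the ##flag token pasting, a call such as
 * tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), so every name passed
 * to these macros must be a TG3_FLAG_* enumerator from tg3.h.
 */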
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		122
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"December 7, 2011"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
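/* Worked example: with TG3_TX_RING_SIZE = 512, NEXT_TX(511) evaluates to
 * (511 + 1) & 511 == 0, wrapping back to the start of the ring.  The
 * '& (size - 1)' form is equivalent to '% size' only because the ring
 * size is a power of two, which is the point of the comment above.
 */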
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test       (online) " },
	{ "link test        (online) " },
	{ "register test    (offline)" },
	{ "memory test      (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test   (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
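/* Quick reference for the accessors above:
 *   tw32(reg, val)            plain write through the per-chip hook;
 *   tw32_f(reg, val)          write, then read back to flush the posted
 *                             write immediately;
 *   tw32_wait_f(reg, val, us) flushing write plus a wait of 'us' usecs,
 *                             e.g. tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40)
 *                             as used by tg3_switch_clocks() below.
 */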
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
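/* Both helpers use the same window pattern: point TG3PCI_MEM_WIN_BASE_ADDR
 * at the target NIC SRAM offset, move one dword through TG3PCI_MEM_WIN_DATA,
 * then park the window base back at zero -- all under indirect_lock,
 * presumably so that no other path ever observes a stale window base.
 */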
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't have any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
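/* Usage sketch: callers bracket APE accesses as
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;
 *	... touch APE shared state ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * which is exactly the pattern tg3_ape_send_event() follows below.
 */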
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
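/* Usage sketch: tg3_readphy() returns 0 on success and -EBUSY on timeout,
 * so callers throughout this file test the return value rather than the
 * output parameter, e.g.
 *
 *	u32 reg;
 *	if (!tg3_readphy(tp, MII_BMSR, &reg))
 *		... reg now holds the BMSR contents ...
 */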
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
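/* The two helpers above tunnel clause-45 MMD accesses through clause-22
 * MDIO: select the MMD device in MII_TG3_MMD_CTRL, latch the register
 * address via MII_TG3_MMD_ADDRESS, flip the control register into
 * no-increment data mode, then move the data through MII_TG3_MMD_ADDRESS
 * again.
 */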
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
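/* Note on the poll budget above: 5000 iterations at udelay(10) gives the
 * PHY roughly 50 ms to clear BMCR_RESET before the reset is reported as
 * failed with -EBUSY.
 */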
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
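/* Note on the arithmetic above: the remaining time is converted to usecs,
 * clamped to TG3_FW_EVENT_TIMEOUT_USEC, and divided by 8 to size the loop,
 * so polling at udelay(8) never waits much longer than the remaining part
 * of the 2.5 ms firmware event window.
 */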
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
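/* Read as a truth table, tg3_resolve_flowctrl_1000X() appears to implement
 * the usual 1000BASE-X pause resolution from autoneg results:
 *
 *	both sides advertise PAUSE            -> FLOW_CTRL_TX | FLOW_CTRL_RX
 *	both advertise ASYM, local PAUSE too  -> FLOW_CTRL_RX
 *	both advertise ASYM, remote PAUSE too -> FLOW_CTRL_TX
 *	anything else                         -> no flow control
 */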
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2009 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2011 u32 phy;
2013 if (!tg3_flag(tp, 5705_PLUS) ||
2014 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2015 return;
2017 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2018 u32 ephy;
2020 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2021 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2023 tg3_writephy(tp, MII_TG3_FET_TEST,
2024 ephy | MII_TG3_FET_SHADOW_EN);
2025 if (!tg3_readphy(tp, reg, &phy)) {
2026 if (enable)
2027 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2028 else
2029 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2030 tg3_writephy(tp, reg, phy);
2032 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2034 } else {
2035 int ret;
2037 ret = tg3_phy_auxctl_read(tp,
2038 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2039 if (!ret) {
2040 if (enable)
2041 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2042 else
2043 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2044 tg3_phy_auxctl_write(tp,
2045 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2050 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2052 int ret;
2053 u32 val;
2055 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2056 return;
2058 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2059 if (!ret)
2060 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2061 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
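/* tg3_phy_set_wirespeed() above, like several helpers in this file,
 * does a read-modify-write of an AUXCTL shadow register. A minimal
 * generic sketch of that idiom, under an illustrative name that is
 * not part of the driver:
 */
static int tg3_phy_auxctl_set_bits_sketch(struct tg3 *tp, int regsel, u32 bits)
{
	u32 val;
	int ret = tg3_phy_auxctl_read(tp, regsel, &val);

	if (ret)
		return ret;	/* leave the register untouched on a failed read */

	return tg3_phy_auxctl_write(tp, regsel, val | bits);
}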
2064 static void tg3_phy_apply_otp(struct tg3 *tp)
2066 u32 otp, phy;
2068 if (!tp->phy_otp)
2069 return;
2071 otp = tp->phy_otp;
2073 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2074 return;
2076 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2077 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2078 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2080 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2081 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2082 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2084 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2085 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2086 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2088 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2089 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2091 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2092 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2094 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2095 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2096 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2098 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
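/* Each OTP field above is recovered with the same mask-and-shift idiom.
 * As a worked example with hypothetical values: for a four-bit field
 * whose mask is 0x0000f000 and whose shift is 12, an OTP word of
 * 0x00025000 yields (otp & 0x0000f000) >> 12 == 0x5 before the value
 * is merged with its default bits and written to the DSP register.
 */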
2101 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2103 u32 val;
2105 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2106 return;
2108 tp->setlpicnt = 0;
2110 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2111 current_link_up == 1 &&
2112 tp->link_config.active_duplex == DUPLEX_FULL &&
2113 (tp->link_config.active_speed == SPEED_100 ||
2114 tp->link_config.active_speed == SPEED_1000)) {
2115 u32 eeectl;
2117 if (tp->link_config.active_speed == SPEED_1000)
2118 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2119 else
2120 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2122 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2124 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2125 TG3_CL45_D7_EEERES_STAT, &val);
2127 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2128 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2129 tp->setlpicnt = 2;
2132 if (!tp->setlpicnt) {
2133 if (current_link_up == 1 &&
2134 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2135 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2136 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2139 val = tr32(TG3_CPMU_EEE_MODE);
2140 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2144 static void tg3_phy_eee_enable(struct tg3 *tp)
2146 u32 val;
2148 if (tp->link_config.active_speed == SPEED_1000 &&
2149 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2150 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2151 tg3_flag(tp, 57765_CLASS)) &&
2152 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2153 val = MII_TG3_DSP_TAP26_ALNOKO |
2154 MII_TG3_DSP_TAP26_RMRXSTO;
2155 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2156 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2159 val = tr32(TG3_CPMU_EEE_MODE);
2160 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2163 static int tg3_wait_macro_done(struct tg3 *tp)
2165 int limit = 100;
2167 while (limit--) {
2168 u32 tmp32;
2170 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2171 if ((tmp32 & 0x1000) == 0)
2172 break;
2175 if (limit < 0)
2176 return -EBUSY;
2178 return 0;
2181 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2183 static const u32 test_pat[4][6] = {
2184 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2185 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2186 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2187 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2189 int chan;
2191 for (chan = 0; chan < 4; chan++) {
2192 int i;
2194 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2195 (chan * 0x2000) | 0x0200);
2196 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2198 for (i = 0; i < 6; i++)
2199 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2200 test_pat[chan][i]);
2202 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2203 if (tg3_wait_macro_done(tp)) {
2204 *resetp = 1;
2205 return -EBUSY;
2208 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2209 (chan * 0x2000) | 0x0200);
2210 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2211 if (tg3_wait_macro_done(tp)) {
2212 *resetp = 1;
2213 return -EBUSY;
2216 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2217 if (tg3_wait_macro_done(tp)) {
2218 *resetp = 1;
2219 return -EBUSY;
2222 for (i = 0; i < 6; i += 2) {
2223 u32 low, high;
2225 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2226 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2227 tg3_wait_macro_done(tp)) {
2228 *resetp = 1;
2229 return -EBUSY;
2231 low &= 0x7fff;
2232 high &= 0x000f;
2233 if (low != test_pat[chan][i] ||
2234 high != test_pat[chan][i+1]) {
2235 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2236 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2237 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2239 return -EBUSY;
2244 return 0;
2247 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2249 int chan;
2251 for (chan = 0; chan < 4; chan++) {
2252 int i;
2254 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2255 (chan * 0x2000) | 0x0200);
2256 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2257 for (i = 0; i < 6; i++)
2258 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2259 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2260 if (tg3_wait_macro_done(tp))
2261 return -EBUSY;
2264 return 0;
2267 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2269 u32 reg32, phy9_orig;
2270 int retries, do_phy_reset, err;
2272 retries = 10;
2273 do_phy_reset = 1;
2274 do {
2275 if (do_phy_reset) {
2276 err = tg3_bmcr_reset(tp);
2277 if (err)
2278 return err;
2279 do_phy_reset = 0;
2282 /* Disable transmitter and interrupt. */
2283 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2284 continue;
2286 reg32 |= 0x3000;
2287 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2289 /* Set full-duplex, 1000 mbps. */
2290 tg3_writephy(tp, MII_BMCR,
2291 BMCR_FULLDPLX | BMCR_SPEED1000);
2293 /* Set to master mode. */
2294 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2295 continue;
2297 tg3_writephy(tp, MII_CTRL1000,
2298 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2300 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2301 if (err)
2302 return err;
2304 /* Block the PHY control access. */
2305 tg3_phydsp_write(tp, 0x8005, 0x0800);
2307 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2308 if (!err)
2309 break;
2310 } while (--retries);
2312 err = tg3_phy_reset_chanpat(tp);
2313 if (err)
2314 return err;
2316 tg3_phydsp_write(tp, 0x8005, 0x0000);
2318 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2319 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2321 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2323 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2325 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2326 reg32 &= ~0x3000;
2327 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2328 } else if (!err)
2329 err = -EBUSY;
2331 return err;
/* Reset the tigon3 PHY unconditionally and reapply the chip-specific
 * workarounds; callers decide whether a reset is actually needed.
 */
2337 static int tg3_phy_reset(struct tg3 *tp)
2339 u32 val, cpmuctrl;
2340 int err;
2342 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2343 val = tr32(GRC_MISC_CFG);
2344 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2345 udelay(40);
2347 err = tg3_readphy(tp, MII_BMSR, &val);
2348 err |= tg3_readphy(tp, MII_BMSR, &val);
2349 if (err != 0)
2350 return -EBUSY;
2352 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2353 netif_carrier_off(tp->dev);
2354 tg3_link_report(tp);
2357 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2358 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2359 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2360 err = tg3_phy_reset_5703_4_5(tp);
2361 if (err)
2362 return err;
2363 goto out;
2366 cpmuctrl = 0;
2367 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2368 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2369 cpmuctrl = tr32(TG3_CPMU_CTRL);
2370 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2371 tw32(TG3_CPMU_CTRL,
2372 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2375 err = tg3_bmcr_reset(tp);
2376 if (err)
2377 return err;
2379 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2380 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2381 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2383 tw32(TG3_CPMU_CTRL, cpmuctrl);
2386 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2387 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2388 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2389 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2390 CPMU_LSPD_1000MB_MACCLK_12_5) {
2391 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2392 udelay(40);
2393 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2397 if (tg3_flag(tp, 5717_PLUS) &&
2398 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2399 return 0;
2401 tg3_phy_apply_otp(tp);
2403 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2404 tg3_phy_toggle_apd(tp, true);
2405 else
2406 tg3_phy_toggle_apd(tp, false);
2408 out:
2409 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2410 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2411 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2412 tg3_phydsp_write(tp, 0x000a, 0x0323);
2413 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2416 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2417 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2418 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2421 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2422 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2423 tg3_phydsp_write(tp, 0x000a, 0x310b);
2424 tg3_phydsp_write(tp, 0x201f, 0x9506);
2425 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2426 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2428 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2429 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2430 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2431 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2432 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2433 tg3_writephy(tp, MII_TG3_TEST1,
2434 MII_TG3_TEST1_TRIM_EN | 0x4);
2435 } else
2436 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2438 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
2444 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2445 /* Cannot do read-modify-write on 5401 */
2446 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2447 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2448 /* Set bit 14 with read-modify-write to preserve other bits */
2449 err = tg3_phy_auxctl_read(tp,
2450 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2451 if (!err)
2452 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2453 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2456 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
2459 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2460 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2461 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2462 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2466 /* adjust output voltage */
2467 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2470 tg3_phy_toggle_automdix(tp, 1);
2471 tg3_phy_set_wirespeed(tp);
2472 return 0;
2475 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2476 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2477 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2478 TG3_GPIO_MSG_NEED_VAUX)
2479 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2480 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2481 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2482 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2483 (TG3_GPIO_MSG_DRVR_PRES << 12))
2485 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2486 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2487 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2488 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2489 (TG3_GPIO_MSG_NEED_VAUX << 12))
2491 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2493 u32 status, shift;
2495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2496 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2497 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2498 else
2499 status = tr32(TG3_CPMU_DRV_STATUS);
2501 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2502 status &= ~(TG3_GPIO_MSG_MASK << shift);
2503 status |= (newstat << shift);
2505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2506 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2507 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2508 else
2509 tw32(TG3_CPMU_DRV_STATUS, status);
2511 return status >> TG3_APE_GPIO_MSG_SHIFT;
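/* Each PCI function owns a 4-bit slice of the status word, at bit
 * position TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn. For example, with
 * pci_fn == 2, tg3_set_function_status() clears and rewrites only
 * bits [shift + 8, shift + 11], leaving the DRVR_PRES/NEED_VAUX bits
 * of the other three functions intact.
 */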
2514 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2516 if (!tg3_flag(tp, IS_NIC))
2517 return 0;
2519 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2520 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2521 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2522 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2523 return -EIO;
2525 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2527 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2528 TG3_GRC_LCLCTL_PWRSW_DELAY);
2530 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2531 } else {
2532 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2533 TG3_GRC_LCLCTL_PWRSW_DELAY);
2536 return 0;
2539 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2541 u32 grc_local_ctrl;
2543 if (!tg3_flag(tp, IS_NIC) ||
2544 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2545 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2546 return;
2548 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2550 tw32_wait_f(GRC_LOCAL_CTRL,
2551 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2552 TG3_GRC_LCLCTL_PWRSW_DELAY);
2554 tw32_wait_f(GRC_LOCAL_CTRL,
2555 grc_local_ctrl,
2556 TG3_GRC_LCLCTL_PWRSW_DELAY);
2558 tw32_wait_f(GRC_LOCAL_CTRL,
2559 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2560 TG3_GRC_LCLCTL_PWRSW_DELAY);
2563 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2565 if (!tg3_flag(tp, IS_NIC))
2566 return;
2568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2569 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2570 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2571 (GRC_LCLCTRL_GPIO_OE0 |
2572 GRC_LCLCTRL_GPIO_OE1 |
2573 GRC_LCLCTRL_GPIO_OE2 |
2574 GRC_LCLCTRL_GPIO_OUTPUT0 |
2575 GRC_LCLCTRL_GPIO_OUTPUT1),
2576 TG3_GRC_LCLCTL_PWRSW_DELAY);
2577 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2578 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2579 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2580 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2581 GRC_LCLCTRL_GPIO_OE1 |
2582 GRC_LCLCTRL_GPIO_OE2 |
2583 GRC_LCLCTRL_GPIO_OUTPUT0 |
2584 GRC_LCLCTRL_GPIO_OUTPUT1 |
2585 tp->grc_local_ctrl;
2586 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2587 TG3_GRC_LCLCTL_PWRSW_DELAY);
2589 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2590 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2591 TG3_GRC_LCLCTL_PWRSW_DELAY);
2593 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2594 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2595 TG3_GRC_LCLCTL_PWRSW_DELAY);
2596 } else {
2597 u32 no_gpio2;
2598 u32 grc_local_ctrl = 0;
2600 /* Workaround to prevent overdrawing Amps. */
2601 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2602 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2603 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2604 grc_local_ctrl,
2605 TG3_GRC_LCLCTL_PWRSW_DELAY);
2608 /* On 5753 and variants, GPIO2 cannot be used. */
2609 no_gpio2 = tp->nic_sram_data_cfg &
2610 NIC_SRAM_DATA_CFG_NO_GPIO2;
2612 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2613 GRC_LCLCTRL_GPIO_OE1 |
2614 GRC_LCLCTRL_GPIO_OE2 |
2615 GRC_LCLCTRL_GPIO_OUTPUT1 |
2616 GRC_LCLCTRL_GPIO_OUTPUT2;
2617 if (no_gpio2) {
2618 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2619 GRC_LCLCTRL_GPIO_OUTPUT2);
2621 tw32_wait_f(GRC_LOCAL_CTRL,
2622 tp->grc_local_ctrl | grc_local_ctrl,
2623 TG3_GRC_LCLCTL_PWRSW_DELAY);
2625 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2627 tw32_wait_f(GRC_LOCAL_CTRL,
2628 tp->grc_local_ctrl | grc_local_ctrl,
2629 TG3_GRC_LCLCTL_PWRSW_DELAY);
2631 if (!no_gpio2) {
2632 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2633 tw32_wait_f(GRC_LOCAL_CTRL,
2634 tp->grc_local_ctrl | grc_local_ctrl,
2635 TG3_GRC_LCLCTL_PWRSW_DELAY);
2640 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2642 u32 msg = 0;
2644 /* Serialize power state transitions */
2645 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2646 return;
2648 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2649 msg = TG3_GPIO_MSG_NEED_VAUX;
2651 msg = tg3_set_function_status(tp, msg);
2653 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2654 goto done;
2656 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2657 tg3_pwrsrc_switch_to_vaux(tp);
2658 else
2659 tg3_pwrsrc_die_with_vmain(tp);
2661 done:
2662 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2665 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2667 bool need_vaux = false;
2669 /* The GPIOs do something completely different on 57765. */
2670 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2671 return;
2673 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2674 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2675 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2676 tg3_frob_aux_power_5717(tp, include_wol ?
2677 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2678 return;
2681 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2682 struct net_device *dev_peer;
2684 dev_peer = pci_get_drvdata(tp->pdev_peer);
2686 /* remove_one() may have been run on the peer. */
2687 if (dev_peer) {
2688 struct tg3 *tp_peer = netdev_priv(dev_peer);
2690 if (tg3_flag(tp_peer, INIT_COMPLETE))
2691 return;
2693 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2694 tg3_flag(tp_peer, ENABLE_ASF))
2695 need_vaux = true;
2699 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2700 tg3_flag(tp, ENABLE_ASF))
2701 need_vaux = true;
2703 if (need_vaux)
2704 tg3_pwrsrc_switch_to_vaux(tp);
2705 else
2706 tg3_pwrsrc_die_with_vmain(tp);
2709 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2711 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2712 return 1;
2713 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2714 if (speed != SPEED_10)
2715 return 1;
2716 } else if (speed == SPEED_10)
2717 return 1;
2719 return 0;
2722 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2724 u32 val;
2726 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2727 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2728 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2729 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2731 sg_dig_ctrl |=
2732 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2733 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2734 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2736 return;
2739 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2740 tg3_bmcr_reset(tp);
2741 val = tr32(GRC_MISC_CFG);
2742 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2743 udelay(40);
2744 return;
2745 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2746 u32 phytest;
2747 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2748 u32 phy;
2750 tg3_writephy(tp, MII_ADVERTISE, 0);
2751 tg3_writephy(tp, MII_BMCR,
2752 BMCR_ANENABLE | BMCR_ANRESTART);
2754 tg3_writephy(tp, MII_TG3_FET_TEST,
2755 phytest | MII_TG3_FET_SHADOW_EN);
2756 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2757 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2758 tg3_writephy(tp,
2759 MII_TG3_FET_SHDW_AUXMODE4,
2760 phy);
2762 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2764 return;
2765 } else if (do_low_power) {
2766 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2767 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2769 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2770 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2771 MII_TG3_AUXCTL_PCTL_VREG_11V;
2772 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2775 /* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
2778 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2779 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2780 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2781 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2782 return;
2784 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2785 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2786 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2787 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2788 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2789 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2792 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2795 /* tp->lock is held. */
2796 static int tg3_nvram_lock(struct tg3 *tp)
2798 if (tg3_flag(tp, NVRAM)) {
2799 int i;
2801 if (tp->nvram_lock_cnt == 0) {
2802 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2803 for (i = 0; i < 8000; i++) {
2804 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2805 break;
2806 udelay(20);
2808 if (i == 8000) {
2809 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2810 return -ENODEV;
2813 tp->nvram_lock_cnt++;
2815 return 0;
2818 /* tp->lock is held. */
2819 static void tg3_nvram_unlock(struct tg3 *tp)
2821 if (tg3_flag(tp, NVRAM)) {
2822 if (tp->nvram_lock_cnt > 0)
2823 tp->nvram_lock_cnt--;
2824 if (tp->nvram_lock_cnt == 0)
2825 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
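/* The NVRAM lock is counted, not boolean: nested tg3_nvram_lock()
 * calls touch the hardware arbiter only on the 0 -> 1 transition,
 * and tg3_nvram_unlock() releases SWARB only once the count drops
 * back to zero.
 */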
2829 /* tp->lock is held. */
2830 static void tg3_enable_nvram_access(struct tg3 *tp)
2832 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2833 u32 nvaccess = tr32(NVRAM_ACCESS);
2835 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2839 /* tp->lock is held. */
2840 static void tg3_disable_nvram_access(struct tg3 *tp)
2842 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2843 u32 nvaccess = tr32(NVRAM_ACCESS);
2845 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2849 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2850 u32 offset, u32 *val)
2852 u32 tmp;
2853 int i;
2855 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2856 return -EINVAL;
2858 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2859 EEPROM_ADDR_DEVID_MASK |
2860 EEPROM_ADDR_READ);
2861 tw32(GRC_EEPROM_ADDR,
2862 tmp |
2863 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2864 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2865 EEPROM_ADDR_ADDR_MASK) |
2866 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2868 for (i = 0; i < 1000; i++) {
2869 tmp = tr32(GRC_EEPROM_ADDR);
2871 if (tmp & EEPROM_ADDR_COMPLETE)
2872 break;
2873 msleep(1);
2875 if (!(tmp & EEPROM_ADDR_COMPLETE))
2876 return -EBUSY;
2878 tmp = tr32(GRC_EEPROM_DATA);
	/* The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
2884 *val = swab32(tmp);
2886 return 0;
2889 #define NVRAM_CMD_TIMEOUT 10000
2891 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2893 int i;
2895 tw32(NVRAM_CMD, nvram_cmd);
2896 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2897 udelay(10);
2898 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2899 udelay(10);
2900 break;
2904 if (i == NVRAM_CMD_TIMEOUT)
2905 return -EBUSY;
2907 return 0;
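/* tg3_nvram_exec_cmd() is one instance of this file's standard
 * bounded-poll idiom (see also tg3_wait_macro_done() and
 * tg3_halt_cpu()). A minimal generic sketch, with an illustrative
 * name that is not part of the driver:
 */
static int tg3_poll_bit_sketch(struct tg3 *tp, u32 reg, u32 bit, int tries)
{
	while (tries--) {
		if (tr32(reg) & bit)
			return 0;	/* bit set: operation finished */
		udelay(10);
	}

	return -EBUSY;			/* polling budget exhausted */
}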
2910 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2912 if (tg3_flag(tp, NVRAM) &&
2913 tg3_flag(tp, NVRAM_BUFFERED) &&
2914 tg3_flag(tp, FLASH) &&
2915 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2916 (tp->nvram_jedecnum == JEDEC_ATMEL))
2918 addr = ((addr / tp->nvram_pagesize) <<
2919 ATMEL_AT45DB0X1B_PAGE_POS) +
2920 (addr % tp->nvram_pagesize);
2922 return addr;
2925 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2927 if (tg3_flag(tp, NVRAM) &&
2928 tg3_flag(tp, NVRAM_BUFFERED) &&
2929 tg3_flag(tp, FLASH) &&
2930 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2931 (tp->nvram_jedecnum == JEDEC_ATMEL))
2933 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2934 tp->nvram_pagesize) +
2935 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2937 return addr;
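/* Worked example of the two translations above, assuming the usual
 * AT45DB0x1B geometry (264-byte pages, ATMEL_AT45DB0X1B_PAGE_POS == 9):
 * linear offset 1000 is page 1000 / 264 = 3, byte 1000 % 264 = 208, so
 * the physical address is (3 << 9) + 208 = 0x6d0. Feeding 0x6d0 back
 * through tg3_nvram_logical_addr() recovers 1000, since the low nine
 * bits carry the in-page byte and the upper bits the page number.
 */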
2940 /* NOTE: Data read in from NVRAM is byteswapped according to
2941 * the byteswapping settings for all other register accesses.
2942 * tg3 devices are BE devices, so on a BE machine, the data
2943 * returned will be exactly as it is seen in NVRAM. On a LE
 * machine, the 32-bit value will be byteswapped.
 */
2946 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2948 int ret;
2950 if (!tg3_flag(tp, NVRAM))
2951 return tg3_nvram_read_using_eeprom(tp, offset, val);
2953 offset = tg3_nvram_phys_addr(tp, offset);
2955 if (offset > NVRAM_ADDR_MSK)
2956 return -EINVAL;
2958 ret = tg3_nvram_lock(tp);
2959 if (ret)
2960 return ret;
2962 tg3_enable_nvram_access(tp);
2964 tw32(NVRAM_ADDR, offset);
2965 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2966 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2968 if (ret == 0)
2969 *val = tr32(NVRAM_RDDATA);
2971 tg3_disable_nvram_access(tp);
2973 tg3_nvram_unlock(tp);
2975 return ret;
2978 /* Ensures NVRAM data is in bytestream format. */
2979 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2981 u32 v;
2982 int res = tg3_nvram_read(tp, offset, &v);
2983 if (!res)
2984 *val = cpu_to_be32(v);
2985 return res;
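/* Usage sketch (illustrative, not driver code): read the first
 * bytestream word and compare it with the NVRAM signature.
 * TG3_EEPROM_MAGIC is assumed here to be the driver's signature
 * constant.
 */
static int tg3_nvram_check_magic_sketch(struct tg3 *tp)
{
	__be32 magic;

	if (tg3_nvram_read_be32(tp, 0, &magic))
		return -EIO;

	return be32_to_cpu(magic) == TG3_EEPROM_MAGIC ? 0 : -ENODEV;
}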
2988 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2989 u32 offset, u32 len, u8 *buf)
2991 int i, j, rc = 0;
2992 u32 val;
2994 for (i = 0; i < len; i += 4) {
2995 u32 addr;
2996 __be32 data;
2998 addr = offset + i;
3000 memcpy(&data, buf + i, 4);
		/* The SEEPROM interface expects the data to always be opposite
		 * the native endian format. We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
3008 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3010 val = tr32(GRC_EEPROM_ADDR);
3011 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3013 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3014 EEPROM_ADDR_READ);
3015 tw32(GRC_EEPROM_ADDR, val |
3016 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3017 (addr & EEPROM_ADDR_ADDR_MASK) |
3018 EEPROM_ADDR_START |
3019 EEPROM_ADDR_WRITE);
3021 for (j = 0; j < 1000; j++) {
3022 val = tr32(GRC_EEPROM_ADDR);
3024 if (val & EEPROM_ADDR_COMPLETE)
3025 break;
3026 msleep(1);
3028 if (!(val & EEPROM_ADDR_COMPLETE)) {
3029 rc = -EBUSY;
3030 break;
3034 return rc;
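/* The swab32(be32_to_cpu(data)) above is the exact inverse of the read
 * path: tg3_nvram_read_using_eeprom() applies swab32() and
 * tg3_nvram_read_be32() then applies cpu_to_be32(), so a buffer that
 * was filled by the read helpers round-trips through the SEEPROM
 * unchanged.
 */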
3037 /* offset and length are dword aligned */
3038 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3039 u8 *buf)
3041 int ret = 0;
3042 u32 pagesize = tp->nvram_pagesize;
3043 u32 pagemask = pagesize - 1;
3044 u32 nvram_cmd;
3045 u8 *tmp;
3047 tmp = kmalloc(pagesize, GFP_KERNEL);
3048 if (tmp == NULL)
3049 return -ENOMEM;
3051 while (len) {
3052 int j;
3053 u32 phy_addr, page_off, size;
3055 phy_addr = offset & ~pagemask;
3057 for (j = 0; j < pagesize; j += 4) {
3058 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3059 (__be32 *) (tmp + j));
3060 if (ret)
3061 break;
3063 if (ret)
3064 break;
3066 page_off = offset & pagemask;
3067 size = pagesize;
3068 if (len < size)
3069 size = len;
3071 len -= size;
3073 memcpy(tmp + page_off, buf, size);
3075 offset = offset + (pagesize - page_off);
3077 tg3_enable_nvram_access(tp);
		/* Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
3083 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3085 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3086 break;
3088 /* Erase the target page */
3089 tw32(NVRAM_ADDR, phy_addr);
3091 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3092 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3094 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3095 break;
3097 /* Issue another write enable to start the write. */
3098 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3100 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3101 break;
3103 for (j = 0; j < pagesize; j += 4) {
3104 __be32 data;
3106 data = *((__be32 *) (tmp + j));
3108 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3110 tw32(NVRAM_ADDR, phy_addr + j);
3112 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3113 NVRAM_CMD_WR;
3115 if (j == 0)
3116 nvram_cmd |= NVRAM_CMD_FIRST;
3117 else if (j == (pagesize - 4))
3118 nvram_cmd |= NVRAM_CMD_LAST;
3120 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3121 if (ret)
3122 break;
3124 if (ret)
3125 break;
3128 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3129 tg3_nvram_exec_cmd(tp, nvram_cmd);
3131 kfree(tmp);
3133 return ret;
3136 /* offset and length are dword aligned */
3137 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3138 u8 *buf)
3140 int i, ret = 0;
3142 for (i = 0; i < len; i += 4, offset += 4) {
3143 u32 page_off, phy_addr, nvram_cmd;
3144 __be32 data;
3146 memcpy(&data, buf + i, 4);
3147 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3149 page_off = offset % tp->nvram_pagesize;
3151 phy_addr = tg3_nvram_phys_addr(tp, offset);
3153 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3155 if (page_off == 0 || i == 0)
3156 nvram_cmd |= NVRAM_CMD_FIRST;
3157 if (page_off == (tp->nvram_pagesize - 4))
3158 nvram_cmd |= NVRAM_CMD_LAST;
3160 if (i == (len - 4))
3161 nvram_cmd |= NVRAM_CMD_LAST;
3163 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3164 !tg3_flag(tp, FLASH) ||
3165 !tg3_flag(tp, 57765_PLUS))
3166 tw32(NVRAM_ADDR, phy_addr);
3168 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3169 !tg3_flag(tp, 5755_PLUS) &&
3170 (tp->nvram_jedecnum == JEDEC_ST) &&
3171 (nvram_cmd & NVRAM_CMD_FIRST)) {
3172 u32 cmd;
3174 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3175 ret = tg3_nvram_exec_cmd(tp, cmd);
3176 if (ret)
3177 break;
3179 if (!tg3_flag(tp, FLASH)) {
3180 /* We always do complete word writes to eeprom. */
3181 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3184 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3185 if (ret)
3186 break;
3188 return ret;
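/* NVRAM_CMD_FIRST and NVRAM_CMD_LAST bracket each programming burst
 * above: FIRST is raised for the first word overall and at every page
 * boundary, LAST at the final word of a page and at the final word of
 * the transfer, so the controller only ever sees well-delimited,
 * page-sized (or shorter) bursts.
 */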
3191 /* offset and length are dword aligned */
3192 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3194 int ret;
3196 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3197 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3198 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3199 udelay(40);
3202 if (!tg3_flag(tp, NVRAM)) {
3203 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3204 } else {
3205 u32 grc_mode;
3207 ret = tg3_nvram_lock(tp);
3208 if (ret)
3209 return ret;
3211 tg3_enable_nvram_access(tp);
3212 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3213 tw32(NVRAM_WRITE1, 0x406);
3215 grc_mode = tr32(GRC_MODE);
3216 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3218 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3219 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3220 buf);
3221 } else {
3222 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3223 buf);
3226 grc_mode = tr32(GRC_MODE);
3227 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3229 tg3_disable_nvram_access(tp);
3230 tg3_nvram_unlock(tp);
3233 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3234 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3235 udelay(40);
3238 return ret;
3241 #define RX_CPU_SCRATCH_BASE 0x30000
3242 #define RX_CPU_SCRATCH_SIZE 0x04000
3243 #define TX_CPU_SCRATCH_BASE 0x34000
3244 #define TX_CPU_SCRATCH_SIZE 0x04000
3246 /* tp->lock is held. */
3247 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3249 int i;
3251 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3253 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3254 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3256 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3257 return 0;
3259 if (offset == RX_CPU_BASE) {
3260 for (i = 0; i < 10000; i++) {
3261 tw32(offset + CPU_STATE, 0xffffffff);
3262 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3263 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3264 break;
3267 tw32(offset + CPU_STATE, 0xffffffff);
3268 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3269 udelay(10);
3270 } else {
3271 for (i = 0; i < 10000; i++) {
3272 tw32(offset + CPU_STATE, 0xffffffff);
3273 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3274 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3275 break;
3279 if (i >= 10000) {
3280 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3281 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3282 return -ENODEV;
3285 /* Clear firmware's nvram arbitration. */
3286 if (tg3_flag(tp, NVRAM))
3287 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3288 return 0;
3291 struct fw_info {
3292 unsigned int fw_base;
3293 unsigned int fw_len;
3294 const __be32 *fw_data;
3297 /* tp->lock is held. */
3298 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3299 u32 cpu_scratch_base, int cpu_scratch_size,
3300 struct fw_info *info)
3302 int err, lock_err, i;
3303 void (*write_op)(struct tg3 *, u32, u32);
3305 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware on a 5705-plus chip, which has no TX CPU\n",
			   __func__);
3309 return -EINVAL;
3312 if (tg3_flag(tp, 5705_PLUS))
3313 write_op = tg3_write_mem;
3314 else
3315 write_op = tg3_write_indirect_reg32;
3317 /* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
3320 lock_err = tg3_nvram_lock(tp);
3321 err = tg3_halt_cpu(tp, cpu_base);
3322 if (!lock_err)
3323 tg3_nvram_unlock(tp);
3324 if (err)
3325 goto out;
3327 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3328 write_op(tp, cpu_scratch_base + i, 0);
3329 tw32(cpu_base + CPU_STATE, 0xffffffff);
3330 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3331 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3332 write_op(tp, (cpu_scratch_base +
3333 (info->fw_base & 0xffff) +
3334 (i * sizeof(u32))),
3335 be32_to_cpu(info->fw_data[i]));
3337 err = 0;
3339 out:
3340 return err;
3343 /* tp->lock is held. */
3344 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3346 struct fw_info info;
3347 const __be32 *fw_data;
3348 int err, i;
3350 fw_data = (void *)tp->fw->data;
3352 /* Firmware blob starts with version numbers, followed by
3353 start address and length. We are setting complete length.
3354 length = end_address_of_bss - start_address_of_text.
3355 Remainder is the blob to be loaded contiguously
3356 from start address. */
3358 info.fw_base = be32_to_cpu(fw_data[1]);
3359 info.fw_len = tp->fw->size - 12;
3360 info.fw_data = &fw_data[3];
3362 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3363 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3364 &info);
3365 if (err)
3366 return err;
3368 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3369 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3370 &info);
3371 if (err)
3372 return err;
3374 /* Now startup only the RX cpu. */
3375 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3376 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3378 for (i = 0; i < 5; i++) {
3379 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3380 break;
3381 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3382 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3383 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3384 udelay(1000);
3386 if (i >= 5) {
		netdev_err(tp->dev, "%s failed to set RX CPU PC: got %08x, "
			   "expected %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3390 return -ENODEV;
3392 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3393 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3395 return 0;
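/* Both loaders above rely on the blob layout described in the comment:
 * three big-endian header words (version, start address, recorded
 * length), then the image itself, loaded contiguously from the start
 * address. A minimal parsing sketch, not part of the driver:
 */
static void tg3_fw_hdr_sketch(const struct firmware *fw, struct fw_info *info)
{
	const __be32 *fw_data = (const __be32 *)fw->data;

	info->fw_base = be32_to_cpu(fw_data[1]);	/* load address */
	info->fw_len = fw->size - 12;			/* drop 3 header words */
	info->fw_data = &fw_data[3];			/* image proper */
}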
3398 /* tp->lock is held. */
3399 static int tg3_load_tso_firmware(struct tg3 *tp)
3401 struct fw_info info;
3402 const __be32 *fw_data;
3403 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3404 int err, i;
3406 if (tg3_flag(tp, HW_TSO_1) ||
3407 tg3_flag(tp, HW_TSO_2) ||
3408 tg3_flag(tp, HW_TSO_3))
3409 return 0;
3411 fw_data = (void *)tp->fw->data;
3413 /* Firmware blob starts with version numbers, followed by
3414 start address and length. We are setting complete length.
3415 length = end_address_of_bss - start_address_of_text.
3416 Remainder is the blob to be loaded contiguously
3417 from start address. */
3419 info.fw_base = be32_to_cpu(fw_data[1]);
3420 cpu_scratch_size = tp->fw_len;
3421 info.fw_len = tp->fw->size - 12;
3422 info.fw_data = &fw_data[3];
3424 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3425 cpu_base = RX_CPU_BASE;
3426 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3427 } else {
3428 cpu_base = TX_CPU_BASE;
3429 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3430 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3433 err = tg3_load_firmware_cpu(tp, cpu_base,
3434 cpu_scratch_base, cpu_scratch_size,
3435 &info);
3436 if (err)
3437 return err;
3439 /* Now startup the cpu. */
3440 tw32(cpu_base + CPU_STATE, 0xffffffff);
3441 tw32_f(cpu_base + CPU_PC, info.fw_base);
3443 for (i = 0; i < 5; i++) {
3444 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3445 break;
3446 tw32(cpu_base + CPU_STATE, 0xffffffff);
3447 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3448 tw32_f(cpu_base + CPU_PC, info.fw_base);
3449 udelay(1000);
3451 if (i >= 5) {
		netdev_err(tp->dev,
			   "%s failed to set CPU PC: got %08x, expected %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3455 return -ENODEV;
3457 tw32(cpu_base + CPU_STATE, 0xffffffff);
3458 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3459 return 0;
3463 /* tp->lock is held. */
3464 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3466 u32 addr_high, addr_low;
3467 int i;
3469 addr_high = ((tp->dev->dev_addr[0] << 8) |
3470 tp->dev->dev_addr[1]);
3471 addr_low = ((tp->dev->dev_addr[2] << 24) |
3472 (tp->dev->dev_addr[3] << 16) |
3473 (tp->dev->dev_addr[4] << 8) |
3474 (tp->dev->dev_addr[5] << 0));
3475 for (i = 0; i < 4; i++) {
3476 if (i == 1 && skip_mac_1)
3477 continue;
3478 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3479 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3482 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3483 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3484 for (i = 0; i < 12; i++) {
3485 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3486 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3490 addr_high = (tp->dev->dev_addr[0] +
3491 tp->dev->dev_addr[1] +
3492 tp->dev->dev_addr[2] +
3493 tp->dev->dev_addr[3] +
3494 tp->dev->dev_addr[4] +
3495 tp->dev->dev_addr[5]) &
3496 TX_BACKOFF_SEED_MASK;
3497 tw32(MAC_TX_BACKOFF_SEED, addr_high);
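/* Worked example of the register packing above: for the (made-up)
 * station address 00:10:18:aa:bb:cc,
 *
 *	addr_high = (0x00 << 8) | 0x10 = 0x00000010
 *	addr_low  = (0x18 << 24) | (0xaa << 16) | (0xbb << 8) | 0xcc
 *		  = 0x18aabbcc
 *
 * and the TX backoff seed is the byte sum (0x259) masked with
 * TX_BACKOFF_SEED_MASK.
 */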
3500 static void tg3_enable_register_access(struct tg3 *tp)
	/* Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
3506 pci_write_config_dword(tp->pdev,
3507 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3510 static int tg3_power_up(struct tg3 *tp)
3512 int err;
3514 tg3_enable_register_access(tp);
3516 err = pci_set_power_state(tp->pdev, PCI_D0);
3517 if (!err) {
3518 /* Switch out of Vaux if it is a NIC */
3519 tg3_pwrsrc_switch_to_vmain(tp);
3520 } else {
3521 netdev_err(tp->dev, "Transition to D0 failed\n");
3524 return err;
3527 static int tg3_setup_phy(struct tg3 *, int);
3529 static int tg3_power_down_prepare(struct tg3 *tp)
3531 u32 misc_host_ctrl;
3532 bool device_should_wake, do_low_power;
3534 tg3_enable_register_access(tp);
3536 /* Restore the CLKREQ setting. */
3537 if (tg3_flag(tp, CLKREQ_BUG)) {
3538 u16 lnkctl;
3540 pci_read_config_word(tp->pdev,
3541 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3542 &lnkctl);
3543 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3544 pci_write_config_word(tp->pdev,
3545 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3546 lnkctl);
3549 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3550 tw32(TG3PCI_MISC_HOST_CTRL,
3551 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3553 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3554 tg3_flag(tp, WOL_ENABLE);
3556 if (tg3_flag(tp, USE_PHYLIB)) {
3557 do_low_power = false;
3558 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3559 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3560 struct phy_device *phydev;
3561 u32 phyid, advertising;
3563 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3565 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3567 tp->link_config.orig_speed = phydev->speed;
3568 tp->link_config.orig_duplex = phydev->duplex;
3569 tp->link_config.orig_autoneg = phydev->autoneg;
3570 tp->link_config.orig_advertising = phydev->advertising;
3572 advertising = ADVERTISED_TP |
3573 ADVERTISED_Pause |
3574 ADVERTISED_Autoneg |
3575 ADVERTISED_10baseT_Half;
3577 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3578 if (tg3_flag(tp, WOL_SPEED_100MB))
3579 advertising |=
3580 ADVERTISED_100baseT_Half |
3581 ADVERTISED_100baseT_Full |
3582 ADVERTISED_10baseT_Full;
3583 else
3584 advertising |= ADVERTISED_10baseT_Full;
3587 phydev->advertising = advertising;
3589 phy_start_aneg(phydev);
3591 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3592 if (phyid != PHY_ID_BCMAC131) {
3593 phyid &= PHY_BCM_OUI_MASK;
3594 if (phyid == PHY_BCM_OUI_1 ||
3595 phyid == PHY_BCM_OUI_2 ||
3596 phyid == PHY_BCM_OUI_3)
3597 do_low_power = true;
3600 } else {
3601 do_low_power = true;
3603 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3604 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3605 tp->link_config.orig_speed = tp->link_config.speed;
3606 tp->link_config.orig_duplex = tp->link_config.duplex;
3607 tp->link_config.orig_autoneg = tp->link_config.autoneg;
3610 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3611 tp->link_config.speed = SPEED_10;
3612 tp->link_config.duplex = DUPLEX_HALF;
3613 tp->link_config.autoneg = AUTONEG_ENABLE;
3614 tg3_setup_phy(tp, 0);
3618 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3619 u32 val;
3621 val = tr32(GRC_VCPU_EXT_CTRL);
3622 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3623 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3624 int i;
3625 u32 val;
3627 for (i = 0; i < 200; i++) {
3628 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3629 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3630 break;
3631 msleep(1);
3634 if (tg3_flag(tp, WOL_CAP))
3635 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3636 WOL_DRV_STATE_SHUTDOWN |
3637 WOL_DRV_WOL |
3638 WOL_SET_MAGIC_PKT);
3640 if (device_should_wake) {
3641 u32 mac_mode;
3643 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3644 if (do_low_power &&
3645 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3646 tg3_phy_auxctl_write(tp,
3647 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3648 MII_TG3_AUXCTL_PCTL_WOL_EN |
3649 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3650 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3651 udelay(40);
3654 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3655 mac_mode = MAC_MODE_PORT_MODE_GMII;
3656 else
3657 mac_mode = MAC_MODE_PORT_MODE_MII;
3659 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3660 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3661 ASIC_REV_5700) {
3662 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3663 SPEED_100 : SPEED_10;
3664 if (tg3_5700_link_polarity(tp, speed))
3665 mac_mode |= MAC_MODE_LINK_POLARITY;
3666 else
3667 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3669 } else {
3670 mac_mode = MAC_MODE_PORT_MODE_TBI;
3673 if (!tg3_flag(tp, 5750_PLUS))
3674 tw32(MAC_LED_CTRL, tp->led_ctrl);
3676 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3677 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3678 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3679 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3681 if (tg3_flag(tp, ENABLE_APE))
3682 mac_mode |= MAC_MODE_APE_TX_EN |
3683 MAC_MODE_APE_RX_EN |
3684 MAC_MODE_TDE_ENABLE;
3686 tw32_f(MAC_MODE, mac_mode);
3687 udelay(100);
3689 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3690 udelay(10);
3693 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3694 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3695 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3696 u32 base_val;
3698 base_val = tp->pci_clock_ctrl;
3699 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3700 CLOCK_CTRL_TXCLK_DISABLE);
3702 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3703 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3704 } else if (tg3_flag(tp, 5780_CLASS) ||
3705 tg3_flag(tp, CPMU_PRESENT) ||
3706 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3707 /* do nothing */
3708 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3709 u32 newbits1, newbits2;
3711 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3712 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3713 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3714 CLOCK_CTRL_TXCLK_DISABLE |
3715 CLOCK_CTRL_ALTCLK);
3716 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3717 } else if (tg3_flag(tp, 5705_PLUS)) {
3718 newbits1 = CLOCK_CTRL_625_CORE;
3719 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3720 } else {
3721 newbits1 = CLOCK_CTRL_ALTCLK;
3722 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3725 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3726 40);
3728 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3729 40);
3731 if (!tg3_flag(tp, 5705_PLUS)) {
3732 u32 newbits3;
3734 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3735 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3736 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3737 CLOCK_CTRL_TXCLK_DISABLE |
3738 CLOCK_CTRL_44MHZ_CORE);
3739 } else {
3740 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3743 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3744 tp->pci_clock_ctrl | newbits3, 40);
3748 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3749 tg3_power_down_phy(tp, do_low_power);
3751 tg3_frob_aux_power(tp, true);
3753 /* Workaround for unstable PLL clock */
3754 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3755 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3756 u32 val = tr32(0x7d00);
3758 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3759 tw32(0x7d00, val);
3760 if (!tg3_flag(tp, ENABLE_ASF)) {
3761 int err;
3763 err = tg3_nvram_lock(tp);
3764 tg3_halt_cpu(tp, RX_CPU_BASE);
3765 if (!err)
3766 tg3_nvram_unlock(tp);
3770 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3772 return 0;
3775 static void tg3_power_down(struct tg3 *tp)
3777 tg3_power_down_prepare(tp);
3779 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3780 pci_set_power_state(tp->pdev, PCI_D3hot);
3783 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3785 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3786 case MII_TG3_AUX_STAT_10HALF:
3787 *speed = SPEED_10;
3788 *duplex = DUPLEX_HALF;
3789 break;
3791 case MII_TG3_AUX_STAT_10FULL:
3792 *speed = SPEED_10;
3793 *duplex = DUPLEX_FULL;
3794 break;
3796 case MII_TG3_AUX_STAT_100HALF:
3797 *speed = SPEED_100;
3798 *duplex = DUPLEX_HALF;
3799 break;
3801 case MII_TG3_AUX_STAT_100FULL:
3802 *speed = SPEED_100;
3803 *duplex = DUPLEX_FULL;
3804 break;
3806 case MII_TG3_AUX_STAT_1000HALF:
3807 *speed = SPEED_1000;
3808 *duplex = DUPLEX_HALF;
3809 break;
3811 case MII_TG3_AUX_STAT_1000FULL:
3812 *speed = SPEED_1000;
3813 *duplex = DUPLEX_FULL;
3814 break;
3816 default:
3817 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3818 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3819 SPEED_10;
3820 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3821 DUPLEX_HALF;
3822 break;
3824 *speed = SPEED_INVALID;
3825 *duplex = DUPLEX_INVALID;
3826 break;
3830 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3832 int err = 0;
3833 u32 val, new_adv;
3835 new_adv = ADVERTISE_CSMA;
3836 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3837 new_adv |= mii_advertise_flowctrl(flowctrl);
3839 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3840 if (err)
3841 goto done;
3843 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3844 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3846 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3847 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3848 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3850 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3851 if (err)
3852 goto done;
3855 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3856 goto done;
3858 tw32(TG3_CPMU_EEE_MODE,
3859 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3861 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3862 if (!err) {
3863 u32 err2;
3865 val = 0;
3866 /* Advertise 100-BaseTX EEE ability */
3867 if (advertise & ADVERTISED_100baseT_Full)
3868 val |= MDIO_AN_EEE_ADV_100TX;
3869 /* Advertise 1000-BaseT EEE ability */
3870 if (advertise & ADVERTISED_1000baseT_Full)
3871 val |= MDIO_AN_EEE_ADV_1000T;
3872 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3873 if (err)
3874 val = 0;
3876 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3877 case ASIC_REV_5717:
3878 case ASIC_REV_57765:
3879 case ASIC_REV_57766:
3880 case ASIC_REV_5719:
			/* If we advertised any EEE abilities above... */
3882 if (val)
3883 val = MII_TG3_DSP_TAP26_ALNOKO |
3884 MII_TG3_DSP_TAP26_RMRXSTO |
3885 MII_TG3_DSP_TAP26_OPCSINPT;
3886 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3887 /* Fall through */
3888 case ASIC_REV_5720:
3889 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3890 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3891 MII_TG3_DSP_CH34TP2_HIBW01);
3894 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3895 if (!err)
3896 err = err2;
3899 done:
3900 return err;
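/* The conversions above use the kernel's stock ethtool/MII helpers;
 * e.g. ethtool_adv_to_mii_adv_t(ADVERTISED_100baseT_Full) yields
 * ADVERTISE_100FULL, so the same advertising word can drive both the
 * ethtool interface and the raw MII_ADVERTISE register.
 */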
3903 static void tg3_phy_copper_begin(struct tg3 *tp)
3905 u32 new_adv;
3906 int i;
3908 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3909 new_adv = ADVERTISED_10baseT_Half |
3910 ADVERTISED_10baseT_Full;
3911 if (tg3_flag(tp, WOL_SPEED_100MB))
3912 new_adv |= ADVERTISED_100baseT_Half |
3913 ADVERTISED_100baseT_Full;
3915 tg3_phy_autoneg_cfg(tp, new_adv,
3916 FLOW_CTRL_TX | FLOW_CTRL_RX);
3917 } else if (tp->link_config.speed == SPEED_INVALID) {
3918 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3919 tp->link_config.advertising &=
3920 ~(ADVERTISED_1000baseT_Half |
3921 ADVERTISED_1000baseT_Full);
3923 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3924 tp->link_config.flowctrl);
3925 } else {
3926 /* Asking for a specific link mode. */
3927 if (tp->link_config.speed == SPEED_1000) {
3928 if (tp->link_config.duplex == DUPLEX_FULL)
3929 new_adv = ADVERTISED_1000baseT_Full;
3930 else
3931 new_adv = ADVERTISED_1000baseT_Half;
3932 } else if (tp->link_config.speed == SPEED_100) {
3933 if (tp->link_config.duplex == DUPLEX_FULL)
3934 new_adv = ADVERTISED_100baseT_Full;
3935 else
3936 new_adv = ADVERTISED_100baseT_Half;
3937 } else {
3938 if (tp->link_config.duplex == DUPLEX_FULL)
3939 new_adv = ADVERTISED_10baseT_Full;
3940 else
3941 new_adv = ADVERTISED_10baseT_Half;
3944 tg3_phy_autoneg_cfg(tp, new_adv,
3945 tp->link_config.flowctrl);
3948 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3949 tp->link_config.speed != SPEED_INVALID) {
3950 u32 bmcr, orig_bmcr;
3952 tp->link_config.active_speed = tp->link_config.speed;
3953 tp->link_config.active_duplex = tp->link_config.duplex;
3955 bmcr = 0;
3956 switch (tp->link_config.speed) {
3957 default:
3958 case SPEED_10:
3959 break;
3961 case SPEED_100:
3962 bmcr |= BMCR_SPEED100;
3963 break;
3965 case SPEED_1000:
3966 bmcr |= BMCR_SPEED1000;
3967 break;
3970 if (tp->link_config.duplex == DUPLEX_FULL)
3971 bmcr |= BMCR_FULLDPLX;
3973 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3974 (bmcr != orig_bmcr)) {
3975 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3976 for (i = 0; i < 1500; i++) {
3977 u32 tmp;
3979 udelay(10);
3980 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3981 tg3_readphy(tp, MII_BMSR, &tmp))
3982 continue;
3983 if (!(tmp & BMSR_LSTATUS)) {
3984 udelay(40);
3985 break;
3988 tg3_writephy(tp, MII_BMCR, bmcr);
3989 udelay(40);
3991 } else {
3992 tg3_writephy(tp, MII_BMCR,
3993 BMCR_ANENABLE | BMCR_ANRESTART);
3997 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3999 int err;
4001 /* Turn off tap power management. */
4002 /* Set Extended packet length bit */
4003 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4005 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4006 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4007 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4008 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4009 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4011 udelay(40);
4013 return err;
4016 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4018 u32 advmsk, tgtadv, advertising;
4020 advertising = tp->link_config.advertising;
4021 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4023 advmsk = ADVERTISE_ALL;
4024 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4025 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4026 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4029 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4030 return false;
4032 if ((*lcladv & advmsk) != tgtadv)
4033 return false;
4035 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4036 u32 tg3_ctrl;
4038 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4040 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4041 return false;
4043 if (tgtadv &&
4044 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4045 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4046 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4047 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4048 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4049 } else {
4050 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4053 if (tg3_ctrl != tgtadv)
4054 return false;
4057 return true;
4060 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4062 u32 lpeth = 0;
4064 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4065 u32 val;
4067 if (tg3_readphy(tp, MII_STAT1000, &val))
4068 return false;
4070 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4073 if (tg3_readphy(tp, MII_LPA, rmtadv))
4074 return false;
4076 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4077 tp->link_config.rmt_adv = lpeth;
4079 return true;
4082 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4084 int current_link_up;
4085 u32 bmsr, val;
4086 u32 lcl_adv, rmt_adv;
4087 u16 current_speed;
4088 u8 current_duplex;
4089 int i, err;
4091 tw32(MAC_EVENT, 0);
4093 tw32_f(MAC_STATUS,
4094 (MAC_STATUS_SYNC_CHANGED |
4095 MAC_STATUS_CFG_CHANGED |
4096 MAC_STATUS_MI_COMPLETION |
4097 MAC_STATUS_LNKSTATE_CHANGED));
4098 udelay(40);
4100 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4101 tw32_f(MAC_MI_MODE,
4102 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4103 udelay(80);
4106 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4108 /* Some third-party PHYs need to be reset on link going
	 * down.
	 */
4111 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4112 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4113 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4114 netif_carrier_ok(tp->dev)) {
4115 tg3_readphy(tp, MII_BMSR, &bmsr);
4116 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4117 !(bmsr & BMSR_LSTATUS))
4118 force_reset = 1;
4120 if (force_reset)
4121 tg3_phy_reset(tp);
4123 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4124 tg3_readphy(tp, MII_BMSR, &bmsr);
4125 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4126 !tg3_flag(tp, INIT_COMPLETE))
4127 bmsr = 0;
4129 if (!(bmsr & BMSR_LSTATUS)) {
4130 err = tg3_init_5401phy_dsp(tp);
4131 if (err)
4132 return err;
4134 tg3_readphy(tp, MII_BMSR, &bmsr);
4135 for (i = 0; i < 1000; i++) {
4136 udelay(10);
4137 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4138 (bmsr & BMSR_LSTATUS)) {
4139 udelay(40);
4140 break;
4144 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4145 TG3_PHY_REV_BCM5401_B0 &&
4146 !(bmsr & BMSR_LSTATUS) &&
4147 tp->link_config.active_speed == SPEED_1000) {
4148 err = tg3_phy_reset(tp);
4149 if (!err)
4150 err = tg3_init_5401phy_dsp(tp);
4151 if (err)
4152 return err;
4155 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4156 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4157 /* 5701 {A0,B0} CRC bug workaround */
4158 tg3_writephy(tp, 0x15, 0x0a75);
4159 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4160 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4161 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4164 /* Clear pending interrupts... */
4165 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4166 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4168 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4169 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4170 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4171 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4173 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4175 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4176 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4177 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4178 else
4179 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4182 current_link_up = 0;
4183 current_speed = SPEED_INVALID;
4184 current_duplex = DUPLEX_INVALID;
4185 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4186 tp->link_config.rmt_adv = 0;
4188 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4189 err = tg3_phy_auxctl_read(tp,
4190 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4191 &val);
4192 if (!err && !(val & (1 << 10))) {
4193 tg3_phy_auxctl_write(tp,
4194 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4195 val | (1 << 10));
4196 goto relink;
4200 bmsr = 0;
4201 for (i = 0; i < 100; i++) {
4202 tg3_readphy(tp, MII_BMSR, &bmsr);
4203 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4204 (bmsr & BMSR_LSTATUS))
4205 break;
4206 udelay(40);
4209 if (bmsr & BMSR_LSTATUS) {
4210 u32 aux_stat, bmcr;
4212 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4213 for (i = 0; i < 2000; i++) {
4214 udelay(10);
4215 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4216 aux_stat)
4217 break;
4220 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4221 &current_speed,
4222 &current_duplex);
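/* Poll BMCR until it returns a stable, plausible value; 0 and 0x7fff
 * are treated as transient garbage, presumably what the PHY returns
 * while its registers have not yet settled after the link change.
 */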
4224 bmcr = 0;
4225 for (i = 0; i < 200; i++) {
4226 tg3_readphy(tp, MII_BMCR, &bmcr);
4227 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4228 continue;
4229 if (bmcr && bmcr != 0x7fff)
4230 break;
4231 udelay(10);
4234 lcl_adv = 0;
4235 rmt_adv = 0;
4237 tp->link_config.active_speed = current_speed;
4238 tp->link_config.active_duplex = current_duplex;
4240 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4241 if ((bmcr & BMCR_ANENABLE) &&
4242 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4243 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4244 current_link_up = 1;
4245 } else {
4246 if (!(bmcr & BMCR_ANENABLE) &&
4247 tp->link_config.speed == current_speed &&
4248 tp->link_config.duplex == current_duplex &&
4249 tp->link_config.flowctrl ==
4250 tp->link_config.active_flowctrl) {
4251 current_link_up = 1;
4255 if (current_link_up == 1 &&
4256 tp->link_config.active_duplex == DUPLEX_FULL) {
4257 u32 reg, bit;
4259 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4260 reg = MII_TG3_FET_GEN_STAT;
4261 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4262 } else {
4263 reg = MII_TG3_EXT_STAT;
4264 bit = MII_TG3_EXT_STAT_MDIX;
4267 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4268 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4270 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4274 relink:
4275 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4276 tg3_phy_copper_begin(tp);
4278 tg3_readphy(tp, MII_BMSR, &bmsr);
4279 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4280 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4281 current_link_up = 1;
4284 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4285 if (current_link_up == 1) {
4286 if (tp->link_config.active_speed == SPEED_100 ||
4287 tp->link_config.active_speed == SPEED_10)
4288 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4289 else
4290 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4291 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4292 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4293 else
4294 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4296 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4297 if (tp->link_config.active_duplex == DUPLEX_HALF)
4298 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4300 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4301 if (current_link_up == 1 &&
4302 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4303 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4304 else
4305 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4308 /* ??? Without this setting Netgear GA302T PHY does not
4309 * ??? send/receive packets...
4311 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4312 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4313 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4314 tw32_f(MAC_MI_MODE, tp->mi_mode);
4315 udelay(80);
4318 tw32_f(MAC_MODE, tp->mac_mode);
4319 udelay(40);
4321 tg3_phy_eee_adjust(tp, current_link_up);
4323 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4324 /* Polled via timer. */
4325 tw32_f(MAC_EVENT, 0);
4326 } else {
4327 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4329 udelay(40);
4331 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4332 current_link_up == 1 &&
4333 tp->link_config.active_speed == SPEED_1000 &&
4334 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4335 udelay(120);
4336 tw32_f(MAC_STATUS,
4337 (MAC_STATUS_SYNC_CHANGED |
4338 MAC_STATUS_CFG_CHANGED));
4339 udelay(40);
4340 tg3_write_mem(tp,
4341 NIC_SRAM_FIRMWARE_MBOX,
4342 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4345 /* Prevent send BD corruption. */
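/* On chips with the CLKREQ bug, leaving the PCIe CLKREQ power-saving
 * handshake enabled while linked at 10/100 appears to corrupt send
 * buffer descriptors, so it is disabled below gigabit speeds and
 * re-enabled otherwise.
 */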
4346 if (tg3_flag(tp, CLKREQ_BUG)) {
4347 u16 oldlnkctl, newlnkctl;
4349 pci_read_config_word(tp->pdev,
4350 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4351 &oldlnkctl);
4352 if (tp->link_config.active_speed == SPEED_100 ||
4353 tp->link_config.active_speed == SPEED_10)
4354 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4355 else
4356 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4357 if (newlnkctl != oldlnkctl)
4358 pci_write_config_word(tp->pdev,
4359 pci_pcie_cap(tp->pdev) +
4360 PCI_EXP_LNKCTL, newlnkctl);
4363 if (current_link_up != netif_carrier_ok(tp->dev)) {
4364 if (current_link_up)
4365 netif_carrier_on(tp->dev);
4366 else
4367 netif_carrier_off(tp->dev);
4368 tg3_link_report(tp);
4371 return 0;
4374 struct tg3_fiber_aneginfo {
4375 int state;
4376 #define ANEG_STATE_UNKNOWN 0
4377 #define ANEG_STATE_AN_ENABLE 1
4378 #define ANEG_STATE_RESTART_INIT 2
4379 #define ANEG_STATE_RESTART 3
4380 #define ANEG_STATE_DISABLE_LINK_OK 4
4381 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4382 #define ANEG_STATE_ABILITY_DETECT 6
4383 #define ANEG_STATE_ACK_DETECT_INIT 7
4384 #define ANEG_STATE_ACK_DETECT 8
4385 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4386 #define ANEG_STATE_COMPLETE_ACK 10
4387 #define ANEG_STATE_IDLE_DETECT_INIT 11
4388 #define ANEG_STATE_IDLE_DETECT 12
4389 #define ANEG_STATE_LINK_OK 13
4390 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4391 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4393 u32 flags;
4394 #define MR_AN_ENABLE 0x00000001
4395 #define MR_RESTART_AN 0x00000002
4396 #define MR_AN_COMPLETE 0x00000004
4397 #define MR_PAGE_RX 0x00000008
4398 #define MR_NP_LOADED 0x00000010
4399 #define MR_TOGGLE_TX 0x00000020
4400 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4401 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4402 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4403 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4404 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4405 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4406 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4407 #define MR_TOGGLE_RX 0x00002000
4408 #define MR_NP_RX 0x00004000
4410 #define MR_LINK_OK 0x80000000
4412 unsigned long link_time, cur_time;
4414 u32 ability_match_cfg;
4415 int ability_match_count;
4417 char ability_match, idle_match, ack_match;
4419 u32 txconfig, rxconfig;
4420 #define ANEG_CFG_NP 0x00000080
4421 #define ANEG_CFG_ACK 0x00000040
4422 #define ANEG_CFG_RF2 0x00000020
4423 #define ANEG_CFG_RF1 0x00000010
4424 #define ANEG_CFG_PS2 0x00000001
4425 #define ANEG_CFG_PS1 0x00008000
4426 #define ANEG_CFG_HD 0x00004000
4427 #define ANEG_CFG_FD 0x00002000
4428 #define ANEG_CFG_INVAL 0x00001f06
4431 #define ANEG_OK 0
4432 #define ANEG_DONE 1
4433 #define ANEG_TIMER_ENAB 2
4434 #define ANEG_FAILED -1
4436 #define ANEG_STATE_SETTLE_TIME 10000
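/* tg3_fiber_aneg_smachine() is a software implementation of the
 * 1000BASE-X (IEEE 802.3 Clause 37) autonegotiation state machine.
 * The caller polls it roughly once per microsecond and ap->cur_time
 * is bumped on each call, so ANEG_STATE_SETTLE_TIME (10000 ticks)
 * corresponds to about 10 ms.
 */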
4438 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4439 struct tg3_fiber_aneginfo *ap)
4441 u16 flowctrl;
4442 unsigned long delta;
4443 u32 rx_cfg_reg;
4444 int ret;
4446 if (ap->state == ANEG_STATE_UNKNOWN) {
4447 ap->rxconfig = 0;
4448 ap->link_time = 0;
4449 ap->cur_time = 0;
4450 ap->ability_match_cfg = 0;
4451 ap->ability_match_count = 0;
4452 ap->ability_match = 0;
4453 ap->idle_match = 0;
4454 ap->ack_match = 0;
4456 ap->cur_time++;
4458 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4459 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4461 if (rx_cfg_reg != ap->ability_match_cfg) {
4462 ap->ability_match_cfg = rx_cfg_reg;
4463 ap->ability_match = 0;
4464 ap->ability_match_count = 0;
4465 } else {
4466 if (++ap->ability_match_count > 1) {
4467 ap->ability_match = 1;
4468 ap->ability_match_cfg = rx_cfg_reg;
4471 if (rx_cfg_reg & ANEG_CFG_ACK)
4472 ap->ack_match = 1;
4473 else
4474 ap->ack_match = 0;
4476 ap->idle_match = 0;
4477 } else {
4478 ap->idle_match = 1;
4479 ap->ability_match_cfg = 0;
4480 ap->ability_match_count = 0;
4481 ap->ability_match = 0;
4482 ap->ack_match = 0;
4484 rx_cfg_reg = 0;
4487 ap->rxconfig = rx_cfg_reg;
4488 ret = ANEG_OK;
4490 switch (ap->state) {
4491 case ANEG_STATE_UNKNOWN:
4492 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4493 ap->state = ANEG_STATE_AN_ENABLE;
4495 /* fallthru */
4496 case ANEG_STATE_AN_ENABLE:
4497 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4498 if (ap->flags & MR_AN_ENABLE) {
4499 ap->link_time = 0;
4500 ap->cur_time = 0;
4501 ap->ability_match_cfg = 0;
4502 ap->ability_match_count = 0;
4503 ap->ability_match = 0;
4504 ap->idle_match = 0;
4505 ap->ack_match = 0;
4507 ap->state = ANEG_STATE_RESTART_INIT;
4508 } else {
4509 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4511 break;
4513 case ANEG_STATE_RESTART_INIT:
4514 ap->link_time = ap->cur_time;
4515 ap->flags &= ~(MR_NP_LOADED);
4516 ap->txconfig = 0;
4517 tw32(MAC_TX_AUTO_NEG, 0);
4518 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4519 tw32_f(MAC_MODE, tp->mac_mode);
4520 udelay(40);
4522 ret = ANEG_TIMER_ENAB;
4523 ap->state = ANEG_STATE_RESTART;
4525 /* fallthru */
4526 case ANEG_STATE_RESTART:
4527 delta = ap->cur_time - ap->link_time;
4528 if (delta > ANEG_STATE_SETTLE_TIME)
4529 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4530 else
4531 ret = ANEG_TIMER_ENAB;
4532 break;
4534 case ANEG_STATE_DISABLE_LINK_OK:
4535 ret = ANEG_DONE;
4536 break;
4538 case ANEG_STATE_ABILITY_DETECT_INIT:
4539 ap->flags &= ~(MR_TOGGLE_TX);
4540 ap->txconfig = ANEG_CFG_FD;
4541 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4542 if (flowctrl & ADVERTISE_1000XPAUSE)
4543 ap->txconfig |= ANEG_CFG_PS1;
4544 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4545 ap->txconfig |= ANEG_CFG_PS2;
4546 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4547 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4548 tw32_f(MAC_MODE, tp->mac_mode);
4549 udelay(40);
4551 ap->state = ANEG_STATE_ABILITY_DETECT;
4552 break;
4554 case ANEG_STATE_ABILITY_DETECT:
4555 if (ap->ability_match != 0 && ap->rxconfig != 0)
4556 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4557 break;
4559 case ANEG_STATE_ACK_DETECT_INIT:
4560 ap->txconfig |= ANEG_CFG_ACK;
4561 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4562 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4563 tw32_f(MAC_MODE, tp->mac_mode);
4564 udelay(40);
4566 ap->state = ANEG_STATE_ACK_DETECT;
4568 /* fallthru */
4569 case ANEG_STATE_ACK_DETECT:
4570 if (ap->ack_match != 0) {
4571 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4572 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4573 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4574 } else {
4575 ap->state = ANEG_STATE_AN_ENABLE;
4577 } else if (ap->ability_match != 0 &&
4578 ap->rxconfig == 0) {
4579 ap->state = ANEG_STATE_AN_ENABLE;
4581 break;
4583 case ANEG_STATE_COMPLETE_ACK_INIT:
4584 if (ap->rxconfig & ANEG_CFG_INVAL) {
4585 ret = ANEG_FAILED;
4586 break;
4588 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4589 MR_LP_ADV_HALF_DUPLEX |
4590 MR_LP_ADV_SYM_PAUSE |
4591 MR_LP_ADV_ASYM_PAUSE |
4592 MR_LP_ADV_REMOTE_FAULT1 |
4593 MR_LP_ADV_REMOTE_FAULT2 |
4594 MR_LP_ADV_NEXT_PAGE |
4595 MR_TOGGLE_RX |
4596 MR_NP_RX);
4597 if (ap->rxconfig & ANEG_CFG_FD)
4598 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4599 if (ap->rxconfig & ANEG_CFG_HD)
4600 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4601 if (ap->rxconfig & ANEG_CFG_PS1)
4602 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4603 if (ap->rxconfig & ANEG_CFG_PS2)
4604 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4605 if (ap->rxconfig & ANEG_CFG_RF1)
4606 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4607 if (ap->rxconfig & ANEG_CFG_RF2)
4608 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4609 if (ap->rxconfig & ANEG_CFG_NP)
4610 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4612 ap->link_time = ap->cur_time;
4614 ap->flags ^= (MR_TOGGLE_TX);
4615 if (ap->rxconfig & 0x0008)
4616 ap->flags |= MR_TOGGLE_RX;
4617 if (ap->rxconfig & ANEG_CFG_NP)
4618 ap->flags |= MR_NP_RX;
4619 ap->flags |= MR_PAGE_RX;
4621 ap->state = ANEG_STATE_COMPLETE_ACK;
4622 ret = ANEG_TIMER_ENAB;
4623 break;
4625 case ANEG_STATE_COMPLETE_ACK:
4626 if (ap->ability_match != 0 &&
4627 ap->rxconfig == 0) {
4628 ap->state = ANEG_STATE_AN_ENABLE;
4629 break;
4631 delta = ap->cur_time - ap->link_time;
4632 if (delta > ANEG_STATE_SETTLE_TIME) {
4633 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4634 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4635 } else {
4636 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4637 !(ap->flags & MR_NP_RX)) {
4638 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4639 } else {
4640 ret = ANEG_FAILED;
4644 break;
4646 case ANEG_STATE_IDLE_DETECT_INIT:
4647 ap->link_time = ap->cur_time;
4648 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4649 tw32_f(MAC_MODE, tp->mac_mode);
4650 udelay(40);
4652 ap->state = ANEG_STATE_IDLE_DETECT;
4653 ret = ANEG_TIMER_ENAB;
4654 break;
4656 case ANEG_STATE_IDLE_DETECT:
4657 if (ap->ability_match != 0 &&
4658 ap->rxconfig == 0) {
4659 ap->state = ANEG_STATE_AN_ENABLE;
4660 break;
4662 delta = ap->cur_time - ap->link_time;
4663 if (delta > ANEG_STATE_SETTLE_TIME) {
4664 /* XXX another gem from the Broadcom driver :( */
4665 ap->state = ANEG_STATE_LINK_OK;
4667 break;
4669 case ANEG_STATE_LINK_OK:
4670 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4671 ret = ANEG_DONE;
4672 break;
4674 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4675 /* ??? unimplemented */
4676 break;
4678 case ANEG_STATE_NEXT_PAGE_WAIT:
4679 /* ??? unimplemented */
4680 break;
4682 default:
4683 ret = ANEG_FAILED;
4684 break;
4687 return ret;
4690 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4692 int res = 0;
4693 struct tg3_fiber_aneginfo aninfo;
4694 int status = ANEG_FAILED;
4695 unsigned int tick;
4696 u32 tmp;
4698 tw32_f(MAC_TX_AUTO_NEG, 0);
4700 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4701 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4702 udelay(40);
4704 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4705 udelay(40);
4707 memset(&aninfo, 0, sizeof(aninfo));
4708 aninfo.flags |= MR_AN_ENABLE;
4709 aninfo.state = ANEG_STATE_UNKNOWN;
4710 aninfo.cur_time = 0;
4711 tick = 0;
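/* Run the state machine for at most 195000 one-microsecond polls
 * (~195 ms) or until it reports completion or failure.
 */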
4712 while (++tick < 195000) {
4713 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4714 if (status == ANEG_DONE || status == ANEG_FAILED)
4715 break;
4717 udelay(1);
4720 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4721 tw32_f(MAC_MODE, tp->mac_mode);
4722 udelay(40);
4724 *txflags = aninfo.txconfig;
4725 *rxflags = aninfo.flags;
4727 if (status == ANEG_DONE &&
4728 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4729 MR_LP_ADV_FULL_DUPLEX)))
4730 res = 1;
4732 return res;
4735 static void tg3_init_bcm8002(struct tg3 *tp)
4737 u32 mac_status = tr32(MAC_STATUS);
4738 int i;
4740 /* Reset when initializing for the first time or when we have a link. */
4741 if (tg3_flag(tp, INIT_COMPLETE) &&
4742 !(mac_status & MAC_STATUS_PCS_SYNCED))
4743 return;
4745 /* Set PLL lock range. */
4746 tg3_writephy(tp, 0x16, 0x8007);
4748 /* SW reset */
4749 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4751 /* Wait for reset to complete. */
4752 /* XXX schedule_timeout() ... */
4753 for (i = 0; i < 500; i++)
4754 udelay(10);
4756 /* Config mode; select PMA/Ch 1 regs. */
4757 tg3_writephy(tp, 0x10, 0x8411);
4759 /* Enable auto-lock and comdet, select txclk for tx. */
4760 tg3_writephy(tp, 0x11, 0x0a10);
4762 tg3_writephy(tp, 0x18, 0x00a0);
4763 tg3_writephy(tp, 0x16, 0x41ff);
4765 /* Assert and deassert POR. */
4766 tg3_writephy(tp, 0x13, 0x0400);
4767 udelay(40);
4768 tg3_writephy(tp, 0x13, 0x0000);
4770 tg3_writephy(tp, 0x11, 0x0a50);
4771 udelay(40);
4772 tg3_writephy(tp, 0x11, 0x0a10);
4774 /* Wait for signal to stabilize */
4775 /* XXX schedule_timeout() ... */
4776 for (i = 0; i < 15000; i++)
4777 udelay(10);
4779 /* Deselect the channel register so we can read the PHYID
4780 * later.
4782 tg3_writephy(tp, 0x10, 0x8011);
4785 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4787 u16 flowctrl;
4788 u32 sg_dig_ctrl, sg_dig_status;
4789 u32 serdes_cfg, expected_sg_dig_ctrl;
4790 int workaround, port_a;
4791 int current_link_up;
4793 serdes_cfg = 0;
4794 expected_sg_dig_ctrl = 0;
4795 workaround = 0;
4796 port_a = 1;
4797 current_link_up = 0;
4799 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4800 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4801 workaround = 1;
4802 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4803 port_a = 0;
4805 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4806 /* preserve bits 20-23 for voltage regulator */
4807 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4810 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4812 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4813 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4814 if (workaround) {
4815 u32 val = serdes_cfg;
4817 if (port_a)
4818 val |= 0xc010000;
4819 else
4820 val |= 0x4010000;
4821 tw32_f(MAC_SERDES_CFG, val);
4824 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4826 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4827 tg3_setup_flow_control(tp, 0, 0);
4828 current_link_up = 1;
4830 goto out;
4833 /* Want auto-negotiation. */
4834 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4836 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4837 if (flowctrl & ADVERTISE_1000XPAUSE)
4838 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4839 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4840 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4842 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4843 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4844 tp->serdes_counter &&
4845 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4846 MAC_STATUS_RCVD_CFG)) ==
4847 MAC_STATUS_PCS_SYNCED)) {
4848 tp->serdes_counter--;
4849 current_link_up = 1;
4850 goto out;
4852 restart_autoneg:
4853 if (workaround)
4854 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4855 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4856 udelay(5);
4857 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4859 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4860 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4861 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4862 MAC_STATUS_SIGNAL_DET)) {
4863 sg_dig_status = tr32(SG_DIG_STATUS);
4864 mac_status = tr32(MAC_STATUS);
4866 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4867 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4868 u32 local_adv = 0, remote_adv = 0;
4870 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4871 local_adv |= ADVERTISE_1000XPAUSE;
4872 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4873 local_adv |= ADVERTISE_1000XPSE_ASYM;
4875 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4876 remote_adv |= LPA_1000XPAUSE;
4877 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4878 remote_adv |= LPA_1000XPAUSE_ASYM;
4880 tp->link_config.rmt_adv =
4881 mii_adv_to_ethtool_adv_x(remote_adv);
4883 tg3_setup_flow_control(tp, local_adv, remote_adv);
4884 current_link_up = 1;
4885 tp->serdes_counter = 0;
4886 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4887 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4888 if (tp->serdes_counter)
4889 tp->serdes_counter--;
4890 else {
4891 if (workaround) {
4892 u32 val = serdes_cfg;
4894 if (port_a)
4895 val |= 0xc010000;
4896 else
4897 val |= 0x4010000;
4899 tw32_f(MAC_SERDES_CFG, val);
4902 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4903 udelay(40);
4905 /* Link parallel detection: link is up only if
4906 * we have PCS_SYNC and are not receiving
4907 * config code words. */
4908 mac_status = tr32(MAC_STATUS);
4909 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4910 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4911 tg3_setup_flow_control(tp, 0, 0);
4912 current_link_up = 1;
4913 tp->phy_flags |=
4914 TG3_PHYFLG_PARALLEL_DETECT;
4915 tp->serdes_counter =
4916 SERDES_PARALLEL_DET_TIMEOUT;
4917 } else
4918 goto restart_autoneg;
4921 } else {
4922 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4923 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4926 out:
4927 return current_link_up;
4930 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4932 int current_link_up = 0;
4934 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4935 goto out;
4937 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4938 u32 txflags, rxflags;
4939 int i;
4941 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4942 u32 local_adv = 0, remote_adv = 0;
4944 if (txflags & ANEG_CFG_PS1)
4945 local_adv |= ADVERTISE_1000XPAUSE;
4946 if (txflags & ANEG_CFG_PS2)
4947 local_adv |= ADVERTISE_1000XPSE_ASYM;
4949 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4950 remote_adv |= LPA_1000XPAUSE;
4951 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4952 remote_adv |= LPA_1000XPAUSE_ASYM;
4954 tp->link_config.rmt_adv =
4955 mii_adv_to_ethtool_adv_x(remote_adv);
4957 tg3_setup_flow_control(tp, local_adv, remote_adv);
4959 current_link_up = 1;
4961 for (i = 0; i < 30; i++) {
4962 udelay(20);
4963 tw32_f(MAC_STATUS,
4964 (MAC_STATUS_SYNC_CHANGED |
4965 MAC_STATUS_CFG_CHANGED));
4966 udelay(40);
4967 if ((tr32(MAC_STATUS) &
4968 (MAC_STATUS_SYNC_CHANGED |
4969 MAC_STATUS_CFG_CHANGED)) == 0)
4970 break;
4973 mac_status = tr32(MAC_STATUS);
4974 if (current_link_up == 0 &&
4975 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4976 !(mac_status & MAC_STATUS_RCVD_CFG))
4977 current_link_up = 1;
4978 } else {
4979 tg3_setup_flow_control(tp, 0, 0);
4981 /* Forcing 1000FD link up. */
4982 current_link_up = 1;
4984 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4985 udelay(40);
4987 tw32_f(MAC_MODE, tp->mac_mode);
4988 udelay(40);
4991 out:
4992 return current_link_up;
4995 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4997 u32 orig_pause_cfg;
4998 u16 orig_active_speed;
4999 u8 orig_active_duplex;
5000 u32 mac_status;
5001 int current_link_up;
5002 int i;
5004 orig_pause_cfg = tp->link_config.active_flowctrl;
5005 orig_active_speed = tp->link_config.active_speed;
5006 orig_active_duplex = tp->link_config.active_duplex;
5008 if (!tg3_flag(tp, HW_AUTONEG) &&
5009 netif_carrier_ok(tp->dev) &&
5010 tg3_flag(tp, INIT_COMPLETE)) {
5011 mac_status = tr32(MAC_STATUS);
5012 mac_status &= (MAC_STATUS_PCS_SYNCED |
5013 MAC_STATUS_SIGNAL_DET |
5014 MAC_STATUS_CFG_CHANGED |
5015 MAC_STATUS_RCVD_CFG);
5016 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5017 MAC_STATUS_SIGNAL_DET)) {
5018 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5019 MAC_STATUS_CFG_CHANGED));
5020 return 0;
5024 tw32_f(MAC_TX_AUTO_NEG, 0);
5026 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5027 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5028 tw32_f(MAC_MODE, tp->mac_mode);
5029 udelay(40);
5031 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5032 tg3_init_bcm8002(tp);
5034 /* Enable link change event even when serdes polling. */
5035 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5036 udelay(40);
5038 current_link_up = 0;
5039 tp->link_config.rmt_adv = 0;
5040 mac_status = tr32(MAC_STATUS);
5042 if (tg3_flag(tp, HW_AUTONEG))
5043 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5044 else
5045 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5047 tp->napi[0].hw_status->status =
5048 (SD_STATUS_UPDATED |
5049 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5051 for (i = 0; i < 100; i++) {
5052 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5053 MAC_STATUS_CFG_CHANGED));
5054 udelay(5);
5055 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5056 MAC_STATUS_CFG_CHANGED |
5057 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5058 break;
5061 mac_status = tr32(MAC_STATUS);
5062 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5063 current_link_up = 0;
5064 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5065 tp->serdes_counter == 0) {
5066 tw32_f(MAC_MODE, (tp->mac_mode |
5067 MAC_MODE_SEND_CONFIGS));
5068 udelay(1);
5069 tw32_f(MAC_MODE, tp->mac_mode);
5073 if (current_link_up == 1) {
5074 tp->link_config.active_speed = SPEED_1000;
5075 tp->link_config.active_duplex = DUPLEX_FULL;
5076 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5077 LED_CTRL_LNKLED_OVERRIDE |
5078 LED_CTRL_1000MBPS_ON));
5079 } else {
5080 tp->link_config.active_speed = SPEED_INVALID;
5081 tp->link_config.active_duplex = DUPLEX_INVALID;
5082 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5083 LED_CTRL_LNKLED_OVERRIDE |
5084 LED_CTRL_TRAFFIC_OVERRIDE));
5087 if (current_link_up != netif_carrier_ok(tp->dev)) {
5088 if (current_link_up)
5089 netif_carrier_on(tp->dev);
5090 else
5091 netif_carrier_off(tp->dev);
5092 tg3_link_report(tp);
5093 } else {
5094 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5095 if (orig_pause_cfg != now_pause_cfg ||
5096 orig_active_speed != tp->link_config.active_speed ||
5097 orig_active_duplex != tp->link_config.active_duplex)
5098 tg3_link_report(tp);
5101 return 0;
5104 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5106 int current_link_up, err = 0;
5107 u32 bmsr, bmcr;
5108 u16 current_speed;
5109 u8 current_duplex;
5110 u32 local_adv, remote_adv;
5112 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5113 tw32_f(MAC_MODE, tp->mac_mode);
5114 udelay(40);
5116 tw32(MAC_EVENT, 0);
5118 tw32_f(MAC_STATUS,
5119 (MAC_STATUS_SYNC_CHANGED |
5120 MAC_STATUS_CFG_CHANGED |
5121 MAC_STATUS_MI_COMPLETION |
5122 MAC_STATUS_LNKSTATE_CHANGED));
5123 udelay(40);
5125 if (force_reset)
5126 tg3_phy_reset(tp);
5128 current_link_up = 0;
5129 current_speed = SPEED_INVALID;
5130 current_duplex = DUPLEX_INVALID;
5131 tp->link_config.rmt_adv = 0;
5133 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5134 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5135 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5136 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5137 bmsr |= BMSR_LSTATUS;
5138 else
5139 bmsr &= ~BMSR_LSTATUS;
5142 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5144 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5145 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5146 /* do nothing, just check for link up at the end */
5147 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5148 u32 adv, newadv;
5150 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5151 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5152 ADVERTISE_1000XPAUSE |
5153 ADVERTISE_1000XPSE_ASYM |
5154 ADVERTISE_SLCT);
5156 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5157 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5159 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5160 tg3_writephy(tp, MII_ADVERTISE, newadv);
5161 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5162 tg3_writephy(tp, MII_BMCR, bmcr);
5164 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5165 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5166 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5168 return err;
5170 } else {
5171 u32 new_bmcr;
5173 bmcr &= ~BMCR_SPEED1000;
5174 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5176 if (tp->link_config.duplex == DUPLEX_FULL)
5177 new_bmcr |= BMCR_FULLDPLX;
5179 if (new_bmcr != bmcr) {
5180 /* BMCR_SPEED1000 is a reserved bit that needs
5181 * to be set on write.
5183 new_bmcr |= BMCR_SPEED1000;
5185 /* Force a linkdown */
5186 if (netif_carrier_ok(tp->dev)) {
5187 u32 adv;
5189 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5190 adv &= ~(ADVERTISE_1000XFULL |
5191 ADVERTISE_1000XHALF |
5192 ADVERTISE_SLCT);
5193 tg3_writephy(tp, MII_ADVERTISE, adv);
5194 tg3_writephy(tp, MII_BMCR, bmcr |
5195 BMCR_ANRESTART |
5196 BMCR_ANENABLE);
5197 udelay(10);
5198 netif_carrier_off(tp->dev);
5200 tg3_writephy(tp, MII_BMCR, new_bmcr);
5201 bmcr = new_bmcr;
5202 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5203 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5204 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5205 ASIC_REV_5714) {
5206 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5207 bmsr |= BMSR_LSTATUS;
5208 else
5209 bmsr &= ~BMSR_LSTATUS;
5211 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5215 if (bmsr & BMSR_LSTATUS) {
5216 current_speed = SPEED_1000;
5217 current_link_up = 1;
5218 if (bmcr & BMCR_FULLDPLX)
5219 current_duplex = DUPLEX_FULL;
5220 else
5221 current_duplex = DUPLEX_HALF;
5223 local_adv = 0;
5224 remote_adv = 0;
5226 if (bmcr & BMCR_ANENABLE) {
5227 u32 common;
5229 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5230 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5231 common = local_adv & remote_adv;
5232 if (common & (ADVERTISE_1000XHALF |
5233 ADVERTISE_1000XFULL)) {
5234 if (common & ADVERTISE_1000XFULL)
5235 current_duplex = DUPLEX_FULL;
5236 else
5237 current_duplex = DUPLEX_HALF;
5239 tp->link_config.rmt_adv =
5240 mii_adv_to_ethtool_adv_x(remote_adv);
5241 } else if (!tg3_flag(tp, 5780_CLASS)) {
5242 /* Link is up via parallel detect */
5243 } else {
5244 current_link_up = 0;
5249 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5250 tg3_setup_flow_control(tp, local_adv, remote_adv);
5252 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5253 if (tp->link_config.active_duplex == DUPLEX_HALF)
5254 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5256 tw32_f(MAC_MODE, tp->mac_mode);
5257 udelay(40);
5259 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5261 tp->link_config.active_speed = current_speed;
5262 tp->link_config.active_duplex = current_duplex;
5264 if (current_link_up != netif_carrier_ok(tp->dev)) {
5265 if (current_link_up)
5266 netif_carrier_on(tp->dev);
5267 else {
5268 netif_carrier_off(tp->dev);
5269 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5271 tg3_link_report(tp);
5273 return err;
5276 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5278 if (tp->serdes_counter) {
5279 /* Give autoneg time to complete. */
5280 tp->serdes_counter--;
5281 return;
5284 if (!netif_carrier_ok(tp->dev) &&
5285 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5286 u32 bmcr;
5288 tg3_readphy(tp, MII_BMCR, &bmcr);
5289 if (bmcr & BMCR_ANENABLE) {
5290 u32 phy1, phy2;
5292 /* Select shadow register 0x1f */
5293 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5294 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5296 /* Select expansion interrupt status register */
5297 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5298 MII_TG3_DSP_EXP1_INT_STAT);
5299 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5300 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5302 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5303 /* We have signal detect and not receiving
5304 * config code words, link is up by parallel
5305 * detection.
5308 bmcr &= ~BMCR_ANENABLE;
5309 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5310 tg3_writephy(tp, MII_BMCR, bmcr);
5311 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5314 } else if (netif_carrier_ok(tp->dev) &&
5315 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5316 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5317 u32 phy2;
5319 /* Select expansion interrupt status register */
5320 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5321 MII_TG3_DSP_EXP1_INT_STAT);
5322 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5323 if (phy2 & 0x20) {
5324 u32 bmcr;
5326 /* Config code words received, turn on autoneg. */
5327 tg3_readphy(tp, MII_BMCR, &bmcr);
5328 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5330 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5336 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5338 u32 val;
5339 int err;
5341 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5342 err = tg3_setup_fiber_phy(tp, force_reset);
5343 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5344 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5345 else
5346 err = tg3_setup_copper_phy(tp, force_reset);
5348 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5349 u32 scale;
5351 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5352 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5353 scale = 65;
5354 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5355 scale = 6;
5356 else
5357 scale = 12;
5359 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5360 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5361 tw32(GRC_MISC_CFG, val);
5364 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5365 (6 << TX_LENGTHS_IPG_SHIFT);
5366 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5367 val |= tr32(MAC_TX_LENGTHS) &
5368 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5369 TX_LENGTHS_CNT_DWN_VAL_MSK);
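/* Half-duplex gigabit requires the maximum slot time (0xff) so the
 * MAC can perform carrier extension; every other speed/duplex
 * combination uses the standard 32-byte slot time. (The carrier
 * extension rationale is inferred from the 802.3 half-duplex gigabit
 * rules, not from driver documentation.)
 */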
5371 if (tp->link_config.active_speed == SPEED_1000 &&
5372 tp->link_config.active_duplex == DUPLEX_HALF)
5373 tw32(MAC_TX_LENGTHS, val |
5374 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5375 else
5376 tw32(MAC_TX_LENGTHS, val |
5377 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5379 if (!tg3_flag(tp, 5705_PLUS)) {
5380 if (netif_carrier_ok(tp->dev)) {
5381 tw32(HOSTCC_STAT_COAL_TICKS,
5382 tp->coal.stats_block_coalesce_usecs);
5383 } else {
5384 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5388 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5389 val = tr32(PCIE_PWR_MGMT_THRESH);
5390 if (!netif_carrier_ok(tp->dev))
5391 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5392 tp->pwrmgmt_thresh;
5393 else
5394 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5395 tw32(PCIE_PWR_MGMT_THRESH, val);
5398 return err;
5401 static inline int tg3_irq_sync(struct tg3 *tp)
5403 return tp->irq_sync;
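/* tg3_rd32_loop() snapshots len bytes of register space starting at
 * offset off, storing each word at its matching offset within the
 * dump buffer so the dump mirrors the register map.
 */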
5406 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5408 int i;
5410 dst = (u32 *)((u8 *)dst + off);
5411 for (i = 0; i < len; i += sizeof(u32))
5412 *dst++ = tr32(off + i);
5415 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5417 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5418 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5419 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5420 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5421 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5422 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5423 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5424 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5425 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5426 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5427 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5428 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5429 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5430 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5431 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5432 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5433 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5434 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5435 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5437 if (tg3_flag(tp, SUPPORT_MSIX))
5438 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5440 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5441 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5442 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5443 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5444 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5445 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5446 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5447 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5449 if (!tg3_flag(tp, 5705_PLUS)) {
5450 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5451 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5452 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5455 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5456 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5457 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5458 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5459 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5461 if (tg3_flag(tp, NVRAM))
5462 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5465 static void tg3_dump_state(struct tg3 *tp)
5467 int i;
5468 u32 *regs;
5470 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5471 if (!regs) {
5472 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5473 return;
5476 if (tg3_flag(tp, PCI_EXPRESS)) {
5477 /* Read up to but not including private PCI registers */
5478 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5479 regs[i / sizeof(u32)] = tr32(i);
5480 } else
5481 tg3_dump_legacy_regs(tp, regs);
5483 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5484 if (!regs[i + 0] && !regs[i + 1] &&
5485 !regs[i + 2] && !regs[i + 3])
5486 continue;
5488 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5489 i * 4,
5490 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5493 kfree(regs);
5495 for (i = 0; i < tp->irq_cnt; i++) {
5496 struct tg3_napi *tnapi = &tp->napi[i];
5498 /* SW status block */
5499 netdev_err(tp->dev,
5500 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5502 tnapi->hw_status->status,
5503 tnapi->hw_status->status_tag,
5504 tnapi->hw_status->rx_jumbo_consumer,
5505 tnapi->hw_status->rx_consumer,
5506 tnapi->hw_status->rx_mini_consumer,
5507 tnapi->hw_status->idx[0].rx_producer,
5508 tnapi->hw_status->idx[0].tx_consumer);
5510 netdev_err(tp->dev,
5511 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5513 tnapi->last_tag, tnapi->last_irq_tag,
5514 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5515 tnapi->rx_rcb_ptr,
5516 tnapi->prodring.rx_std_prod_idx,
5517 tnapi->prodring.rx_std_cons_idx,
5518 tnapi->prodring.rx_jmb_prod_idx,
5519 tnapi->prodring.rx_jmb_cons_idx);
5523 /* This is called whenever we suspect that the system chipset is re-
5524 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5525 * is bogus tx completions. We try to recover by setting the
5526 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5527 * in the workqueue.
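*
* The BUG_ON below sanity-checks that we are not already running in
* reorder-safe mode (flag set, or indirect tx mailbox writes in use);
* reaching this path again in that state would be a driver bug.
*/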
5529 static void tg3_tx_recover(struct tg3 *tp)
5531 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5532 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5534 netdev_warn(tp->dev,
5535 "The system may be re-ordering memory-mapped I/O "
5536 "cycles to the network device, attempting to recover. "
5537 "Please report the problem to the driver maintainer "
5538 "and include system chipset information.\n");
5540 spin_lock(&tp->lock);
5541 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5542 spin_unlock(&tp->lock);
5545 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5547 /* Tell compiler to fetch tx indices from memory. */
5548 barrier();
5549 return tnapi->tx_pending -
5550 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5553 /* Tigon3 never reports partial packet sends. So we do not
5554 * need special logic to handle SKBs that have not had all
5555 * of their frags sent yet, like SunGEM does.
5557 static void tg3_tx(struct tg3_napi *tnapi)
5559 struct tg3 *tp = tnapi->tp;
5560 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5561 u32 sw_idx = tnapi->tx_cons;
5562 struct netdev_queue *txq;
5563 int index = tnapi - tp->napi;
5564 unsigned int pkts_compl = 0, bytes_compl = 0;
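/* With TSS enabled the tx-capable NAPI vectors start at napi[1], so
 * the vector index is shifted down by one to recover the tx queue
 * number.
 */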
5566 if (tg3_flag(tp, ENABLE_TSS))
5567 index--;
5569 txq = netdev_get_tx_queue(tp->dev, index);
5571 while (sw_idx != hw_idx) {
5572 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5573 struct sk_buff *skb = ri->skb;
5574 int i, tx_bug = 0;
5576 if (unlikely(skb == NULL)) {
5577 tg3_tx_recover(tp);
5578 return;
5581 pci_unmap_single(tp->pdev,
5582 dma_unmap_addr(ri, mapping),
5583 skb_headlen(skb),
5584 PCI_DMA_TODEVICE);
5586 ri->skb = NULL;
5588 while (ri->fragmented) {
5589 ri->fragmented = false;
5590 sw_idx = NEXT_TX(sw_idx);
5591 ri = &tnapi->tx_buffers[sw_idx];
5594 sw_idx = NEXT_TX(sw_idx);
5596 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5597 ri = &tnapi->tx_buffers[sw_idx];
5598 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5599 tx_bug = 1;
5601 pci_unmap_page(tp->pdev,
5602 dma_unmap_addr(ri, mapping),
5603 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5604 PCI_DMA_TODEVICE);
5606 while (ri->fragmented) {
5607 ri->fragmented = false;
5608 sw_idx = NEXT_TX(sw_idx);
5609 ri = &tnapi->tx_buffers[sw_idx];
5612 sw_idx = NEXT_TX(sw_idx);
5615 pkts_compl++;
5616 bytes_compl += skb->len;
5618 dev_kfree_skb(skb);
5620 if (unlikely(tx_bug)) {
5621 tg3_tx_recover(tp);
5622 return;
5626 netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
5628 tnapi->tx_cons = sw_idx;
5630 /* Need to make the tx_cons update visible to tg3_start_xmit()
5631 * before checking for netif_queue_stopped(). Without the
5632 * memory barrier, there is a small possibility that tg3_start_xmit()
5633 * will miss it and cause the queue to be stopped forever.
5635 smp_mb();
5637 if (unlikely(netif_tx_queue_stopped(txq) &&
5638 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5639 __netif_tx_lock(txq, smp_processor_id());
5640 if (netif_tx_queue_stopped(txq) &&
5641 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5642 netif_tx_wake_queue(txq);
5643 __netif_tx_unlock(txq);
5647 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5649 if (!ri->data)
5650 return;
5652 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5653 map_sz, PCI_DMA_FROMDEVICE);
5654 kfree(ri->data);
5655 ri->data = NULL;
5658 /* Returns size of skb allocated or < 0 on error.
5660 * We only need to fill in the address because the other members
5661 * of the RX descriptor are invariant, see tg3_init_rings.
5663 * Note the purposeful asymmetry of cpu vs. chip accesses. For
5664 * posting buffers we only dirty the first cache line of the RX
5665 * descriptor (containing the address). Whereas for the RX status
5666 * buffers the cpu only reads the last cacheline of the RX descriptor
5667 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5669 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5670 u32 opaque_key, u32 dest_idx_unmasked)
5672 struct tg3_rx_buffer_desc *desc;
5673 struct ring_info *map;
5674 u8 *data;
5675 dma_addr_t mapping;
5676 int skb_size, data_size, dest_idx;
5678 switch (opaque_key) {
5679 case RXD_OPAQUE_RING_STD:
5680 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5681 desc = &tpr->rx_std[dest_idx];
5682 map = &tpr->rx_std_buffers[dest_idx];
5683 data_size = tp->rx_pkt_map_sz;
5684 break;
5686 case RXD_OPAQUE_RING_JUMBO:
5687 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5688 desc = &tpr->rx_jmb[dest_idx].std;
5689 map = &tpr->rx_jmb_buffers[dest_idx];
5690 data_size = TG3_RX_JMB_MAP_SZ;
5691 break;
5693 default:
5694 return -EINVAL;
5697 /* Do not overwrite any of the map or rp information
5698 * until we are sure we can commit to a new buffer.
5700 * Callers depend upon this behavior and assume that
5701 * we leave everything unchanged if we fail.
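/* Size the buffer for build_skb(): the payload plus TG3_RX_OFFSET()
 * headroom, with a tail reservation for struct skb_shared_info, each
 * rounded up by SKB_DATA_ALIGN().
 */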
5703 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5704 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5705 data = kmalloc(skb_size, GFP_ATOMIC);
5706 if (!data)
5707 return -ENOMEM;
5709 mapping = pci_map_single(tp->pdev,
5710 data + TG3_RX_OFFSET(tp),
5711 data_size,
5712 PCI_DMA_FROMDEVICE);
5713 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5714 kfree(data);
5715 return -EIO;
5718 map->data = data;
5719 dma_unmap_addr_set(map, mapping, mapping);
5721 desc->addr_hi = ((u64)mapping >> 32);
5722 desc->addr_lo = ((u64)mapping & 0xffffffff);
5724 return data_size;
5727 /* We only need to copy the address over because the other
5728 * members of the RX descriptor are invariant. See notes above
5729 * tg3_alloc_rx_data for full details.
5731 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5732 struct tg3_rx_prodring_set *dpr,
5733 u32 opaque_key, int src_idx,
5734 u32 dest_idx_unmasked)
5736 struct tg3 *tp = tnapi->tp;
5737 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5738 struct ring_info *src_map, *dest_map;
5739 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5740 int dest_idx;
5742 switch (opaque_key) {
5743 case RXD_OPAQUE_RING_STD:
5744 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5745 dest_desc = &dpr->rx_std[dest_idx];
5746 dest_map = &dpr->rx_std_buffers[dest_idx];
5747 src_desc = &spr->rx_std[src_idx];
5748 src_map = &spr->rx_std_buffers[src_idx];
5749 break;
5751 case RXD_OPAQUE_RING_JUMBO:
5752 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5753 dest_desc = &dpr->rx_jmb[dest_idx].std;
5754 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5755 src_desc = &spr->rx_jmb[src_idx].std;
5756 src_map = &spr->rx_jmb_buffers[src_idx];
5757 break;
5759 default:
5760 return;
5763 dest_map->data = src_map->data;
5764 dma_unmap_addr_set(dest_map, mapping,
5765 dma_unmap_addr(src_map, mapping));
5766 dest_desc->addr_hi = src_desc->addr_hi;
5767 dest_desc->addr_lo = src_desc->addr_lo;
5769 /* Ensure that the update to the skb happens after the physical
5770 * addresses have been transferred to the new BD location.
5772 smp_wmb();
5774 src_map->data = NULL;
5777 /* The RX ring scheme is composed of multiple rings which post fresh
5778 * buffers to the chip, and one special ring the chip uses to report
5779 * status back to the host.
5781 * The special ring reports the status of received packets to the
5782 * host. The chip does not write into the original descriptor the
5783 * RX buffer was obtained from. The chip simply takes the original
5784 * descriptor as provided by the host, updates the status and length
5785 * field, then writes this into the next status ring entry.
5787 * Each ring the host uses to post buffers to the chip is described
5788 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
5789 * it is first placed into the on-chip ram. When the packet's length
5790 * is known, it walks down the TG3_BDINFO entries to select the ring.
5791 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5792 * which is within the range of the new packet's length is chosen.
5794 * The "separate ring for rx status" scheme may sound queer, but it makes
5795 * sense from a cache coherency perspective. If only the host writes
5796 * to the buffer post rings, and only the chip writes to the rx status
5797 * rings, then cache lines never move beyond shared-modified state.
5798 * If both the host and chip were to write into the same ring, cache line
5799 * eviction could occur since both entities want it in an exclusive state.
5801 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5803 struct tg3 *tp = tnapi->tp;
5804 u32 work_mask, rx_std_posted = 0;
5805 u32 std_prod_idx, jmb_prod_idx;
5806 u32 sw_idx = tnapi->rx_rcb_ptr;
5807 u16 hw_idx;
5808 int received;
5809 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5811 hw_idx = *(tnapi->rx_rcb_prod_idx);
5812 /*
5813 * We need to order the read of hw_idx and the read of
5814 * the opaque cookie.
5815 */
5816 rmb();
5817 work_mask = 0;
5818 received = 0;
5819 std_prod_idx = tpr->rx_std_prod_idx;
5820 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5821 while (sw_idx != hw_idx && budget > 0) {
5822 struct ring_info *ri;
5823 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5824 unsigned int len;
5825 struct sk_buff *skb;
5826 dma_addr_t dma_addr;
5827 u32 opaque_key, desc_idx, *post_ptr;
5828 u8 *data;
5830 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5831 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5832 if (opaque_key == RXD_OPAQUE_RING_STD) {
5833 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5834 dma_addr = dma_unmap_addr(ri, mapping);
5835 data = ri->data;
5836 post_ptr = &std_prod_idx;
5837 rx_std_posted++;
5838 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5839 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5840 dma_addr = dma_unmap_addr(ri, mapping);
5841 data = ri->data;
5842 post_ptr = &jmb_prod_idx;
5843 } else
5844 goto next_pkt_nopost;
5846 work_mask |= opaque_key;
5848 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5849 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5850 drop_it:
5851 tg3_recycle_rx(tnapi, tpr, opaque_key,
5852 desc_idx, *post_ptr);
5853 drop_it_no_recycle:
5854 /* Other statistics are tracked by the card. */
5855 tp->rx_dropped++;
5856 goto next_pkt;
5859 prefetch(data + TG3_RX_OFFSET(tp));
5860 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5861 ETH_FCS_LEN;
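/* Copy-break: packets larger than the copy threshold keep their DMA
 * buffer (a replacement is allocated for the ring), while smaller
 * packets are copied into a freshly allocated skb so the original
 * buffer can be recycled in place.
 */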
5863 if (len > TG3_RX_COPY_THRESH(tp)) {
5864 int skb_size;
5866 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5867 *post_ptr);
5868 if (skb_size < 0)
5869 goto drop_it;
5871 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5872 PCI_DMA_FROMDEVICE);
5874 skb = build_skb(data);
5875 if (!skb) {
5876 kfree(data);
5877 goto drop_it_no_recycle;
5879 skb_reserve(skb, TG3_RX_OFFSET(tp));
5880 /* Ensure that the update to the data happens
5881 * after the usage of the old DMA mapping.
5883 smp_wmb();
5885 ri->data = NULL;
5887 } else {
5888 tg3_recycle_rx(tnapi, tpr, opaque_key,
5889 desc_idx, *post_ptr);
5891 skb = netdev_alloc_skb(tp->dev,
5892 len + TG3_RAW_IP_ALIGN);
5893 if (skb == NULL)
5894 goto drop_it_no_recycle;
5896 skb_reserve(skb, TG3_RAW_IP_ALIGN);
5897 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5898 memcpy(skb->data,
5899 data + TG3_RX_OFFSET(tp),
5900 len);
5901 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5904 skb_put(skb, len);
5905 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5906 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5907 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5908 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5909 skb->ip_summed = CHECKSUM_UNNECESSARY;
5910 else
5911 skb_checksum_none_assert(skb);
5913 skb->protocol = eth_type_trans(skb, tp->dev);
5915 if (len > (tp->dev->mtu + ETH_HLEN) &&
5916 skb->protocol != htons(ETH_P_8021Q)) {
5917 dev_kfree_skb(skb);
5918 goto drop_it_no_recycle;
5921 if (desc->type_flags & RXD_FLAG_VLAN &&
5922 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5923 __vlan_hwaccel_put_tag(skb,
5924 desc->err_vlan & RXD_VLAN_MASK);
5926 napi_gro_receive(&tnapi->napi, skb);
5928 received++;
5929 budget--;
5931 next_pkt:
5932 (*post_ptr)++;
5934 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5935 tpr->rx_std_prod_idx = std_prod_idx &
5936 tp->rx_std_ring_mask;
5937 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5938 tpr->rx_std_prod_idx);
5939 work_mask &= ~RXD_OPAQUE_RING_STD;
5940 rx_std_posted = 0;
5942 next_pkt_nopost:
5943 sw_idx++;
5944 sw_idx &= tp->rx_ret_ring_mask;
5946 /* Refresh hw_idx to see if there is new work */
5947 if (sw_idx == hw_idx) {
5948 hw_idx = *(tnapi->rx_rcb_prod_idx);
5949 rmb();
5953 /* ACK the status ring. */
5954 tnapi->rx_rcb_ptr = sw_idx;
5955 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5957 /* Refill RX ring(s). */
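/* Without RSS the producer mailboxes are written directly. With RSS
 * only vector 0's rings are visible to the hardware, so each vector
 * publishes its refills to its shadow producer ring and lets vector 1
 * copy them back via tg3_rx_prodring_xfer() in tg3_poll_work().
 */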
5958 if (!tg3_flag(tp, ENABLE_RSS)) {
5959 if (work_mask & RXD_OPAQUE_RING_STD) {
5960 tpr->rx_std_prod_idx = std_prod_idx &
5961 tp->rx_std_ring_mask;
5962 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5963 tpr->rx_std_prod_idx);
5965 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5966 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5967 tp->rx_jmb_ring_mask;
5968 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5969 tpr->rx_jmb_prod_idx);
5971 mmiowb();
5972 } else if (work_mask) {
5973 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5974 * updated before the producer indices can be updated.
5976 smp_wmb();
5978 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5979 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5981 if (tnapi != &tp->napi[1])
5982 napi_schedule(&tp->napi[1].napi);
5985 return received;
5988 static void tg3_poll_link(struct tg3 *tp)
5990 /* handle link change and other phy events */
5991 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5992 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5994 if (sblk->status & SD_STATUS_LINK_CHG) {
5995 sblk->status = SD_STATUS_UPDATED |
5996 (sblk->status & ~SD_STATUS_LINK_CHG);
5997 spin_lock(&tp->lock);
5998 if (tg3_flag(tp, USE_PHYLIB)) {
5999 tw32_f(MAC_STATUS,
6000 (MAC_STATUS_SYNC_CHANGED |
6001 MAC_STATUS_CFG_CHANGED |
6002 MAC_STATUS_MI_COMPLETION |
6003 MAC_STATUS_LNKSTATE_CHANGED));
6004 udelay(40);
6005 } else
6006 tg3_setup_phy(tp, 0);
6007 spin_unlock(&tp->lock);
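/* Move freshly allocated rx buffers from a source (per-vector)
 * producer ring into the destination ring that the hardware consumes.
 * The copy proceeds in contiguous chunks, handling ring wrap, and
 * stops early with -ENOSPC if a destination slot is still occupied.
 */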
6012 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6013 struct tg3_rx_prodring_set *dpr,
6014 struct tg3_rx_prodring_set *spr)
6016 u32 si, di, cpycnt, src_prod_idx;
6017 int i, err = 0;
6019 while (1) {
6020 src_prod_idx = spr->rx_std_prod_idx;
6022 /* Make sure updates to the rx_std_buffers[] entries and the
6023 * standard producer index are seen in the correct order.
6025 smp_rmb();
6027 if (spr->rx_std_cons_idx == src_prod_idx)
6028 break;
6030 if (spr->rx_std_cons_idx < src_prod_idx)
6031 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6032 else
6033 cpycnt = tp->rx_std_ring_mask + 1 -
6034 spr->rx_std_cons_idx;
6036 cpycnt = min(cpycnt,
6037 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6039 si = spr->rx_std_cons_idx;
6040 di = dpr->rx_std_prod_idx;
6042 for (i = di; i < di + cpycnt; i++) {
6043 if (dpr->rx_std_buffers[i].data) {
6044 cpycnt = i - di;
6045 err = -ENOSPC;
6046 break;
6050 if (!cpycnt)
6051 break;
6053 /* Ensure that updates to the rx_std_buffers ring and the
6054 * shadowed hardware producer ring from tg3_recycle_skb() are
6055 * ordered correctly WRT the skb check above.
6057 smp_rmb();
6059 memcpy(&dpr->rx_std_buffers[di],
6060 &spr->rx_std_buffers[si],
6061 cpycnt * sizeof(struct ring_info));
6063 for (i = 0; i < cpycnt; i++, di++, si++) {
6064 struct tg3_rx_buffer_desc *sbd, *dbd;
6065 sbd = &spr->rx_std[si];
6066 dbd = &dpr->rx_std[di];
6067 dbd->addr_hi = sbd->addr_hi;
6068 dbd->addr_lo = sbd->addr_lo;
6071 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6072 tp->rx_std_ring_mask;
6073 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6074 tp->rx_std_ring_mask;
6077 while (1) {
6078 src_prod_idx = spr->rx_jmb_prod_idx;
6080 /* Make sure updates to the rx_jmb_buffers[] entries and
6081 * the jumbo producer index are seen in the correct order.
6083 smp_rmb();
6085 if (spr->rx_jmb_cons_idx == src_prod_idx)
6086 break;
6088 if (spr->rx_jmb_cons_idx < src_prod_idx)
6089 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6090 else
6091 cpycnt = tp->rx_jmb_ring_mask + 1 -
6092 spr->rx_jmb_cons_idx;
6094 cpycnt = min(cpycnt,
6095 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6097 si = spr->rx_jmb_cons_idx;
6098 di = dpr->rx_jmb_prod_idx;
6100 for (i = di; i < di + cpycnt; i++) {
6101 if (dpr->rx_jmb_buffers[i].data) {
6102 cpycnt = i - di;
6103 err = -ENOSPC;
6104 break;
6108 if (!cpycnt)
6109 break;
6111 /* Ensure that updates to the rx_jmb_buffers ring and the
6112 * shadowed hardware producer ring from tg3_recycle_skb() are
6113 * ordered correctly WRT the skb check above.
6115 smp_rmb();
6117 memcpy(&dpr->rx_jmb_buffers[di],
6118 &spr->rx_jmb_buffers[si],
6119 cpycnt * sizeof(struct ring_info));
6121 for (i = 0; i < cpycnt; i++, di++, si++) {
6122 struct tg3_rx_buffer_desc *sbd, *dbd;
6123 sbd = &spr->rx_jmb[si].std;
6124 dbd = &dpr->rx_jmb[di].std;
6125 dbd->addr_hi = sbd->addr_hi;
6126 dbd->addr_lo = sbd->addr_lo;
6129 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6130 tp->rx_jmb_ring_mask;
6131 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6132 tp->rx_jmb_ring_mask;
6135 return err;
6138 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6140 struct tg3 *tp = tnapi->tp;
6142 /* run TX completion thread */
6143 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6144 tg3_tx(tnapi);
6145 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6146 return work_done;
6149 /* run RX thread, within the bounds set by NAPI.
6150 * All RX "locking" is done by ensuring outside
6151 * code synchronizes with tg3->napi.poll()
6153 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6154 work_done += tg3_rx(tnapi, budget - work_done);
6156 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6157 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6158 int i, err = 0;
6159 u32 std_prod_idx = dpr->rx_std_prod_idx;
6160 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6162 for (i = 1; i < tp->irq_cnt; i++)
6163 err |= tg3_rx_prodring_xfer(tp, dpr,
6164 &tp->napi[i].prodring);
6166 wmb();
6168 if (std_prod_idx != dpr->rx_std_prod_idx)
6169 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6170 dpr->rx_std_prod_idx);
6172 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6173 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6174 dpr->rx_jmb_prod_idx);
6176 mmiowb();
6178 if (err)
6179 tw32_f(HOSTCC_MODE, tp->coal_now);
6182 return work_done;
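/* test_and_set_bit() makes scheduling idempotent: no matter how many
 * error paths request a reset, the worker is queued at most once
 * until the RESET_TASK_PENDING bit is cleared again.
 */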
6185 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6187 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6188 schedule_work(&tp->reset_task);
6191 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6193 cancel_work_sync(&tp->reset_task);
6194 tg3_flag_clear(tp, RESET_TASK_PENDING);
6197 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6199 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6200 struct tg3 *tp = tnapi->tp;
6201 int work_done = 0;
6202 struct tg3_hw_status *sblk = tnapi->hw_status;
6204 while (1) {
6205 work_done = tg3_poll_work(tnapi, work_done, budget);
6207 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6208 goto tx_recovery;
6210 if (unlikely(work_done >= budget))
6211 break;
6213 /* tp->last_tag is used in tg3_int_reenable() below
6214 * to tell the hw how much work has been processed,
6215 * so we must read it before checking for more work.
6217 tnapi->last_tag = sblk->status_tag;
6218 tnapi->last_irq_tag = tnapi->last_tag;
6219 rmb();
6221 /* check for RX/TX work to do */
6222 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6223 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6224 napi_complete(napi);
6225 /* Reenable interrupts. */
6226 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6227 mmiowb();
6228 break;
6232 return work_done;
6234 tx_recovery:
6235 /* work_done is guaranteed to be less than budget. */
6236 napi_complete(napi);
6237 tg3_reset_task_schedule(tp);
6238 return work_done;
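/* Illustrative note: the tag echo above is what makes tagged status mode
 * race-free. The driver writes the tag it last processed into bits 31:24
 * of the interrupt mailbox; if the hardware has since advanced its status
 * tag, it raises a fresh interrupt, so events arriving between the final
 * work check and napi_complete() are not lost.
 */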
6241 static void tg3_process_error(struct tg3 *tp)
6243 u32 val;
6244 bool real_error = false;
6246 if (tg3_flag(tp, ERROR_PROCESSED))
6247 return;
6249 /* Check Flow Attention register */
6250 val = tr32(HOSTCC_FLOW_ATTN);
6251 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6252 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6253 real_error = true;
6256 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6257 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6258 real_error = true;
6261 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6262 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6263 real_error = true;
6266 if (!real_error)
6267 return;
6269 tg3_dump_state(tp);
6271 tg3_flag_set(tp, ERROR_PROCESSED);
6272 tg3_reset_task_schedule(tp);
6275 static int tg3_poll(struct napi_struct *napi, int budget)
6277 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6278 struct tg3 *tp = tnapi->tp;
6279 int work_done = 0;
6280 struct tg3_hw_status *sblk = tnapi->hw_status;
6282 while (1) {
6283 if (sblk->status & SD_STATUS_ERROR)
6284 tg3_process_error(tp);
6286 tg3_poll_link(tp);
6288 work_done = tg3_poll_work(tnapi, work_done, budget);
6290 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6291 goto tx_recovery;
6293 if (unlikely(work_done >= budget))
6294 break;
6296 if (tg3_flag(tp, TAGGED_STATUS)) {
6297 /* tp->last_tag is used in tg3_int_reenable() below
6298 * to tell the hw how much work has been processed,
6299 * so we must read it before checking for more work.
6300 */
6301 tnapi->last_tag = sblk->status_tag;
6302 tnapi->last_irq_tag = tnapi->last_tag;
6303 rmb();
6304 } else
6305 sblk->status &= ~SD_STATUS_UPDATED;
6307 if (likely(!tg3_has_work(tnapi))) {
6308 napi_complete(napi);
6309 tg3_int_reenable(tnapi);
6310 break;
6314 return work_done;
6316 tx_recovery:
6317 /* work_done is guaranteed to be less than budget. */
6318 napi_complete(napi);
6319 tg3_reset_task_schedule(tp);
6320 return work_done;
6323 static void tg3_napi_disable(struct tg3 *tp)
6325 int i;
6327 for (i = tp->irq_cnt - 1; i >= 0; i--)
6328 napi_disable(&tp->napi[i].napi);
6331 static void tg3_napi_enable(struct tg3 *tp)
6333 int i;
6335 for (i = 0; i < tp->irq_cnt; i++)
6336 napi_enable(&tp->napi[i].napi);
6339 static void tg3_napi_init(struct tg3 *tp)
6341 int i;
6343 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6344 for (i = 1; i < tp->irq_cnt; i++)
6345 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6348 static void tg3_napi_fini(struct tg3 *tp)
6350 int i;
6352 for (i = 0; i < tp->irq_cnt; i++)
6353 netif_napi_del(&tp->napi[i].napi);
6356 static inline void tg3_netif_stop(struct tg3 *tp)
6358 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6359 tg3_napi_disable(tp);
6360 netif_tx_disable(tp->dev);
6363 static inline void tg3_netif_start(struct tg3 *tp)
6365 /* NOTE: unconditional netif_tx_wake_all_queues is only
6366 * appropriate so long as all callers are assured to
6367 * have free tx slots (such as after tg3_init_hw)
6368 */
6369 netif_tx_wake_all_queues(tp->dev);
6371 tg3_napi_enable(tp);
6372 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6373 tg3_enable_ints(tp);
6376 static void tg3_irq_quiesce(struct tg3 *tp)
6378 int i;
6380 BUG_ON(tp->irq_sync);
6382 tp->irq_sync = 1;
6383 smp_mb();
6385 for (i = 0; i < tp->irq_cnt; i++)
6386 synchronize_irq(tp->napi[i].irq_vec);
6389 /* Fully shut down all tg3 driver activity elsewhere in the system.
6390 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6391 * with as well. Most of the time, this is not necessary except when
6392 * shutting down the device.
6393 */
6394 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6396 spin_lock_bh(&tp->lock);
6397 if (irq_sync)
6398 tg3_irq_quiesce(tp);
6401 static inline void tg3_full_unlock(struct tg3 *tp)
6403 spin_unlock_bh(&tp->lock);
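/* Illustrative note: tg3_full_lock(tp, 1) combines tp->lock with the
 * quiesce above. Setting irq_sync, then smp_mb(), then synchronize_irq()
 * guarantees that a handler either observes tg3_irq_sync() as true and
 * bails out, or has fully completed before the caller proceeds.
 */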
6406 /* One-shot MSI handler - Chip automatically disables interrupt
6407 * after sending MSI so driver doesn't have to do it.
6408 */
6409 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6411 struct tg3_napi *tnapi = dev_id;
6412 struct tg3 *tp = tnapi->tp;
6414 prefetch(tnapi->hw_status);
6415 if (tnapi->rx_rcb)
6416 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6418 if (likely(!tg3_irq_sync(tp)))
6419 napi_schedule(&tnapi->napi);
6421 return IRQ_HANDLED;
6424 /* MSI ISR - No need to check for interrupt sharing and no need to
6425 * flush status block and interrupt mailbox. PCI ordering rules
6426 * guarantee that MSI will arrive after the status block.
6427 */
6428 static irqreturn_t tg3_msi(int irq, void *dev_id)
6430 struct tg3_napi *tnapi = dev_id;
6431 struct tg3 *tp = tnapi->tp;
6433 prefetch(tnapi->hw_status);
6434 if (tnapi->rx_rcb)
6435 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6436 /*
6437 * Writing any value to intr-mbox-0 clears PCI INTA# and
6438 * chip-internal interrupt pending events.
6439 * Writing non-zero to intr-mbox-0 additionally tells the
6440 * NIC to stop sending us irqs, engaging "in-intr-handler"
6441 * event coalescing.
6442 */
6443 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6444 if (likely(!tg3_irq_sync(tp)))
6445 napi_schedule(&tnapi->napi);
6447 return IRQ_RETVAL(1);
6450 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6452 struct tg3_napi *tnapi = dev_id;
6453 struct tg3 *tp = tnapi->tp;
6454 struct tg3_hw_status *sblk = tnapi->hw_status;
6455 unsigned int handled = 1;
6457 /* In INTx mode, it is possible for the interrupt to arrive at
6458 * the CPU before the status block posted prior to the interrupt.
6459 * Reading the PCI State register will confirm whether the
6460 * interrupt is ours and will flush the status block.
6461 */
6462 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6463 if (tg3_flag(tp, CHIP_RESETTING) ||
6464 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6465 handled = 0;
6466 goto out;
6470 /*
6471 * Writing any value to intr-mbox-0 clears PCI INTA# and
6472 * chip-internal interrupt pending events.
6473 * Writing non-zero to intr-mbox-0 additionally tells the
6474 * NIC to stop sending us irqs, engaging "in-intr-handler"
6475 * event coalescing.
6476 *
6477 * Flush the mailbox to de-assert the IRQ immediately to prevent
6478 * spurious interrupts. The flush impacts performance but
6479 * excessive spurious interrupts can be worse in some cases.
6480 */
6481 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6482 if (tg3_irq_sync(tp))
6483 goto out;
6484 sblk->status &= ~SD_STATUS_UPDATED;
6485 if (likely(tg3_has_work(tnapi))) {
6486 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6487 napi_schedule(&tnapi->napi);
6488 } else {
6489 /* No work, shared interrupt perhaps? re-enable
6490 * interrupts, and flush that PCI write
6491 */
6492 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6493 0x00000000);
6495 out:
6496 return IRQ_RETVAL(handled);
6499 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6501 struct tg3_napi *tnapi = dev_id;
6502 struct tg3 *tp = tnapi->tp;
6503 struct tg3_hw_status *sblk = tnapi->hw_status;
6504 unsigned int handled = 1;
6506 /* In INTx mode, it is possible for the interrupt to arrive at
6507 * the CPU before the status block posted prior to the interrupt.
6508 * Reading the PCI State register will confirm whether the
6509 * interrupt is ours and will flush the status block.
6510 */
6511 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6512 if (tg3_flag(tp, CHIP_RESETTING) ||
6513 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6514 handled = 0;
6515 goto out;
6519 /*
6520 * Writing any value to intr-mbox-0 clears PCI INTA# and
6521 * chip-internal interrupt pending events.
6522 * Writing non-zero to intr-mbox-0 additionally tells the
6523 * NIC to stop sending us irqs, engaging "in-intr-handler"
6524 * event coalescing.
6525 *
6526 * Flush the mailbox to de-assert the IRQ immediately to prevent
6527 * spurious interrupts. The flush impacts performance but
6528 * excessive spurious interrupts can be worse in some cases.
6529 */
6530 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6532 /*
6533 * In a shared interrupt configuration, sometimes other devices'
6534 * interrupts will scream. We record the current status tag here
6535 * so that the above check can report that the screaming interrupts
6536 * are unhandled. Eventually they will be silenced.
6537 */
6538 tnapi->last_irq_tag = sblk->status_tag;
6540 if (tg3_irq_sync(tp))
6541 goto out;
6543 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6545 napi_schedule(&tnapi->napi);
6547 out:
6548 return IRQ_RETVAL(handled);
6551 /* ISR for interrupt test */
6552 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6554 struct tg3_napi *tnapi = dev_id;
6555 struct tg3 *tp = tnapi->tp;
6556 struct tg3_hw_status *sblk = tnapi->hw_status;
6558 if ((sblk->status & SD_STATUS_UPDATED) ||
6559 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6560 tg3_disable_ints(tp);
6561 return IRQ_RETVAL(1);
6563 return IRQ_RETVAL(0);
6566 #ifdef CONFIG_NET_POLL_CONTROLLER
6567 static void tg3_poll_controller(struct net_device *dev)
6569 int i;
6570 struct tg3 *tp = netdev_priv(dev);
6572 for (i = 0; i < tp->irq_cnt; i++)
6573 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6575 #endif
6577 static void tg3_tx_timeout(struct net_device *dev)
6579 struct tg3 *tp = netdev_priv(dev);
6581 if (netif_msg_tx_err(tp)) {
6582 netdev_err(dev, "transmit timed out, resetting\n");
6583 tg3_dump_state(tp);
6586 tg3_reset_task_schedule(tp);
6589 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6590 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6592 u32 base = (u32) mapping & 0xffffffff;
6594 return (base > 0xffffdcc0) && (base + len + 8 < base);
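/* Illustrative sketch (hypothetical helper, not from the driver): the
 * wrap test above expressed in 64-bit form, minus the driver's extra
 * 0xffffdcc0 guard constant. A span crosses a 4GB-aligned boundary
 * exactly when its first and last bytes differ in the upper 32 address
 * bits:
 */
static inline int crosses_4g_example(u64 dma, u32 len)
{
	/* e.g. dma = 0xffffff00, len = 0x200: 0 != 1, so it crosses */
	return (dma >> 32) != ((dma + len + 8) >> 32);
}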
6597 /* Test for DMA addresses > 40-bit */
6598 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6599 int len)
6601 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6602 if (tg3_flag(tp, 40BIT_DMA_BUG))
6603 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6604 return 0;
6605 #else
6606 return 0;
6607 #endif
6610 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6611 dma_addr_t mapping, u32 len, u32 flags,
6612 u32 mss, u32 vlan)
6614 txbd->addr_hi = ((u64) mapping >> 32);
6615 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6616 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6617 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6620 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6621 dma_addr_t map, u32 len, u32 flags,
6622 u32 mss, u32 vlan)
6624 struct tg3 *tp = tnapi->tp;
6625 bool hwbug = false;
6627 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6628 hwbug = true;
6630 if (tg3_4g_overflow_test(map, len))
6631 hwbug = true;
6633 if (tg3_40bit_overflow_test(tp, map, len))
6634 hwbug = true;
6636 if (tp->dma_limit) {
6637 u32 prvidx = *entry;
6638 u32 tmp_flag = flags & ~TXD_FLAG_END;
6639 while (len > tp->dma_limit && *budget) {
6640 u32 frag_len = tp->dma_limit;
6641 len -= tp->dma_limit;
6643 /* Avoid the 8-byte DMA problem */
6644 if (len <= 8) {
6645 len += tp->dma_limit / 2;
6646 frag_len = tp->dma_limit / 2;
6649 tnapi->tx_buffers[*entry].fragmented = true;
6651 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6652 frag_len, tmp_flag, mss, vlan);
6653 *budget -= 1;
6654 prvidx = *entry;
6655 *entry = NEXT_TX(*entry);
6657 map += frag_len;
6660 if (len) {
6661 if (*budget) {
6662 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6663 len, flags, mss, vlan);
6664 *budget -= 1;
6665 *entry = NEXT_TX(*entry);
6666 } else {
6667 hwbug = true;
6668 tnapi->tx_buffers[prvidx].fragmented = false;
6671 } else {
6672 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6673 len, flags, mss, vlan);
6674 *entry = NEXT_TX(*entry);
6677 return hwbug;
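/* Illustrative worked example: how the dma_limit loop above sizes chunks.
 * With dma_limit = 4096 and len = 4100, a naive split of 4096 + 4 would
 * end the chain with a 4-byte BD, which SHORT_DMA_BUG hardware cannot
 * handle, so the loop instead emits 2048 (dma_limit / 2) and leaves a
 * final BD of 2052 bytes.
 */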
6680 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6682 int i;
6683 struct sk_buff *skb;
6684 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6686 skb = txb->skb;
6687 txb->skb = NULL;
6689 pci_unmap_single(tnapi->tp->pdev,
6690 dma_unmap_addr(txb, mapping),
6691 skb_headlen(skb),
6692 PCI_DMA_TODEVICE);
6694 while (txb->fragmented) {
6695 txb->fragmented = false;
6696 entry = NEXT_TX(entry);
6697 txb = &tnapi->tx_buffers[entry];
6700 for (i = 0; i <= last; i++) {
6701 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6703 entry = NEXT_TX(entry);
6704 txb = &tnapi->tx_buffers[entry];
6706 pci_unmap_page(tnapi->tp->pdev,
6707 dma_unmap_addr(txb, mapping),
6708 skb_frag_size(frag), PCI_DMA_TODEVICE);
6710 while (txb->fragmented) {
6711 txb->fragmented = false;
6712 entry = NEXT_TX(entry);
6713 txb = &tnapi->tx_buffers[entry];
6718 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6719 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6720 struct sk_buff **pskb,
6721 u32 *entry, u32 *budget,
6722 u32 base_flags, u32 mss, u32 vlan)
6724 struct tg3 *tp = tnapi->tp;
6725 struct sk_buff *new_skb, *skb = *pskb;
6726 dma_addr_t new_addr = 0;
6727 int ret = 0;
6729 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6730 new_skb = skb_copy(skb, GFP_ATOMIC);
6731 else {
6732 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6734 new_skb = skb_copy_expand(skb,
6735 skb_headroom(skb) + more_headroom,
6736 skb_tailroom(skb), GFP_ATOMIC);
6739 if (!new_skb) {
6740 ret = -1;
6741 } else {
6742 /* New SKB is guaranteed to be linear. */
6743 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6744 PCI_DMA_TODEVICE);
6745 /* Make sure the mapping succeeded */
6746 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6747 dev_kfree_skb(new_skb);
6748 ret = -1;
6749 } else {
6750 u32 save_entry = *entry;
6752 base_flags |= TXD_FLAG_END;
6754 tnapi->tx_buffers[*entry].skb = new_skb;
6755 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6756 mapping, new_addr);
6758 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6759 new_skb->len, base_flags,
6760 mss, vlan)) {
6761 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6762 dev_kfree_skb(new_skb);
6763 ret = -1;
6768 dev_kfree_skb(skb);
6769 *pskb = new_skb;
6770 return ret;
6773 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6775 /* Use GSO to work around a rare TSO bug that may be triggered when the
6776 * TSO header is greater than 80 bytes.
6777 */
6778 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6780 struct sk_buff *segs, *nskb;
6781 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6783 /* Estimate the number of fragments in the worst case */
6784 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6785 netif_stop_queue(tp->dev);
6787 /* netif_tx_stop_queue() must be done before checking
6788 * tx index in tg3_tx_avail() below, because in
6789 * tg3_tx(), we update tx index before checking for
6790 * netif_tx_queue_stopped().
6791 */
6792 smp_mb();
6793 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6794 return NETDEV_TX_BUSY;
6796 netif_wake_queue(tp->dev);
6799 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6800 if (IS_ERR(segs))
6801 goto tg3_tso_bug_end;
6803 do {
6804 nskb = segs;
6805 segs = segs->next;
6806 nskb->next = NULL;
6807 tg3_start_xmit(nskb, tp->dev);
6808 } while (segs);
6810 tg3_tso_bug_end:
6811 dev_kfree_skb(skb);
6813 return NETDEV_TX_OK;
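/* Illustrative note (an inference, not from the original comments):
 * frag_cnt_est = gso_segs * 3 looks like a worst-case allowance of
 * roughly one descriptor per segment for headers plus up to two for
 * payload, so stopping the queue below that level should leave room to
 * queue every resegmented skb.
 */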
6816 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6817 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6818 */
6819 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6821 struct tg3 *tp = netdev_priv(dev);
6822 u32 len, entry, base_flags, mss, vlan = 0;
6823 u32 budget;
6824 int i = -1, would_hit_hwbug;
6825 dma_addr_t mapping;
6826 struct tg3_napi *tnapi;
6827 struct netdev_queue *txq;
6828 unsigned int last;
6830 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6831 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6832 if (tg3_flag(tp, ENABLE_TSS))
6833 tnapi++;
6835 budget = tg3_tx_avail(tnapi);
6837 /* We are running in BH disabled context with netif_tx_lock
6838 * and TX reclaim runs via tp->napi.poll inside of a software
6839 * interrupt. Furthermore, IRQ processing runs lockless so we have
6840 * no IRQ context deadlocks to worry about either. Rejoice!
6841 */
6842 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6843 if (!netif_tx_queue_stopped(txq)) {
6844 netif_tx_stop_queue(txq);
6846 /* This is a hard error, log it. */
6847 netdev_err(dev,
6848 "BUG! Tx Ring full when queue awake!\n");
6850 return NETDEV_TX_BUSY;
6853 entry = tnapi->tx_prod;
6854 base_flags = 0;
6855 if (skb->ip_summed == CHECKSUM_PARTIAL)
6856 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6858 mss = skb_shinfo(skb)->gso_size;
6859 if (mss) {
6860 struct iphdr *iph;
6861 u32 tcp_opt_len, hdr_len;
6863 if (skb_header_cloned(skb) &&
6864 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6865 goto drop;
6867 iph = ip_hdr(skb);
6868 tcp_opt_len = tcp_optlen(skb);
6870 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6872 if (!skb_is_gso_v6(skb)) {
6873 iph->check = 0;
6874 iph->tot_len = htons(mss + hdr_len);
6877 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6878 tg3_flag(tp, TSO_BUG))
6879 return tg3_tso_bug(tp, skb);
6881 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6882 TXD_FLAG_CPU_POST_DMA);
6884 if (tg3_flag(tp, HW_TSO_1) ||
6885 tg3_flag(tp, HW_TSO_2) ||
6886 tg3_flag(tp, HW_TSO_3)) {
6887 tcp_hdr(skb)->check = 0;
6888 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6889 } else
6890 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6891 iph->daddr, 0,
6892 IPPROTO_TCP,
6895 if (tg3_flag(tp, HW_TSO_3)) {
6896 mss |= (hdr_len & 0xc) << 12;
6897 if (hdr_len & 0x10)
6898 base_flags |= 0x00000010;
6899 base_flags |= (hdr_len & 0x3e0) << 5;
6900 } else if (tg3_flag(tp, HW_TSO_2))
6901 mss |= hdr_len << 9;
6902 else if (tg3_flag(tp, HW_TSO_1) ||
6903 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6904 if (tcp_opt_len || iph->ihl > 5) {
6905 int tsflags;
6907 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6908 mss |= (tsflags << 11);
6910 } else {
6911 if (tcp_opt_len || iph->ihl > 5) {
6912 int tsflags;
6914 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6915 base_flags |= tsflags << 12;
6920 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6921 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6922 base_flags |= TXD_FLAG_JMB_PKT;
6924 if (vlan_tx_tag_present(skb)) {
6925 base_flags |= TXD_FLAG_VLAN;
6926 vlan = vlan_tx_tag_get(skb);
6929 len = skb_headlen(skb);
6931 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6932 if (pci_dma_mapping_error(tp->pdev, mapping))
6933 goto drop;
6936 tnapi->tx_buffers[entry].skb = skb;
6937 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6939 would_hit_hwbug = 0;
6941 if (tg3_flag(tp, 5701_DMA_BUG))
6942 would_hit_hwbug = 1;
6944 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6945 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6946 mss, vlan)) {
6947 would_hit_hwbug = 1;
6948 } else if (skb_shinfo(skb)->nr_frags > 0) {
6949 u32 tmp_mss = mss;
6951 if (!tg3_flag(tp, HW_TSO_1) &&
6952 !tg3_flag(tp, HW_TSO_2) &&
6953 !tg3_flag(tp, HW_TSO_3))
6954 tmp_mss = 0;
6956 /* Now loop through additional data
6957 * fragments, and queue them.
6958 */
6959 last = skb_shinfo(skb)->nr_frags - 1;
6960 for (i = 0; i <= last; i++) {
6961 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6963 len = skb_frag_size(frag);
6964 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6965 len, DMA_TO_DEVICE);
6967 tnapi->tx_buffers[entry].skb = NULL;
6968 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6969 mapping);
6970 if (dma_mapping_error(&tp->pdev->dev, mapping))
6971 goto dma_error;
6973 if (!budget ||
6974 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6975 len, base_flags |
6976 ((i == last) ? TXD_FLAG_END : 0),
6977 tmp_mss, vlan)) {
6978 would_hit_hwbug = 1;
6979 break;
6984 if (would_hit_hwbug) {
6985 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6987 /* If the workaround fails due to memory/mapping
6988 * failure, silently drop this packet.
6989 */
6990 entry = tnapi->tx_prod;
6991 budget = tg3_tx_avail(tnapi);
6992 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6993 base_flags, mss, vlan))
6994 goto drop_nofree;
6997 skb_tx_timestamp(skb);
6998 netdev_sent_queue(tp->dev, skb->len);
7000 /* Packets are ready, update Tx producer idx local and on card. */
7001 tw32_tx_mbox(tnapi->prodmbox, entry);
7003 tnapi->tx_prod = entry;
7004 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7005 netif_tx_stop_queue(txq);
7007 /* netif_tx_stop_queue() must be done before checking
7008 * tx index in tg3_tx_avail() below, because in
7009 * tg3_tx(), we update tx index before checking for
7010 * netif_tx_queue_stopped().
7011 */
7012 smp_mb();
7013 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7014 netif_tx_wake_queue(txq);
7017 mmiowb();
7018 return NETDEV_TX_OK;
7020 dma_error:
7021 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7022 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7023 drop:
7024 dev_kfree_skb(skb);
7025 drop_nofree:
7026 tp->tx_dropped++;
7027 return NETDEV_TX_OK;
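/* Illustrative note: the stop-then-recheck sequence above pairs with the
 * completion path in tg3_tx():
 *
 *	producer (here)			consumer (tg3_tx)
 *	netif_tx_stop_queue()		advance tx index
 *	smp_mb()			smp_mb()
 *	recheck tg3_tx_avail()		if stopped and space: wake queue
 *
 * Whichever side runs second observes the other's update, so a wakeup
 * cannot be lost between the availability check and the queue stop.
 */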
7030 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7032 if (enable) {
7033 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7034 MAC_MODE_PORT_MODE_MASK);
7036 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7038 if (!tg3_flag(tp, 5705_PLUS))
7039 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7041 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7042 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7043 else
7044 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7045 } else {
7046 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7048 if (tg3_flag(tp, 5705_PLUS) ||
7049 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7051 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7054 tw32(MAC_MODE, tp->mac_mode);
7055 udelay(40);
7058 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7060 u32 val, bmcr, mac_mode, ptest = 0;
7062 tg3_phy_toggle_apd(tp, false);
7063 tg3_phy_toggle_automdix(tp, 0);
7065 if (extlpbk && tg3_phy_set_extloopbk(tp))
7066 return -EIO;
7068 bmcr = BMCR_FULLDPLX;
7069 switch (speed) {
7070 case SPEED_10:
7071 break;
7072 case SPEED_100:
7073 bmcr |= BMCR_SPEED100;
7074 break;
7075 case SPEED_1000:
7076 default:
7077 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7078 speed = SPEED_100;
7079 bmcr |= BMCR_SPEED100;
7080 } else {
7081 speed = SPEED_1000;
7082 bmcr |= BMCR_SPEED1000;
7086 if (extlpbk) {
7087 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7088 tg3_readphy(tp, MII_CTRL1000, &val);
7089 val |= CTL1000_AS_MASTER |
7090 CTL1000_ENABLE_MASTER;
7091 tg3_writephy(tp, MII_CTRL1000, val);
7092 } else {
7093 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7094 MII_TG3_FET_PTEST_TRIM_2;
7095 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7097 } else
7098 bmcr |= BMCR_LOOPBACK;
7100 tg3_writephy(tp, MII_BMCR, bmcr);
7102 /* The write needs to be flushed for the FETs */
7103 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7104 tg3_readphy(tp, MII_BMCR, &bmcr);
7106 udelay(40);
7108 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7109 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7110 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7111 MII_TG3_FET_PTEST_FRC_TX_LINK |
7112 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7114 /* The write needs to be flushed for the AC131 */
7115 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7118 /* Reset to prevent losing 1st rx packet intermittently */
7119 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7120 tg3_flag(tp, 5780_CLASS)) {
7121 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7122 udelay(10);
7123 tw32_f(MAC_RX_MODE, tp->rx_mode);
7126 mac_mode = tp->mac_mode &
7127 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7128 if (speed == SPEED_1000)
7129 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7130 else
7131 mac_mode |= MAC_MODE_PORT_MODE_MII;
7133 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7134 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7136 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7137 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7138 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7139 mac_mode |= MAC_MODE_LINK_POLARITY;
7141 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7142 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7145 tw32(MAC_MODE, mac_mode);
7146 udelay(40);
7148 return 0;
7151 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7153 struct tg3 *tp = netdev_priv(dev);
7155 if (features & NETIF_F_LOOPBACK) {
7156 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7157 return;
7159 spin_lock_bh(&tp->lock);
7160 tg3_mac_loopback(tp, true);
7161 netif_carrier_on(tp->dev);
7162 spin_unlock_bh(&tp->lock);
7163 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7164 } else {
7165 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7166 return;
7168 spin_lock_bh(&tp->lock);
7169 tg3_mac_loopback(tp, false);
7170 /* Force link status check */
7171 tg3_setup_phy(tp, 1);
7172 spin_unlock_bh(&tp->lock);
7173 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7177 static netdev_features_t tg3_fix_features(struct net_device *dev,
7178 netdev_features_t features)
7180 struct tg3 *tp = netdev_priv(dev);
7182 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7183 features &= ~NETIF_F_ALL_TSO;
7185 return features;
7188 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7190 netdev_features_t changed = dev->features ^ features;
7192 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7193 tg3_set_loopback(dev, features);
7195 return 0;
7198 static void tg3_rx_prodring_free(struct tg3 *tp,
7199 struct tg3_rx_prodring_set *tpr)
7201 int i;
7203 if (tpr != &tp->napi[0].prodring) {
7204 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7205 i = (i + 1) & tp->rx_std_ring_mask)
7206 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7207 tp->rx_pkt_map_sz);
7209 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7210 for (i = tpr->rx_jmb_cons_idx;
7211 i != tpr->rx_jmb_prod_idx;
7212 i = (i + 1) & tp->rx_jmb_ring_mask) {
7213 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7214 TG3_RX_JMB_MAP_SZ);
7218 return;
7221 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7222 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7223 tp->rx_pkt_map_sz);
7225 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7226 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7227 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7228 TG3_RX_JMB_MAP_SZ);
7232 /* Initialize rx rings for packet processing.
7234 * The chip has been shut down and the driver detached from
7235 * the networking, so no interrupts or new tx packets will
7236 * end up in the driver. tp->{tx,}lock are held and thus
7237 * we may not sleep.
7238 */
7239 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7240 struct tg3_rx_prodring_set *tpr)
7242 u32 i, rx_pkt_dma_sz;
7244 tpr->rx_std_cons_idx = 0;
7245 tpr->rx_std_prod_idx = 0;
7246 tpr->rx_jmb_cons_idx = 0;
7247 tpr->rx_jmb_prod_idx = 0;
7249 if (tpr != &tp->napi[0].prodring) {
7250 memset(&tpr->rx_std_buffers[0], 0,
7251 TG3_RX_STD_BUFF_RING_SIZE(tp));
7252 if (tpr->rx_jmb_buffers)
7253 memset(&tpr->rx_jmb_buffers[0], 0,
7254 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7255 goto done;
7258 /* Zero out all descriptors. */
7259 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7261 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7262 if (tg3_flag(tp, 5780_CLASS) &&
7263 tp->dev->mtu > ETH_DATA_LEN)
7264 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7265 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7267 /* Initialize invariants of the rings, we only set this
7268 * stuff once. This works because the card does not
7269 * write into the rx buffer posting rings.
7270 */
7271 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7272 struct tg3_rx_buffer_desc *rxd;
7274 rxd = &tpr->rx_std[i];
7275 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7276 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7277 rxd->opaque = (RXD_OPAQUE_RING_STD |
7278 (i << RXD_OPAQUE_INDEX_SHIFT));
7281 /* Now allocate fresh SKBs for each rx ring. */
7282 for (i = 0; i < tp->rx_pending; i++) {
7283 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7284 netdev_warn(tp->dev,
7285 "Using a smaller RX standard ring. Only "
7286 "%d out of %d buffers were allocated "
7287 "successfully\n", i, tp->rx_pending);
7288 if (i == 0)
7289 goto initfail;
7290 tp->rx_pending = i;
7291 break;
7295 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7296 goto done;
7298 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7300 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7301 goto done;
7303 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7304 struct tg3_rx_buffer_desc *rxd;
7306 rxd = &tpr->rx_jmb[i].std;
7307 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7308 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7309 RXD_FLAG_JUMBO;
7310 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7311 (i << RXD_OPAQUE_INDEX_SHIFT));
7314 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7315 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7316 netdev_warn(tp->dev,
7317 "Using a smaller RX jumbo ring. Only %d "
7318 "out of %d buffers were allocated "
7319 "successfully\n", i, tp->rx_jumbo_pending);
7320 if (i == 0)
7321 goto initfail;
7322 tp->rx_jumbo_pending = i;
7323 break;
7327 done:
7328 return 0;
7330 initfail:
7331 tg3_rx_prodring_free(tp, tpr);
7332 return -ENOMEM;
7335 static void tg3_rx_prodring_fini(struct tg3 *tp,
7336 struct tg3_rx_prodring_set *tpr)
7338 kfree(tpr->rx_std_buffers);
7339 tpr->rx_std_buffers = NULL;
7340 kfree(tpr->rx_jmb_buffers);
7341 tpr->rx_jmb_buffers = NULL;
7342 if (tpr->rx_std) {
7343 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7344 tpr->rx_std, tpr->rx_std_mapping);
7345 tpr->rx_std = NULL;
7347 if (tpr->rx_jmb) {
7348 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7349 tpr->rx_jmb, tpr->rx_jmb_mapping);
7350 tpr->rx_jmb = NULL;
7354 static int tg3_rx_prodring_init(struct tg3 *tp,
7355 struct tg3_rx_prodring_set *tpr)
7357 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7358 GFP_KERNEL);
7359 if (!tpr->rx_std_buffers)
7360 return -ENOMEM;
7362 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7363 TG3_RX_STD_RING_BYTES(tp),
7364 &tpr->rx_std_mapping,
7365 GFP_KERNEL);
7366 if (!tpr->rx_std)
7367 goto err_out;
7369 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7370 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7371 GFP_KERNEL);
7372 if (!tpr->rx_jmb_buffers)
7373 goto err_out;
7375 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7376 TG3_RX_JMB_RING_BYTES(tp),
7377 &tpr->rx_jmb_mapping,
7378 GFP_KERNEL);
7379 if (!tpr->rx_jmb)
7380 goto err_out;
7383 return 0;
7385 err_out:
7386 tg3_rx_prodring_fini(tp, tpr);
7387 return -ENOMEM;
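/* Illustrative note: a single err_out label suffices above because
 * tg3_rx_prodring_fini() is idempotent: kfree(NULL) is a no-op, the
 * dma_free_coherent() calls are guarded by NULL checks, and every pointer
 * is reset to NULL, so it is safe on any partially built set.
 */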
7390 /* Free up pending packets in all rx/tx rings.
7392 * The chip has been shut down and the driver detached from
7393 * the networking, so no interrupts or new tx packets will
7394 * end up in the driver. tp->{tx,}lock is not held and we are not
7395 * in an interrupt context and thus may sleep.
7396 */
7397 static void tg3_free_rings(struct tg3 *tp)
7399 int i, j;
7401 for (j = 0; j < tp->irq_cnt; j++) {
7402 struct tg3_napi *tnapi = &tp->napi[j];
7404 tg3_rx_prodring_free(tp, &tnapi->prodring);
7406 if (!tnapi->tx_buffers)
7407 continue;
7409 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7410 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7412 if (!skb)
7413 continue;
7415 tg3_tx_skb_unmap(tnapi, i,
7416 skb_shinfo(skb)->nr_frags - 1);
7418 dev_kfree_skb_any(skb);
7421 netdev_reset_queue(tp->dev);
7424 /* Initialize tx/rx rings for packet processing.
7426 * The chip has been shut down and the driver detached from
7427 * the networking, so no interrupts or new tx packets will
7428 * end up in the driver. tp->{tx,}lock are held and thus
7429 * we may not sleep.
7430 */
7431 static int tg3_init_rings(struct tg3 *tp)
7433 int i;
7435 /* Free up all the SKBs. */
7436 tg3_free_rings(tp);
7438 for (i = 0; i < tp->irq_cnt; i++) {
7439 struct tg3_napi *tnapi = &tp->napi[i];
7441 tnapi->last_tag = 0;
7442 tnapi->last_irq_tag = 0;
7443 tnapi->hw_status->status = 0;
7444 tnapi->hw_status->status_tag = 0;
7445 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7447 tnapi->tx_prod = 0;
7448 tnapi->tx_cons = 0;
7449 if (tnapi->tx_ring)
7450 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7452 tnapi->rx_rcb_ptr = 0;
7453 if (tnapi->rx_rcb)
7454 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7456 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7457 tg3_free_rings(tp);
7458 return -ENOMEM;
7462 return 0;
7465 /*
7466 * Must not be invoked with interrupt sources disabled and
7467 * the hardware shut down.
7468 */
7469 static void tg3_free_consistent(struct tg3 *tp)
7471 int i;
7473 for (i = 0; i < tp->irq_cnt; i++) {
7474 struct tg3_napi *tnapi = &tp->napi[i];
7476 if (tnapi->tx_ring) {
7477 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7478 tnapi->tx_ring, tnapi->tx_desc_mapping);
7479 tnapi->tx_ring = NULL;
7482 kfree(tnapi->tx_buffers);
7483 tnapi->tx_buffers = NULL;
7485 if (tnapi->rx_rcb) {
7486 dma_free_coherent(&tp->pdev->dev,
7487 TG3_RX_RCB_RING_BYTES(tp),
7488 tnapi->rx_rcb,
7489 tnapi->rx_rcb_mapping);
7490 tnapi->rx_rcb = NULL;
7493 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7495 if (tnapi->hw_status) {
7496 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7497 tnapi->hw_status,
7498 tnapi->status_mapping);
7499 tnapi->hw_status = NULL;
7503 if (tp->hw_stats) {
7504 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7505 tp->hw_stats, tp->stats_mapping);
7506 tp->hw_stats = NULL;
7510 /*
7511 * Must not be invoked with interrupt sources disabled and
7512 * the hardware shut down. Can sleep.
7513 */
7514 static int tg3_alloc_consistent(struct tg3 *tp)
7516 int i;
7518 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7519 sizeof(struct tg3_hw_stats),
7520 &tp->stats_mapping,
7521 GFP_KERNEL);
7522 if (!tp->hw_stats)
7523 goto err_out;
7525 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7527 for (i = 0; i < tp->irq_cnt; i++) {
7528 struct tg3_napi *tnapi = &tp->napi[i];
7529 struct tg3_hw_status *sblk;
7531 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7532 TG3_HW_STATUS_SIZE,
7533 &tnapi->status_mapping,
7534 GFP_KERNEL);
7535 if (!tnapi->hw_status)
7536 goto err_out;
7538 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7539 sblk = tnapi->hw_status;
7541 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7542 goto err_out;
7544 /* If multivector TSS is enabled, vector 0 does not handle
7545 * tx interrupts. Don't allocate any resources for it.
7546 */
7547 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7548 (i && tg3_flag(tp, ENABLE_TSS))) {
7549 tnapi->tx_buffers = kzalloc(
7550 sizeof(struct tg3_tx_ring_info) *
7551 TG3_TX_RING_SIZE, GFP_KERNEL);
7552 if (!tnapi->tx_buffers)
7553 goto err_out;
7555 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7556 TG3_TX_RING_BYTES,
7557 &tnapi->tx_desc_mapping,
7558 GFP_KERNEL);
7559 if (!tnapi->tx_ring)
7560 goto err_out;
7563 /*
7564 * When RSS is enabled, the status block format changes
7565 * slightly. The "rx_jumbo_consumer", "reserved",
7566 * and "rx_mini_consumer" members get mapped to the
7567 * other three rx return ring producer indexes.
7568 */
7569 switch (i) {
7570 default:
7571 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7572 break;
7573 case 2:
7574 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7575 break;
7576 case 3:
7577 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7578 break;
7579 case 4:
7580 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7581 break;
7584 /*
7585 * If multivector RSS is enabled, vector 0 does not handle
7586 * rx or tx interrupts. Don't allocate any resources for it.
7587 */
7588 if (!i && tg3_flag(tp, ENABLE_RSS))
7589 continue;
7591 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7592 TG3_RX_RCB_RING_BYTES(tp),
7593 &tnapi->rx_rcb_mapping,
7594 GFP_KERNEL);
7595 if (!tnapi->rx_rcb)
7596 goto err_out;
7598 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7601 return 0;
7603 err_out:
7604 tg3_free_consistent(tp);
7605 return -ENOMEM;
7608 #define MAX_WAIT_CNT 1000
7610 /* To stop a block, clear the enable bit and poll till it
7611 * clears. tp->lock is held.
7612 */
7613 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7615 unsigned int i;
7616 u32 val;
7618 if (tg3_flag(tp, 5705_PLUS)) {
7619 switch (ofs) {
7620 case RCVLSC_MODE:
7621 case DMAC_MODE:
7622 case MBFREE_MODE:
7623 case BUFMGR_MODE:
7624 case MEMARB_MODE:
7625 /* We can't enable/disable these bits of the
7626 * 5705/5750, just say success.
7627 */
7628 return 0;
7630 default:
7631 break;
7635 val = tr32(ofs);
7636 val &= ~enable_bit;
7637 tw32_f(ofs, val);
7639 for (i = 0; i < MAX_WAIT_CNT; i++) {
7640 udelay(100);
7641 val = tr32(ofs);
7642 if ((val & enable_bit) == 0)
7643 break;
7646 if (i == MAX_WAIT_CNT && !silent) {
7647 dev_err(&tp->pdev->dev,
7648 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7649 ofs, enable_bit);
7650 return -ENODEV;
7653 return 0;
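/* Illustrative note: the poll loop above budgets MAX_WAIT_CNT (1000)
 * iterations of udelay(100), i.e. up to ~100ms for the block to
 * acknowledge the cleared enable bit before -ENODEV is returned.
 */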
7656 /* tp->lock is held. */
7657 static int tg3_abort_hw(struct tg3 *tp, int silent)
7659 int i, err;
7661 tg3_disable_ints(tp);
7663 tp->rx_mode &= ~RX_MODE_ENABLE;
7664 tw32_f(MAC_RX_MODE, tp->rx_mode);
7665 udelay(10);
7667 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7668 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7669 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7670 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7671 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7672 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7674 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7675 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7676 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7677 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7678 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7679 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7680 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7682 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7683 tw32_f(MAC_MODE, tp->mac_mode);
7684 udelay(40);
7686 tp->tx_mode &= ~TX_MODE_ENABLE;
7687 tw32_f(MAC_TX_MODE, tp->tx_mode);
7689 for (i = 0; i < MAX_WAIT_CNT; i++) {
7690 udelay(100);
7691 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7692 break;
7694 if (i >= MAX_WAIT_CNT) {
7695 dev_err(&tp->pdev->dev,
7696 "%s timed out, TX_MODE_ENABLE will not clear "
7697 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7698 err |= -ENODEV;
7701 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7702 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7703 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7705 tw32(FTQ_RESET, 0xffffffff);
7706 tw32(FTQ_RESET, 0x00000000);
7708 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7709 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7711 for (i = 0; i < tp->irq_cnt; i++) {
7712 struct tg3_napi *tnapi = &tp->napi[i];
7713 if (tnapi->hw_status)
7714 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7717 return err;
7720 /* Save PCI command register before chip reset */
7721 static void tg3_save_pci_state(struct tg3 *tp)
7723 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7726 /* Restore PCI state after chip reset */
7727 static void tg3_restore_pci_state(struct tg3 *tp)
7729 u32 val;
7731 /* Re-enable indirect register accesses. */
7732 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7733 tp->misc_host_ctrl);
7735 /* Set MAX PCI retry to zero. */
7736 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7737 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7738 tg3_flag(tp, PCIX_MODE))
7739 val |= PCISTATE_RETRY_SAME_DMA;
7740 /* Allow reads and writes to the APE register and memory space. */
7741 if (tg3_flag(tp, ENABLE_APE))
7742 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7743 PCISTATE_ALLOW_APE_SHMEM_WR |
7744 PCISTATE_ALLOW_APE_PSPACE_WR;
7745 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7747 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7749 if (!tg3_flag(tp, PCI_EXPRESS)) {
7750 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7751 tp->pci_cacheline_sz);
7752 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7753 tp->pci_lat_timer);
7756 /* Make sure PCI-X relaxed ordering bit is clear. */
7757 if (tg3_flag(tp, PCIX_MODE)) {
7758 u16 pcix_cmd;
7760 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7761 &pcix_cmd);
7762 pcix_cmd &= ~PCI_X_CMD_ERO;
7763 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7764 pcix_cmd);
7767 if (tg3_flag(tp, 5780_CLASS)) {
7769 /* Chip reset on 5780 will reset MSI enable bit,
7770 * so we need to restore it.
7771 */
7772 if (tg3_flag(tp, USING_MSI)) {
7773 u16 ctrl;
7775 pci_read_config_word(tp->pdev,
7776 tp->msi_cap + PCI_MSI_FLAGS,
7777 &ctrl);
7778 pci_write_config_word(tp->pdev,
7779 tp->msi_cap + PCI_MSI_FLAGS,
7780 ctrl | PCI_MSI_FLAGS_ENABLE);
7781 val = tr32(MSGINT_MODE);
7782 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7787 /* tp->lock is held. */
7788 static int tg3_chip_reset(struct tg3 *tp)
7790 u32 val;
7791 void (*write_op)(struct tg3 *, u32, u32);
7792 int i, err;
7794 tg3_nvram_lock(tp);
7796 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7798 /* No matching tg3_nvram_unlock() after this because
7799 * chip reset below will undo the nvram lock.
7800 */
7801 tp->nvram_lock_cnt = 0;
7803 /* GRC_MISC_CFG core clock reset will clear the memory
7804 * enable bit in PCI register 4 and the MSI enable bit
7805 * on some chips, so we save relevant registers here.
7806 */
7807 tg3_save_pci_state(tp);
7809 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7810 tg3_flag(tp, 5755_PLUS))
7811 tw32(GRC_FASTBOOT_PC, 0);
7813 /*
7814 * We must avoid the readl() that normally takes place.
7815 * It locks machines, causes machine checks, and other
7816 * fun things. So, temporarily disable the 5701
7817 * hardware workaround, while we do the reset.
7818 */
7819 write_op = tp->write32;
7820 if (write_op == tg3_write_flush_reg32)
7821 tp->write32 = tg3_write32;
7823 /* Prevent the irq handler from reading or writing PCI registers
7824 * during chip reset when the memory enable bit in the PCI command
7825 * register may be cleared. The chip does not generate interrupts
7826 * at this time, but the irq handler may still be called due to irq
7827 * sharing or irqpoll.
7828 */
7829 tg3_flag_set(tp, CHIP_RESETTING);
7830 for (i = 0; i < tp->irq_cnt; i++) {
7831 struct tg3_napi *tnapi = &tp->napi[i];
7832 if (tnapi->hw_status) {
7833 tnapi->hw_status->status = 0;
7834 tnapi->hw_status->status_tag = 0;
7836 tnapi->last_tag = 0;
7837 tnapi->last_irq_tag = 0;
7839 smp_mb();
7841 for (i = 0; i < tp->irq_cnt; i++)
7842 synchronize_irq(tp->napi[i].irq_vec);
7844 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7845 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7846 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7849 /* do the reset */
7850 val = GRC_MISC_CFG_CORECLK_RESET;
7852 if (tg3_flag(tp, PCI_EXPRESS)) {
7853 /* Force PCIe 1.0a mode */
7854 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7855 !tg3_flag(tp, 57765_PLUS) &&
7856 tr32(TG3_PCIE_PHY_TSTCTL) ==
7857 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7858 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7860 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7861 tw32(GRC_MISC_CFG, (1 << 29));
7862 val |= (1 << 29);
7866 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7867 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7868 tw32(GRC_VCPU_EXT_CTRL,
7869 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7872 /* Manage gphy power for all CPMU absent PCIe devices. */
7873 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7874 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7876 tw32(GRC_MISC_CFG, val);
7878 /* restore 5701 hardware bug workaround write method */
7879 tp->write32 = write_op;
7881 /* Unfortunately, we have to delay before the PCI read back.
7882 * Some 575X chips even will not respond to a PCI cfg access
7883 * when the reset command is given to the chip.
7885 * How do these hardware designers expect things to work
7886 * properly if the PCI write is posted for a long period
7887 * of time? It is always necessary to have some method by
7888 * which a register read back can occur to push the write
7889 * out which does the reset.
7891 * For most tg3 variants the trick below was working.
7892 * Ho hum...
7893 */
7894 udelay(120);
7896 /* Flush PCI posted writes. The normal MMIO registers
7897 * are inaccessible at this time so this is the only
7898 * way to make this reliable (actually, this is no longer
7899 * the case, see above). I tried to use indirect
7900 * register read/write but this upset some 5701 variants.
7901 */
7902 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7904 udelay(120);
7906 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7907 u16 val16;
7909 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7910 int i;
7911 u32 cfg_val;
7913 /* Wait for link training to complete. */
7914 for (i = 0; i < 5000; i++)
7915 udelay(100);
7917 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7918 pci_write_config_dword(tp->pdev, 0xc4,
7919 cfg_val | (1 << 15));
7922 /* Clear the "no snoop" and "relaxed ordering" bits. */
7923 pci_read_config_word(tp->pdev,
7924 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7925 &val16);
7926 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7927 PCI_EXP_DEVCTL_NOSNOOP_EN);
7928 /*
7929 * Older PCIe devices only support the 128 byte
7930 * MPS setting. Enforce the restriction.
7931 */
7932 if (!tg3_flag(tp, CPMU_PRESENT))
7933 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7934 pci_write_config_word(tp->pdev,
7935 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7936 val16);
7938 /* Clear error status */
7939 pci_write_config_word(tp->pdev,
7940 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7941 PCI_EXP_DEVSTA_CED |
7942 PCI_EXP_DEVSTA_NFED |
7943 PCI_EXP_DEVSTA_FED |
7944 PCI_EXP_DEVSTA_URD);
7947 tg3_restore_pci_state(tp);
7949 tg3_flag_clear(tp, CHIP_RESETTING);
7950 tg3_flag_clear(tp, ERROR_PROCESSED);
7952 val = 0;
7953 if (tg3_flag(tp, 5780_CLASS))
7954 val = tr32(MEMARB_MODE);
7955 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7957 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7958 tg3_stop_fw(tp);
7959 tw32(0x5000, 0x400);
7962 tw32(GRC_MODE, tp->grc_mode);
7964 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7965 val = tr32(0xc4);
7967 tw32(0xc4, val | (1 << 15));
7970 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7972 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7973 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7974 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7975 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7978 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7979 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7980 val = tp->mac_mode;
7981 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7982 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7983 val = tp->mac_mode;
7984 } else
7985 val = 0;
7987 tw32_f(MAC_MODE, val);
7988 udelay(40);
7990 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7992 err = tg3_poll_fw(tp);
7993 if (err)
7994 return err;
7996 tg3_mdio_start(tp);
7998 if (tg3_flag(tp, PCI_EXPRESS) &&
7999 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8000 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8001 !tg3_flag(tp, 57765_PLUS)) {
8002 val = tr32(0x7c00);
8004 tw32(0x7c00, val | (1 << 25));
8007 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8008 val = tr32(TG3_CPMU_CLCK_ORIDE);
8009 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8012 /* Reprobe ASF enable state. */
8013 tg3_flag_clear(tp, ENABLE_ASF);
8014 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8015 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8016 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8017 u32 nic_cfg;
8019 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8020 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8021 tg3_flag_set(tp, ENABLE_ASF);
8022 tp->last_event_jiffies = jiffies;
8023 if (tg3_flag(tp, 5750_PLUS))
8024 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8028 return 0;
8031 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
8032 struct rtnl_link_stats64 *);
8033 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
8034 struct tg3_ethtool_stats *);
8036 /* tp->lock is held. */
8037 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8039 int err;
8041 tg3_stop_fw(tp);
8043 tg3_write_sig_pre_reset(tp, kind);
8045 tg3_abort_hw(tp, silent);
8046 err = tg3_chip_reset(tp);
8048 __tg3_set_mac_addr(tp, 0);
8050 tg3_write_sig_legacy(tp, kind);
8051 tg3_write_sig_post_reset(tp, kind);
8053 if (tp->hw_stats) {
8054 /* Save the stats across chip resets... */
8055 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
8056 tg3_get_estats(tp, &tp->estats_prev);
8058 /* And make sure the next sample is new data */
8059 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8062 if (err)
8063 return err;
8065 return 0;
8068 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8070 struct tg3 *tp = netdev_priv(dev);
8071 struct sockaddr *addr = p;
8072 int err = 0, skip_mac_1 = 0;
8074 if (!is_valid_ether_addr(addr->sa_data))
8075 return -EINVAL;
8077 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8079 if (!netif_running(dev))
8080 return 0;
8082 if (tg3_flag(tp, ENABLE_ASF)) {
8083 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8085 addr0_high = tr32(MAC_ADDR_0_HIGH);
8086 addr0_low = tr32(MAC_ADDR_0_LOW);
8087 addr1_high = tr32(MAC_ADDR_1_HIGH);
8088 addr1_low = tr32(MAC_ADDR_1_LOW);
8090 /* Skip MAC addr 1 if ASF is using it. */
8091 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8092 !(addr1_high == 0 && addr1_low == 0))
8093 skip_mac_1 = 1;
8095 spin_lock_bh(&tp->lock);
8096 __tg3_set_mac_addr(tp, skip_mac_1);
8097 spin_unlock_bh(&tp->lock);
8099 return err;
8102 /* tp->lock is held. */
8103 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8104 dma_addr_t mapping, u32 maxlen_flags,
8105 u32 nic_addr)
8107 tg3_write_mem(tp,
8108 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8109 ((u64) mapping >> 32));
8110 tg3_write_mem(tp,
8111 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8112 ((u64) mapping & 0xffffffff));
8113 tg3_write_mem(tp,
8114 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8115 maxlen_flags);
8117 if (!tg3_flag(tp, 5705_PLUS))
8118 tg3_write_mem(tp,
8119 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8120 nic_addr);
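/* Illustrative note: the BDINFO host address is a pair of 32-bit words in
 * NIC memory, hence the split above:
 *	hi = (u32)((u64)mapping >> 32);
 *	lo = (u32)((u64)mapping & 0xffffffff);
 */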
8123 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8125 int i;
8127 if (!tg3_flag(tp, ENABLE_TSS)) {
8128 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8129 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8130 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8131 } else {
8132 tw32(HOSTCC_TXCOL_TICKS, 0);
8133 tw32(HOSTCC_TXMAX_FRAMES, 0);
8134 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8137 if (!tg3_flag(tp, ENABLE_RSS)) {
8138 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8139 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8140 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8141 } else {
8142 tw32(HOSTCC_RXCOL_TICKS, 0);
8143 tw32(HOSTCC_RXMAX_FRAMES, 0);
8144 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8147 if (!tg3_flag(tp, 5705_PLUS)) {
8148 u32 val = ec->stats_block_coalesce_usecs;
8150 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8151 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8153 if (!netif_carrier_ok(tp->dev))
8154 val = 0;
8156 tw32(HOSTCC_STAT_COAL_TICKS, val);
8159 for (i = 0; i < tp->irq_cnt - 1; i++) {
8160 u32 reg;
8162 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8163 tw32(reg, ec->rx_coalesce_usecs);
8164 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8165 tw32(reg, ec->rx_max_coalesced_frames);
8166 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8167 tw32(reg, ec->rx_max_coalesced_frames_irq);
8169 if (tg3_flag(tp, ENABLE_TSS)) {
8170 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8171 tw32(reg, ec->tx_coalesce_usecs);
8172 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8173 tw32(reg, ec->tx_max_coalesced_frames);
8174 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8175 tw32(reg, ec->tx_max_coalesced_frames_irq);
8179 for (; i < tp->irq_max - 1; i++) {
8180 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8181 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8182 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8184 if (tg3_flag(tp, ENABLE_TSS)) {
8185 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8186 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8187 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
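/* Illustrative note: the per-vector coalescing banks above sit at a fixed
 * 0x18-byte stride, so vector n (n >= 1) is programmed at
 * HOSTCC_RXCOL_TICKS_VEC1 + (n - 1) * 0x18, and banks beyond the last
 * active vector are zeroed to disable them.
 */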
8192 /* tp->lock is held. */
8193 static void tg3_rings_reset(struct tg3 *tp)
8195 int i;
8196 u32 stblk, txrcb, rxrcb, limit;
8197 struct tg3_napi *tnapi = &tp->napi[0];
8199 /* Disable all transmit rings but the first. */
8200 if (!tg3_flag(tp, 5705_PLUS))
8201 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8202 else if (tg3_flag(tp, 5717_PLUS))
8203 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8204 else if (tg3_flag(tp, 57765_CLASS))
8205 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8206 else
8207 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8209 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8210 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8211 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8212 BDINFO_FLAGS_DISABLED);
8215 /* Disable all receive return rings but the first. */
8216 if (tg3_flag(tp, 5717_PLUS))
8217 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8218 else if (!tg3_flag(tp, 5705_PLUS))
8219 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8220 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8221 tg3_flag(tp, 57765_CLASS))
8222 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8223 else
8224 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8226 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8227 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8228 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8229 BDINFO_FLAGS_DISABLED);
8231 /* Disable interrupts */
8232 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8233 tp->napi[0].chk_msi_cnt = 0;
8234 tp->napi[0].last_rx_cons = 0;
8235 tp->napi[0].last_tx_cons = 0;
8237 /* Zero mailbox registers. */
8238 if (tg3_flag(tp, SUPPORT_MSIX)) {
8239 for (i = 1; i < tp->irq_max; i++) {
8240 tp->napi[i].tx_prod = 0;
8241 tp->napi[i].tx_cons = 0;
8242 if (tg3_flag(tp, ENABLE_TSS))
8243 tw32_mailbox(tp->napi[i].prodmbox, 0);
8244 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8245 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8246 tp->napi[i].chk_msi_cnt = 0;
8247 tp->napi[i].last_rx_cons = 0;
8248 tp->napi[i].last_tx_cons = 0;
8250 if (!tg3_flag(tp, ENABLE_TSS))
8251 tw32_mailbox(tp->napi[0].prodmbox, 0);
8252 } else {
8253 tp->napi[0].tx_prod = 0;
8254 tp->napi[0].tx_cons = 0;
8255 tw32_mailbox(tp->napi[0].prodmbox, 0);
8256 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8259 /* Make sure the NIC-based send BD rings are disabled. */
8260 if (!tg3_flag(tp, 5705_PLUS)) {
8261 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8262 for (i = 0; i < 16; i++)
8263 tw32_tx_mbox(mbox + i * 8, 0);
8266 txrcb = NIC_SRAM_SEND_RCB;
8267 rxrcb = NIC_SRAM_RCV_RET_RCB;
8269 /* Clear status block in ram. */
8270 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8272 /* Set status block DMA address */
8273 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8274 ((u64) tnapi->status_mapping >> 32));
8275 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8276 ((u64) tnapi->status_mapping & 0xffffffff));
8278 if (tnapi->tx_ring) {
8279 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8280 (TG3_TX_RING_SIZE <<
8281 BDINFO_FLAGS_MAXLEN_SHIFT),
8282 NIC_SRAM_TX_BUFFER_DESC);
8283 txrcb += TG3_BDINFO_SIZE;
8286 if (tnapi->rx_rcb) {
8287 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8288 (tp->rx_ret_ring_mask + 1) <<
8289 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8290 rxrcb += TG3_BDINFO_SIZE;
8293 stblk = HOSTCC_STATBLCK_RING1;
8295 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8296 u64 mapping = (u64)tnapi->status_mapping;
8297 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8298 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8300 /* Clear status block in ram. */
8301 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8303 if (tnapi->tx_ring) {
8304 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8305 (TG3_TX_RING_SIZE <<
8306 BDINFO_FLAGS_MAXLEN_SHIFT),
8307 NIC_SRAM_TX_BUFFER_DESC);
8308 txrcb += TG3_BDINFO_SIZE;
8311 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8312 ((tp->rx_ret_ring_mask + 1) <<
8313 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8315 stblk += 8;
8316 rxrcb += TG3_BDINFO_SIZE;
8317 }
8318 }
8320 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8321 {
8322 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8324 if (!tg3_flag(tp, 5750_PLUS) ||
8325 tg3_flag(tp, 5780_CLASS) ||
8326 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8327 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8328 tg3_flag(tp, 57765_PLUS))
8329 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8330 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8331 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8332 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8333 else
8334 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8336 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8337 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8339 val = min(nic_rep_thresh, host_rep_thresh);
8340 tw32(RCVBDI_STD_THRESH, val);
8342 if (tg3_flag(tp, 57765_PLUS))
8343 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8345 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8346 return;
8348 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8350 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8352 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8353 tw32(RCVBDI_JUMBO_THRESH, val);
8355 if (tg3_flag(tp, 57765_PLUS))
8356 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8357 }
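/* A note on the thresholds above: the chip prefetches standard-ring
 * receive BDs into a small on-chip BD cache (whose size is the
 * per-ASIC bdcache_maxcnt selected above). The value written to
 * RCVBDI_STD_THRESH is min(half that cache, 1/8 of the host ring),
 * so the NIC fetches descriptors in bursts without letting its cache
 * run dry; on 57765_PLUS parts the *_REPLENISH_LWM registers
 * additionally set a low-water mark at the full cache size.
 */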
8359 static inline u32 calc_crc(unsigned char *buf, int len)
8360 {
8361 u32 reg;
8362 u32 tmp;
8363 int j, k;
8365 reg = 0xffffffff;
8367 for (j = 0; j < len; j++) {
8368 reg ^= buf[j];
8370 for (k = 0; k < 8; k++) {
8371 tmp = reg & 0x01;
8373 reg >>= 1;
8375 if (tmp)
8376 reg ^= 0xedb88320;
8377 }
8378 }
8380 return ~reg;
8381 }
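/* calc_crc() above is the standard bit-serial, LSB-first CRC-32
 * (reflected polynomial 0xedb88320, initial value all-ones, result
 * complemented) -- i.e. the Ethernet FCS CRC, functionally equivalent
 * to ~crc32_le(~0, buf, len) from <linux/crc32.h>. In this driver it
 * is only used below to hash multicast addresses into the MAC hash
 * filter.
 */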
8383 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8384 {
8385 /* accept or reject all multicast frames */
8386 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8387 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8388 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8389 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8390 }
8392 static void __tg3_set_rx_mode(struct net_device *dev)
8393 {
8394 struct tg3 *tp = netdev_priv(dev);
8395 u32 rx_mode;
8397 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8398 RX_MODE_KEEP_VLAN_TAG);
8400 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8401 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8402 * flag clear.
8403 */
8404 if (!tg3_flag(tp, ENABLE_ASF))
8405 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8406 #endif
8408 if (dev->flags & IFF_PROMISC) {
8409 /* Promiscuous mode. */
8410 rx_mode |= RX_MODE_PROMISC;
8411 } else if (dev->flags & IFF_ALLMULTI) {
8412 /* Accept all multicast. */
8413 tg3_set_multi(tp, 1);
8414 } else if (netdev_mc_empty(dev)) {
8415 /* Reject all multicast. */
8416 tg3_set_multi(tp, 0);
8417 } else {
8418 /* Accept one or more multicast(s). */
8419 struct netdev_hw_addr *ha;
8420 u32 mc_filter[4] = { 0, };
8421 u32 regidx;
8422 u32 bit;
8423 u32 crc;
8425 netdev_for_each_mc_addr(ha, dev) {
8426 crc = calc_crc(ha->addr, ETH_ALEN);
8427 bit = ~crc & 0x7f;
8428 regidx = (bit & 0x60) >> 5;
8429 bit &= 0x1f;
8430 mc_filter[regidx] |= (1 << bit);
8431 }
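/* The complemented CRC's low seven bits select one of the 128
 * multicast filter bits spread across the four 32-bit MAC_HASH_REG_*
 * registers: bits 6:5 pick the register, bits 4:0 the bit within it.
 * E.g. crc == 0xffffff80 gives ~crc & 0x7f == 0x7f, which sets bit 31
 * of MAC_HASH_REG_3.
 */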
8433 tw32(MAC_HASH_REG_0, mc_filter[0]);
8434 tw32(MAC_HASH_REG_1, mc_filter[1]);
8435 tw32(MAC_HASH_REG_2, mc_filter[2]);
8436 tw32(MAC_HASH_REG_3, mc_filter[3]);
8437 }
8439 if (rx_mode != tp->rx_mode) {
8440 tp->rx_mode = rx_mode;
8441 tw32_f(MAC_RX_MODE, rx_mode);
8442 udelay(10);
8443 }
8444 }
8446 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8447 {
8448 int i;
8450 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8451 tp->rss_ind_tbl[i] =
8452 ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8453 }
8455 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8456 {
8457 int i;
8459 if (!tg3_flag(tp, SUPPORT_MSIX))
8460 return;
8462 if (tp->irq_cnt <= 2) {
8463 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8464 return;
8465 }
8467 /* Validate table against current IRQ count */
8468 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8469 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8470 break;
8471 }
8473 if (i != TG3_RSS_INDIR_TBL_SIZE)
8474 tg3_rss_init_dflt_indir_tbl(tp);
8475 }
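/* With two or fewer vectors there is only a single RX ring, so the
 * indirection table is simply zeroed. Otherwise any stale entry that
 * references a ring >= irq_cnt - 1 (the vector count minus the
 * link/misc vector) forces the whole table back to the ethtool
 * default spread built above.
 */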
8477 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8478 {
8479 int i = 0;
8480 u32 reg = MAC_RSS_INDIR_TBL_0;
8482 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8483 u32 val = tp->rss_ind_tbl[i];
8484 i++;
8485 for (; i % 8; i++) {
8486 val <<= 4;
8487 val |= tp->rss_ind_tbl[i];
8488 }
8489 tw32(reg, val);
8490 reg += 4;
8491 }
8492 }
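/* Each 32-bit MAC_RSS_INDIR_TBL_* register packs eight consecutive
 * 4-bit table entries, first entry in the most significant nibble, so
 * the 128-entry table occupies 16 registers. For example, with
 * irq_cnt == 5 (four RX rings) the default table cycles 0,1,2,3,...
 * and every register is written as 0x01230123.
 */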
8494 /* tp->lock is held. */
8495 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8496 {
8497 u32 val, rdmac_mode;
8498 int i, err, limit;
8499 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8501 tg3_disable_ints(tp);
8503 tg3_stop_fw(tp);
8505 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8507 if (tg3_flag(tp, INIT_COMPLETE))
8508 tg3_abort_hw(tp, 1);
8510 /* Enable MAC control of LPI */
8511 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8512 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8513 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8514 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8516 tw32_f(TG3_CPMU_EEE_CTRL,
8517 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8519 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8520 TG3_CPMU_EEEMD_LPI_IN_TX |
8521 TG3_CPMU_EEEMD_LPI_IN_RX |
8522 TG3_CPMU_EEEMD_EEE_ENABLE;
8524 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8525 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8527 if (tg3_flag(tp, ENABLE_APE))
8528 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8530 tw32_f(TG3_CPMU_EEE_MODE, val);
8532 tw32_f(TG3_CPMU_EEE_DBTMR1,
8533 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8534 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8536 tw32_f(TG3_CPMU_EEE_DBTMR2,
8537 TG3_CPMU_DBTMR2_APE_TX_2047US |
8538 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8539 }
8541 if (reset_phy)
8542 tg3_phy_reset(tp);
8544 err = tg3_chip_reset(tp);
8545 if (err)
8546 return err;
8548 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8550 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8551 val = tr32(TG3_CPMU_CTRL);
8552 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8553 tw32(TG3_CPMU_CTRL, val);
8555 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8556 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8557 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8558 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8560 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8561 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8562 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8563 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8565 val = tr32(TG3_CPMU_HST_ACC);
8566 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8567 val |= CPMU_HST_ACC_MACCLK_6_25;
8568 tw32(TG3_CPMU_HST_ACC, val);
8569 }
8571 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8572 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8573 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8574 PCIE_PWR_MGMT_L1_THRESH_4MS;
8575 tw32(PCIE_PWR_MGMT_THRESH, val);
8577 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8578 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8580 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8582 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8583 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8584 }
8586 if (tg3_flag(tp, L1PLLPD_EN)) {
8587 u32 grc_mode = tr32(GRC_MODE);
8589 /* Access the lower 1K of PL PCIE block registers. */
8590 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8591 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8593 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8594 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8595 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8597 tw32(GRC_MODE, grc_mode);
8598 }
8600 if (tg3_flag(tp, 57765_CLASS)) {
8601 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8602 u32 grc_mode = tr32(GRC_MODE);
8604 /* Access the lower 1K of PL PCIE block registers. */
8605 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8606 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8608 val = tr32(TG3_PCIE_TLDLPL_PORT +
8609 TG3_PCIE_PL_LO_PHYCTL5);
8610 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8611 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8613 tw32(GRC_MODE, grc_mode);
8614 }
8616 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8617 u32 grc_mode = tr32(GRC_MODE);
8619 /* Access the lower 1K of DL PCIE block registers. */
8620 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8621 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8623 val = tr32(TG3_PCIE_TLDLPL_PORT +
8624 TG3_PCIE_DL_LO_FTSMAX);
8625 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8626 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8627 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8629 tw32(GRC_MODE, grc_mode);
8630 }
8632 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8633 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8634 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8635 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8636 }
8638 /* This works around an issue with Athlon chipsets on
8639 * B3 tigon3 silicon. This bit has no effect on any
8640 * other revision. But do not set this on PCI Express
8641 * chips and don't even touch the clocks if the CPMU is present.
8642 */
8643 if (!tg3_flag(tp, CPMU_PRESENT)) {
8644 if (!tg3_flag(tp, PCI_EXPRESS))
8645 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8646 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8647 }
8649 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8650 tg3_flag(tp, PCIX_MODE)) {
8651 val = tr32(TG3PCI_PCISTATE);
8652 val |= PCISTATE_RETRY_SAME_DMA;
8653 tw32(TG3PCI_PCISTATE, val);
8654 }
8656 if (tg3_flag(tp, ENABLE_APE)) {
8657 /* Allow reads and writes to the
8658 * APE register and memory space.
8659 */
8660 val = tr32(TG3PCI_PCISTATE);
8661 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8662 PCISTATE_ALLOW_APE_SHMEM_WR |
8663 PCISTATE_ALLOW_APE_PSPACE_WR;
8664 tw32(TG3PCI_PCISTATE, val);
8665 }
8667 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8668 /* Enable some hw fixes. */
8669 val = tr32(TG3PCI_MSI_DATA);
8670 val |= (1 << 26) | (1 << 28) | (1 << 29);
8671 tw32(TG3PCI_MSI_DATA, val);
8672 }
8674 /* Descriptor ring init may make accesses to the
8675 * NIC SRAM area to setup the TX descriptors, so we
8676 * can only do this after the hardware has been
8677 * successfully reset.
8678 */
8679 err = tg3_init_rings(tp);
8680 if (err)
8681 return err;
8683 if (tg3_flag(tp, 57765_PLUS)) {
8684 val = tr32(TG3PCI_DMA_RW_CTRL) &
8685 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8686 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8687 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8688 if (!tg3_flag(tp, 57765_CLASS) &&
8689 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8690 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8691 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8692 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8693 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8694 /* This value is determined during the probe time DMA
8695 * engine test, tg3_test_dma.
8696 */
8697 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8698 }
8700 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8701 GRC_MODE_4X_NIC_SEND_RINGS |
8702 GRC_MODE_NO_TX_PHDR_CSUM |
8703 GRC_MODE_NO_RX_PHDR_CSUM);
8704 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8706 /* Pseudo-header checksum is done by hardware logic and not
8707 * the offload processors, so make the chip do the pseudo-
8708 * header checksums on receive. For transmit it is more
8709 * convenient to do the pseudo-header checksum in software
8710 * as Linux does that on transmit for us in all cases.
8711 */
8712 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8714 tw32(GRC_MODE,
8715 tp->grc_mode |
8716 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8718 /* Setup the timer prescaler register. Clock is always 66 MHz. */
8719 val = tr32(GRC_MISC_CFG);
8720 val &= ~0xff;
8721 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8722 tw32(GRC_MISC_CFG, val);
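/* GRC_MISC_CFG bits 7:0 hold the prescaler: writing 65 presumably
 * divides the fixed 66 MHz core clock by 65 + 1, which would give the
 * GRC timer a 1 MHz (1 usec) tick; the divide-by-(N+1) ratio is an
 * assumption inferred from the value chosen, not stated by the code.
 */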
8724 /* Initialize MBUF/DESC pool. */
8725 if (tg3_flag(tp, 5750_PLUS)) {
8726 /* Do nothing. */
8727 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8728 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8729 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8730 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8731 else
8732 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8733 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8734 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8735 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8736 int fw_len;
8738 fw_len = tp->fw_len;
8739 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8740 tw32(BUFMGR_MB_POOL_ADDR,
8741 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8742 tw32(BUFMGR_MB_POOL_SIZE,
8743 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8744 }
8746 if (tp->dev->mtu <= ETH_DATA_LEN) {
8747 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8748 tp->bufmgr_config.mbuf_read_dma_low_water);
8749 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8750 tp->bufmgr_config.mbuf_mac_rx_low_water);
8751 tw32(BUFMGR_MB_HIGH_WATER,
8752 tp->bufmgr_config.mbuf_high_water);
8753 } else {
8754 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8755 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8756 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8757 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8758 tw32(BUFMGR_MB_HIGH_WATER,
8759 tp->bufmgr_config.mbuf_high_water_jumbo);
8760 }
8761 tw32(BUFMGR_DMA_LOW_WATER,
8762 tp->bufmgr_config.dma_low_water);
8763 tw32(BUFMGR_DMA_HIGH_WATER,
8764 tp->bufmgr_config.dma_high_water);
8766 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8767 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8768 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8769 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8770 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8771 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8772 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8773 tw32(BUFMGR_MODE, val);
8774 for (i = 0; i < 2000; i++) {
8775 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8776 break;
8777 udelay(10);
8778 }
8779 if (i >= 2000) {
8780 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8781 return -ENODEV;
8782 }
8784 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8785 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8787 tg3_setup_rxbd_thresholds(tp);
8789 /* Initialize TG3_BDINFO's at:
8790 * RCVDBDI_STD_BD: standard eth size rx ring
8791 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8792 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8794 * like so:
8795 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8796 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8797 * ring attribute flags
8798 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8800 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8801 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8803 * The size of each ring is fixed in the firmware, but the location is
8804 * configurable.
8805 */
8806 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8807 ((u64) tpr->rx_std_mapping >> 32));
8808 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8809 ((u64) tpr->rx_std_mapping & 0xffffffff));
8810 if (!tg3_flag(tp, 5717_PLUS))
8811 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8812 NIC_SRAM_RX_BUFFER_DESC);
8814 /* Disable the mini ring */
8815 if (!tg3_flag(tp, 5705_PLUS))
8816 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8817 BDINFO_FLAGS_DISABLED);
8819 /* Program the jumbo buffer descriptor ring control
8820 * blocks on those devices that have them.
8821 */
8822 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8823 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8825 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8826 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8827 ((u64) tpr->rx_jmb_mapping >> 32));
8828 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8829 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8830 val = TG3_RX_JMB_RING_SIZE(tp) <<
8831 BDINFO_FLAGS_MAXLEN_SHIFT;
8832 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8833 val | BDINFO_FLAGS_USE_EXT_RECV);
8834 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8835 tg3_flag(tp, 57765_CLASS))
8836 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8837 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8838 } else {
8839 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8840 BDINFO_FLAGS_DISABLED);
8841 }
8843 if (tg3_flag(tp, 57765_PLUS)) {
8844 val = TG3_RX_STD_RING_SIZE(tp);
8845 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8846 val |= (TG3_RX_STD_DMA_SZ << 2);
8847 } else
8848 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8849 } else
8850 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8852 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8854 tpr->rx_std_prod_idx = tp->rx_pending;
8855 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8857 tpr->rx_jmb_prod_idx =
8858 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8859 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8861 tg3_rings_reset(tp);
8863 /* Initialize MAC address and backoff seed. */
8864 __tg3_set_mac_addr(tp, 0);
8866 /* MTU + ethernet header + FCS + optional VLAN tag */
8867 tw32(MAC_RX_MTU_SIZE,
8868 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8870 /* The slot time is changed by tg3_setup_phy if we
8871 * run at gigabit with half duplex.
8872 */
8873 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8874 (6 << TX_LENGTHS_IPG_SHIFT) |
8875 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8877 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8878 val |= tr32(MAC_TX_LENGTHS) &
8879 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8880 TX_LENGTHS_CNT_DWN_VAL_MSK);
8882 tw32(MAC_TX_LENGTHS, val);
8884 /* Receive rules. */
8885 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8886 tw32(RCVLPC_CONFIG, 0x0181);
8888 /* Calculate RDMAC_MODE setting early, we need it to determine
8889 * the RCVLPC_STATE_ENABLE mask.
8890 */
8891 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8892 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8893 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8894 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8895 RDMAC_MODE_LNGREAD_ENAB);
8897 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8898 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8900 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8901 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8902 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8903 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8904 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8905 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8907 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8908 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8909 if (tg3_flag(tp, TSO_CAPABLE) &&
8910 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8911 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8912 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8913 !tg3_flag(tp, IS_5788)) {
8914 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8915 }
8916 }
8918 if (tg3_flag(tp, PCI_EXPRESS))
8919 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8921 if (tg3_flag(tp, HW_TSO_1) ||
8922 tg3_flag(tp, HW_TSO_2) ||
8923 tg3_flag(tp, HW_TSO_3))
8924 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8926 if (tg3_flag(tp, 57765_PLUS) ||
8927 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8928 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8929 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8931 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8932 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8934 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8935 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8936 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8937 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8938 tg3_flag(tp, 57765_PLUS)) {
8939 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8940 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8941 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8942 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8943 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8944 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8945 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8946 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8947 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8948 }
8949 tw32(TG3_RDMA_RSRVCTRL_REG,
8950 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8951 }
8953 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8954 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8955 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8956 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8957 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8958 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8959 }
8961 /* Receive/send statistics. */
8962 if (tg3_flag(tp, 5750_PLUS)) {
8963 val = tr32(RCVLPC_STATS_ENABLE);
8964 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8965 tw32(RCVLPC_STATS_ENABLE, val);
8966 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8967 tg3_flag(tp, TSO_CAPABLE)) {
8968 val = tr32(RCVLPC_STATS_ENABLE);
8969 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8970 tw32(RCVLPC_STATS_ENABLE, val);
8971 } else {
8972 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8973 }
8974 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8975 tw32(SNDDATAI_STATSENAB, 0xffffff);
8976 tw32(SNDDATAI_STATSCTRL,
8977 (SNDDATAI_SCTRL_ENABLE |
8978 SNDDATAI_SCTRL_FASTUPD));
8980 /* Setup host coalescing engine. */
8981 tw32(HOSTCC_MODE, 0);
8982 for (i = 0; i < 2000; i++) {
8983 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8984 break;
8985 udelay(10);
8986 }
8988 __tg3_set_coalesce(tp, &tp->coal);
8990 if (!tg3_flag(tp, 5705_PLUS)) {
8991 /* Status/statistics block address. See tg3_timer,
8992 * the tg3_periodic_fetch_stats call there, and
8993 * tg3_get_stats to see how this works for 5705/5750 chips.
8994 */
8995 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8996 ((u64) tp->stats_mapping >> 32));
8997 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8998 ((u64) tp->stats_mapping & 0xffffffff));
8999 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9001 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9003 /* Clear statistics and status block memory areas */
9004 for (i = NIC_SRAM_STATS_BLK;
9005 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9006 i += sizeof(u32)) {
9007 tg3_write_mem(tp, i, 0);
9008 udelay(40);
9009 }
9010 }
9012 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9014 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9015 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9016 if (!tg3_flag(tp, 5705_PLUS))
9017 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9019 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9020 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9021 /* reset to prevent losing 1st rx packet intermittently */
9022 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9023 udelay(10);
9024 }
9026 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9027 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9028 MAC_MODE_FHDE_ENABLE;
9029 if (tg3_flag(tp, ENABLE_APE))
9030 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9031 if (!tg3_flag(tp, 5705_PLUS) &&
9032 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9033 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9034 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9035 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9036 udelay(40);
9038 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9039 * If TG3_FLAG_IS_NIC is zero, we should read the
9040 * register to preserve the GPIO settings for LOMs. The GPIOs,
9041 * whether used as inputs or outputs, are set by boot code after
9042 * reset.
9043 */
9044 if (!tg3_flag(tp, IS_NIC)) {
9045 u32 gpio_mask;
9047 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9048 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9049 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9052 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9053 GRC_LCLCTRL_GPIO_OUTPUT3;
9055 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9056 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9058 tp->grc_local_ctrl &= ~gpio_mask;
9059 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9060 }
9061 /* GPIO1 must be driven high for eeprom write protect */
9062 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9063 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9064 GRC_LCLCTRL_GPIO_OUTPUT1);
9066 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9067 udelay(100);
9069 if (tg3_flag(tp, USING_MSIX)) {
9070 val = tr32(MSGINT_MODE);
9071 val |= MSGINT_MODE_ENABLE;
9072 if (tp->irq_cnt > 1)
9073 val |= MSGINT_MODE_MULTIVEC_EN;
9074 if (!tg3_flag(tp, 1SHOT_MSI))
9075 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9076 tw32(MSGINT_MODE, val);
9077 }
9079 if (!tg3_flag(tp, 5705_PLUS)) {
9080 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9081 udelay(40);
9082 }
9084 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9085 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9086 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9087 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9088 WDMAC_MODE_LNGREAD_ENAB);
9090 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9091 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9092 if (tg3_flag(tp, TSO_CAPABLE) &&
9093 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9094 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9095 /* nothing */
9096 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9097 !tg3_flag(tp, IS_5788)) {
9098 val |= WDMAC_MODE_RX_ACCEL;
9099 }
9100 }
9102 /* Enable host coalescing bug fix */
9103 if (tg3_flag(tp, 5755_PLUS))
9104 val |= WDMAC_MODE_STATUS_TAG_FIX;
9106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9107 val |= WDMAC_MODE_BURST_ALL_DATA;
9109 tw32_f(WDMAC_MODE, val);
9110 udelay(40);
9112 if (tg3_flag(tp, PCIX_MODE)) {
9113 u16 pcix_cmd;
9115 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9116 &pcix_cmd);
9117 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9118 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9119 pcix_cmd |= PCI_X_CMD_READ_2K;
9120 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9121 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9122 pcix_cmd |= PCI_X_CMD_READ_2K;
9123 }
9124 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9125 pcix_cmd);
9126 }
9128 tw32_f(RDMAC_MODE, rdmac_mode);
9129 udelay(40);
9131 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9132 if (!tg3_flag(tp, 5705_PLUS))
9133 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9135 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9136 tw32(SNDDATAC_MODE,
9137 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9138 else
9139 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9141 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9142 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9143 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9144 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9145 val |= RCVDBDI_MODE_LRG_RING_SZ;
9146 tw32(RCVDBDI_MODE, val);
9147 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9148 if (tg3_flag(tp, HW_TSO_1) ||
9149 tg3_flag(tp, HW_TSO_2) ||
9150 tg3_flag(tp, HW_TSO_3))
9151 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9152 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9153 if (tg3_flag(tp, ENABLE_TSS))
9154 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9155 tw32(SNDBDI_MODE, val);
9156 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9158 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9159 err = tg3_load_5701_a0_firmware_fix(tp);
9160 if (err)
9161 return err;
9162 }
9164 if (tg3_flag(tp, TSO_CAPABLE)) {
9165 err = tg3_load_tso_firmware(tp);
9166 if (err)
9167 return err;
9168 }
9170 tp->tx_mode = TX_MODE_ENABLE;
9172 if (tg3_flag(tp, 5755_PLUS) ||
9173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9174 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9176 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9177 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9178 tp->tx_mode &= ~val;
9179 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9180 }
9182 tw32_f(MAC_TX_MODE, tp->tx_mode);
9183 udelay(100);
9185 if (tg3_flag(tp, ENABLE_RSS)) {
9186 tg3_rss_write_indir_tbl(tp);
9188 /* Setup the "secret" hash key. */
9189 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9190 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9191 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9192 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9193 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9194 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9195 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9196 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9197 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9198 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9199 }
9201 tp->rx_mode = RX_MODE_ENABLE;
9202 if (tg3_flag(tp, 5755_PLUS))
9203 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9205 if (tg3_flag(tp, ENABLE_RSS))
9206 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9207 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9208 RX_MODE_RSS_IPV6_HASH_EN |
9209 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9210 RX_MODE_RSS_IPV4_HASH_EN |
9211 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9213 tw32_f(MAC_RX_MODE, tp->rx_mode);
9214 udelay(10);
9216 tw32(MAC_LED_CTRL, tp->led_ctrl);
9218 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9219 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9220 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9221 udelay(10);
9222 }
9223 tw32_f(MAC_RX_MODE, tp->rx_mode);
9224 udelay(10);
9226 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9227 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9228 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9229 /* Set drive transmission level to 1.2V */
9230 /* only if the signal pre-emphasis bit is not set */
9231 val = tr32(MAC_SERDES_CFG);
9232 val &= 0xfffff000;
9233 val |= 0x880;
9234 tw32(MAC_SERDES_CFG, val);
9235 }
9236 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9237 tw32(MAC_SERDES_CFG, 0x616000);
9238 }
9240 /* Prevent chip from dropping frames when flow control
9241 * is enabled.
9242 */
9243 if (tg3_flag(tp, 57765_CLASS))
9244 val = 1;
9245 else
9246 val = 2;
9247 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9249 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9250 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9251 /* Use hardware link auto-negotiation */
9252 tg3_flag_set(tp, HW_AUTONEG);
9253 }
9255 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9256 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9257 u32 tmp;
9259 tmp = tr32(SERDES_RX_CTRL);
9260 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9261 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9262 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9263 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9264 }
9266 if (!tg3_flag(tp, USE_PHYLIB)) {
9267 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9268 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9269 tp->link_config.speed = tp->link_config.orig_speed;
9270 tp->link_config.duplex = tp->link_config.orig_duplex;
9271 tp->link_config.autoneg = tp->link_config.orig_autoneg;
9272 }
9274 err = tg3_setup_phy(tp, 0);
9275 if (err)
9276 return err;
9278 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9279 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9280 u32 tmp;
9282 /* Clear CRC stats. */
9283 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9284 tg3_writephy(tp, MII_TG3_TEST1,
9285 tmp | MII_TG3_TEST1_CRC_EN);
9286 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9287 }
9288 }
9289 }
9291 __tg3_set_rx_mode(tp->dev);
9293 /* Initialize receive rules. */
9294 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9295 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9296 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9297 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9299 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9300 limit = 8;
9301 else
9302 limit = 16;
9303 if (tg3_flag(tp, ENABLE_ASF))
9304 limit -= 4;
9305 switch (limit) {
9306 case 16:
9307 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9308 case 15:
9309 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9310 case 14:
9311 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9312 case 13:
9313 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9314 case 12:
9315 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9316 case 11:
9317 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9318 case 10:
9319 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9320 case 9:
9321 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9322 case 8:
9323 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9324 case 7:
9325 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9326 case 6:
9327 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9328 case 5:
9329 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9330 case 4:
9331 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9332 case 3:
9333 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9334 case 2:
9335 case 1:
9337 default:
9338 break;
9339 }
9341 if (tg3_flag(tp, ENABLE_APE))
9342 /* Write our heartbeat update interval to APE. */
9343 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9344 APE_HOST_HEARTBEAT_INT_DISABLE);
9346 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9348 return 0;
9349 }
9351 /* Called at device open time to get the chip ready for
9352 * packet processing. Invoked with tp->lock held.
9353 */
9354 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9355 {
9356 tg3_switch_clocks(tp);
9358 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9360 return tg3_reset_hw(tp, reset_phy);
9361 }
9363 /* Restart hardware after configuration changes, self-test, etc.
9364 * Invoked with tp->lock held.
9365 */
9366 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9367 __releases(tp->lock)
9368 __acquires(tp->lock)
9369 {
9370 int err;
9372 err = tg3_init_hw(tp, reset_phy);
9373 if (err) {
9374 netdev_err(tp->dev,
9375 "Failed to re-initialize device, aborting\n");
9376 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9377 tg3_full_unlock(tp);
9378 del_timer_sync(&tp->timer);
9379 tp->irq_sync = 0;
9380 tg3_napi_enable(tp);
9381 dev_close(tp->dev);
9382 tg3_full_lock(tp, 0);
9383 }
9384 return err;
9385 }
9387 static void tg3_reset_task(struct work_struct *work)
9388 {
9389 struct tg3 *tp = container_of(work, struct tg3, reset_task);
9390 int err;
9392 tg3_full_lock(tp, 0);
9394 if (!netif_running(tp->dev)) {
9395 tg3_flag_clear(tp, RESET_TASK_PENDING);
9396 tg3_full_unlock(tp);
9397 return;
9398 }
9400 tg3_full_unlock(tp);
9402 tg3_phy_stop(tp);
9404 tg3_netif_stop(tp);
9406 tg3_full_lock(tp, 1);
9408 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9409 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9410 tp->write32_rx_mbox = tg3_write_flush_reg32;
9411 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9412 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9413 }
9415 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9416 err = tg3_init_hw(tp, 1);
9417 if (err)
9418 goto out;
9420 tg3_netif_start(tp);
9422 out:
9423 tg3_full_unlock(tp);
9425 if (!err)
9426 tg3_phy_start(tp);
9428 tg3_flag_clear(tp, RESET_TASK_PENDING);
9429 }
9431 #define TG3_STAT_ADD32(PSTAT, REG) \
9432 do { u32 __val = tr32(REG); \
9433 (PSTAT)->low += __val; \
9434 if ((PSTAT)->low < __val) \
9435 (PSTAT)->high += 1; \
9436 } while (0)
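/* TG3_STAT_ADD32() folds a 32-bit statistics register into a 64-bit
 * software counter: ->low accumulates, and an unsigned wrap is
 * detected by (->low < __val) and carried into ->high. This relies on
 * the hardware counters clearing when read (an assumption consistent
 * with the once-per-second fetch below), so each call adds only the
 * delta accumulated since the previous one.
 */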
9438 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9439 {
9440 struct tg3_hw_stats *sp = tp->hw_stats;
9442 if (!netif_carrier_ok(tp->dev))
9443 return;
9445 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9446 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9447 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9448 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9449 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9450 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9451 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9452 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9453 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9454 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9455 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9456 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9457 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9459 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9460 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9461 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9462 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9463 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9464 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9465 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9466 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9467 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9468 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9469 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9470 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9471 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9472 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9474 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9475 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9476 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9477 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9478 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9479 } else {
9480 u32 val = tr32(HOSTCC_FLOW_ATTN);
9481 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9482 if (val) {
9483 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9484 sp->rx_discards.low += val;
9485 if (sp->rx_discards.low < val)
9486 sp->rx_discards.high += 1;
9487 }
9488 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9489 }
9490 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9491 }
9493 static void tg3_chk_missed_msi(struct tg3 *tp)
9494 {
9495 u32 i;
9497 for (i = 0; i < tp->irq_cnt; i++) {
9498 struct tg3_napi *tnapi = &tp->napi[i];
9500 if (tg3_has_work(tnapi)) {
9501 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9502 tnapi->last_tx_cons == tnapi->tx_cons) {
9503 if (tnapi->chk_msi_cnt < 1) {
9504 tnapi->chk_msi_cnt++;
9505 return;
9506 }
9507 tg3_msi(0, tnapi);
9508 }
9509 }
9510 tnapi->chk_msi_cnt = 0;
9511 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9512 tnapi->last_tx_cons = tnapi->tx_cons;
9513 }
9514 }
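/* MSI is edge-like, so a lost message leaves work pending with no
 * further interrupt to deliver it. The check above tolerates one
 * timer tick in which a vector has work but its recorded rx/tx
 * consumer indices have not moved (chk_msi_cnt), then calls the
 * tg3_msi() handler directly to unwedge that vector.
 */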
9516 static void tg3_timer(unsigned long __opaque)
9517 {
9518 struct tg3 *tp = (struct tg3 *) __opaque;
9520 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9521 goto restart_timer;
9523 spin_lock(&tp->lock);
9525 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9526 tg3_flag(tp, 57765_CLASS))
9527 tg3_chk_missed_msi(tp);
9529 if (!tg3_flag(tp, TAGGED_STATUS)) {
9530 /* All of this garbage is because, when using non-tagged
9531 * IRQ status, the mailbox/status_block protocol the chip
9532 * uses with the cpu is race prone.
9533 */
9534 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9535 tw32(GRC_LOCAL_CTRL,
9536 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9537 } else {
9538 tw32(HOSTCC_MODE, tp->coalesce_mode |
9539 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9540 }
9542 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9543 spin_unlock(&tp->lock);
9544 tg3_reset_task_schedule(tp);
9545 goto restart_timer;
9546 }
9547 }
9549 /* This part only runs once per second. */
9550 if (!--tp->timer_counter) {
9551 if (tg3_flag(tp, 5705_PLUS))
9552 tg3_periodic_fetch_stats(tp);
9554 if (tp->setlpicnt && !--tp->setlpicnt)
9555 tg3_phy_eee_enable(tp);
9557 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9558 u32 mac_stat;
9559 int phy_event;
9561 mac_stat = tr32(MAC_STATUS);
9563 phy_event = 0;
9564 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9565 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9566 phy_event = 1;
9567 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9568 phy_event = 1;
9570 if (phy_event)
9571 tg3_setup_phy(tp, 0);
9572 } else if (tg3_flag(tp, POLL_SERDES)) {
9573 u32 mac_stat = tr32(MAC_STATUS);
9574 int need_setup = 0;
9576 if (netif_carrier_ok(tp->dev) &&
9577 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9578 need_setup = 1;
9579 }
9580 if (!netif_carrier_ok(tp->dev) &&
9581 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9582 MAC_STATUS_SIGNAL_DET))) {
9583 need_setup = 1;
9584 }
9585 if (need_setup) {
9586 if (!tp->serdes_counter) {
9587 tw32_f(MAC_MODE,
9588 (tp->mac_mode &
9589 ~MAC_MODE_PORT_MODE_MASK));
9590 udelay(40);
9591 tw32_f(MAC_MODE, tp->mac_mode);
9592 udelay(40);
9593 }
9594 tg3_setup_phy(tp, 0);
9595 }
9596 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9597 tg3_flag(tp, 5780_CLASS)) {
9598 tg3_serdes_parallel_detect(tp);
9599 }
9601 tp->timer_counter = tp->timer_multiplier;
9602 }
9604 /* Heartbeat is only sent once every 2 seconds.
9606 * The heartbeat is to tell the ASF firmware that the host
9607 * driver is still alive. In the event that the OS crashes,
9608 * ASF needs to reset the hardware to free up the FIFO space
9609 * that may be filled with rx packets destined for the host.
9610 * If the FIFO is full, ASF will no longer function properly.
9612 * Unintended resets have been reported on real time kernels
9613 * where the timer doesn't run on time. Netpoll will also have
9614 * the same problem.
9616 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9617 * to check the ring condition when the heartbeat is expiring
9618 * before doing the reset. This will prevent most unintended
9619 * resets.
9620 */
9621 if (!--tp->asf_counter) {
9622 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9623 tg3_wait_for_event_ack(tp);
9625 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9626 FWCMD_NICDRV_ALIVE3);
9627 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9628 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9629 TG3_FW_UPDATE_TIMEOUT_SEC);
9631 tg3_generate_fw_event(tp);
9632 }
9633 tp->asf_counter = tp->asf_multiplier;
9634 }
9636 spin_unlock(&tp->lock);
9638 restart_timer:
9639 tp->timer.expires = jiffies + tp->timer_offset;
9640 add_timer(&tp->timer);
9641 }
9643 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9644 {
9645 irq_handler_t fn;
9646 unsigned long flags;
9647 char *name;
9648 struct tg3_napi *tnapi = &tp->napi[irq_num];
9650 if (tp->irq_cnt == 1)
9651 name = tp->dev->name;
9652 else {
9653 name = &tnapi->irq_lbl[0];
9654 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9655 name[IFNAMSIZ-1] = 0;
9656 }
9658 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9659 fn = tg3_msi;
9660 if (tg3_flag(tp, 1SHOT_MSI))
9661 fn = tg3_msi_1shot;
9662 flags = 0;
9663 } else {
9664 fn = tg3_interrupt;
9665 if (tg3_flag(tp, TAGGED_STATUS))
9666 fn = tg3_interrupt_tagged;
9667 flags = IRQF_SHARED;
9668 }
9670 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9671 }
9673 static int tg3_test_interrupt(struct tg3 *tp)
9674 {
9675 struct tg3_napi *tnapi = &tp->napi[0];
9676 struct net_device *dev = tp->dev;
9677 int err, i, intr_ok = 0;
9678 u32 val;
9680 if (!netif_running(dev))
9681 return -ENODEV;
9683 tg3_disable_ints(tp);
9685 free_irq(tnapi->irq_vec, tnapi);
9687 /*
9688 * Turn off MSI one shot mode. Otherwise this test has no
9689 * observable way to know whether the interrupt was delivered.
9690 */
9691 if (tg3_flag(tp, 57765_PLUS)) {
9692 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9693 tw32(MSGINT_MODE, val);
9694 }
9696 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9697 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9698 if (err)
9699 return err;
9701 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9702 tg3_enable_ints(tp);
9704 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9705 tnapi->coal_now);
9707 for (i = 0; i < 5; i++) {
9708 u32 int_mbox, misc_host_ctrl;
9710 int_mbox = tr32_mailbox(tnapi->int_mbox);
9711 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9713 if ((int_mbox != 0) ||
9714 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9715 intr_ok = 1;
9716 break;
9717 }
9719 if (tg3_flag(tp, 57765_PLUS) &&
9720 tnapi->hw_status->status_tag != tnapi->last_tag)
9721 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9723 msleep(10);
9724 }
9726 tg3_disable_ints(tp);
9728 free_irq(tnapi->irq_vec, tnapi);
9730 err = tg3_request_irq(tp, 0);
9732 if (err)
9733 return err;
9735 if (intr_ok) {
9736 /* Reenable MSI one shot mode. */
9737 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9738 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9739 tw32(MSGINT_MODE, val);
9740 }
9741 return 0;
9742 }
9744 return -EIO;
9745 }
9747 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9748 * successfully restored
9749 */
9750 static int tg3_test_msi(struct tg3 *tp)
9751 {
9752 int err;
9753 u16 pci_cmd;
9755 if (!tg3_flag(tp, USING_MSI))
9756 return 0;
9758 /* Turn off SERR reporting in case MSI terminates with Master
9759 * Abort.
9760 */
9761 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9762 pci_write_config_word(tp->pdev, PCI_COMMAND,
9763 pci_cmd & ~PCI_COMMAND_SERR);
9765 err = tg3_test_interrupt(tp);
9767 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9769 if (!err)
9770 return 0;
9772 /* other failures */
9773 if (err != -EIO)
9774 return err;
9776 /* MSI test failed, go back to INTx mode */
9777 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9778 "to INTx mode. Please report this failure to the PCI "
9779 "maintainer and include system chipset information\n");
9781 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9783 pci_disable_msi(tp->pdev);
9785 tg3_flag_clear(tp, USING_MSI);
9786 tp->napi[0].irq_vec = tp->pdev->irq;
9788 err = tg3_request_irq(tp, 0);
9789 if (err)
9790 return err;
9792 /* Need to reset the chip because the MSI cycle may have terminated
9793 * with Master Abort.
9794 */
9795 tg3_full_lock(tp, 1);
9797 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9798 err = tg3_init_hw(tp, 1);
9800 tg3_full_unlock(tp);
9802 if (err)
9803 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9805 return err;
9806 }
9808 static int tg3_request_firmware(struct tg3 *tp)
9809 {
9810 const __be32 *fw_data;
9812 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9813 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9814 tp->fw_needed);
9815 return -ENOENT;
9816 }
9818 fw_data = (void *)tp->fw->data;
9820 /* Firmware blob starts with version numbers, followed by
9821 * start address and _full_ length including BSS sections
9822 * (which must be longer than the actual data, of course).
9823 */
9825 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9826 if (tp->fw_len < (tp->fw->size - 12)) {
9827 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9828 tp->fw_len, tp->fw_needed);
9829 release_firmware(tp->fw);
9830 tp->fw = NULL;
9831 return -EINVAL;
9832 }
9834 /* We no longer need firmware; we have it. */
9835 tp->fw_needed = NULL;
9836 return 0;
9837 }
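/* Blob layout implied by the checks above: three big-endian words --
 * fw_data[0] version, fw_data[1] load address, fw_data[2] total image
 * length including BSS -- followed by the text payload; hence the
 * 12-byte header subtracted from tp->fw->size in the sanity check.
 */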
9839 static bool tg3_enable_msix(struct tg3 *tp)
9840 {
9841 int i, rc;
9842 struct msix_entry msix_ent[tp->irq_max];
9844 tp->irq_cnt = num_online_cpus();
9845 if (tp->irq_cnt > 1) {
9846 /* We want as many rx rings enabled as there are cpus.
9847 * In multiqueue MSI-X mode, the first MSI-X vector
9848 * only deals with link interrupts, etc, so we add
9849 * one to the number of vectors we are requesting.
9850 */
9851 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9852 }
9854 for (i = 0; i < tp->irq_max; i++) {
9855 msix_ent[i].entry = i;
9856 msix_ent[i].vector = 0;
9857 }
9859 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9860 if (rc < 0) {
9861 return false;
9862 } else if (rc != 0) {
9863 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9864 return false;
9865 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9866 tp->irq_cnt, rc);
9867 tp->irq_cnt = rc;
9868 }
9870 for (i = 0; i < tp->irq_max; i++)
9871 tp->napi[i].irq_vec = msix_ent[i].vector;
9873 netif_set_real_num_tx_queues(tp->dev, 1);
9874 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9875 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9876 pci_disable_msix(tp->pdev);
9877 return false;
9878 }
9880 if (tp->irq_cnt > 1) {
9881 tg3_flag_set(tp, ENABLE_RSS);
9883 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9884 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9885 tg3_flag_set(tp, ENABLE_TSS);
9886 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9887 }
9888 }
9890 return true;
9891 }
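/* Vector budget: one MSI-X vector for link/misc plus one per RX ring,
 * i.e. num_online_cpus() + 1 capped at irq_max. A positive return
 * from pci_enable_msix() means fewer vectors are available, so the
 * allocation is retried at that count and the queue count scaled to
 * match. TSS (multiple TX queues) is only enabled on 5719/5720.
 */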
9893 static void tg3_ints_init(struct tg3 *tp)
9894 {
9895 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9896 !tg3_flag(tp, TAGGED_STATUS)) {
9897 /* All MSI supporting chips should support tagged
9898 * status. Assert that this is the case.
9899 */
9900 netdev_warn(tp->dev,
9901 "MSI without TAGGED_STATUS? Not using MSI\n");
9902 goto defcfg;
9903 }
9905 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9906 tg3_flag_set(tp, USING_MSIX);
9907 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9908 tg3_flag_set(tp, USING_MSI);
9910 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9911 u32 msi_mode = tr32(MSGINT_MODE);
9912 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9913 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9914 if (!tg3_flag(tp, 1SHOT_MSI))
9915 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9916 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9917 }
9918 defcfg:
9919 if (!tg3_flag(tp, USING_MSIX)) {
9920 tp->irq_cnt = 1;
9921 tp->napi[0].irq_vec = tp->pdev->irq;
9922 netif_set_real_num_tx_queues(tp->dev, 1);
9923 netif_set_real_num_rx_queues(tp->dev, 1);
9924 }
9925 }
9927 static void tg3_ints_fini(struct tg3 *tp)
9928 {
9929 if (tg3_flag(tp, USING_MSIX))
9930 pci_disable_msix(tp->pdev);
9931 else if (tg3_flag(tp, USING_MSI))
9932 pci_disable_msi(tp->pdev);
9933 tg3_flag_clear(tp, USING_MSI);
9934 tg3_flag_clear(tp, USING_MSIX);
9935 tg3_flag_clear(tp, ENABLE_RSS);
9936 tg3_flag_clear(tp, ENABLE_TSS);
9937 }
9939 static int tg3_open(struct net_device *dev)
9940 {
9941 struct tg3 *tp = netdev_priv(dev);
9942 int i, err;
9944 if (tp->fw_needed) {
9945 err = tg3_request_firmware(tp);
9946 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9947 if (err)
9948 return err;
9949 } else if (err) {
9950 netdev_warn(tp->dev, "TSO capability disabled\n");
9951 tg3_flag_clear(tp, TSO_CAPABLE);
9952 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9953 netdev_notice(tp->dev, "TSO capability restored\n");
9954 tg3_flag_set(tp, TSO_CAPABLE);
9955 }
9956 }
9958 netif_carrier_off(tp->dev);
9960 err = tg3_power_up(tp);
9961 if (err)
9962 return err;
9964 tg3_full_lock(tp, 0);
9966 tg3_disable_ints(tp);
9967 tg3_flag_clear(tp, INIT_COMPLETE);
9969 tg3_full_unlock(tp);
9971 /*
9972 * Setup interrupts first so we know how
9973 * many NAPI resources to allocate
9974 */
9975 tg3_ints_init(tp);
9977 tg3_rss_check_indir_tbl(tp);
9979 /* The placement of this call is tied
9980 * to the setup and use of Host TX descriptors.
9981 */
9982 err = tg3_alloc_consistent(tp);
9983 if (err)
9984 goto err_out1;
9986 tg3_napi_init(tp);
9988 tg3_napi_enable(tp);
9990 for (i = 0; i < tp->irq_cnt; i++) {
9991 struct tg3_napi *tnapi = &tp->napi[i];
9992 err = tg3_request_irq(tp, i);
9993 if (err) {
9994 for (i--; i >= 0; i--) {
9995 tnapi = &tp->napi[i];
9996 free_irq(tnapi->irq_vec, tnapi);
9997 }
9998 goto err_out2;
9999 }
10000 }
10002 tg3_full_lock(tp, 0);
10004 err = tg3_init_hw(tp, 1);
10005 if (err) {
10006 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10007 tg3_free_rings(tp);
10008 } else {
10009 if (tg3_flag(tp, TAGGED_STATUS) &&
10010 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10011 !tg3_flag(tp, 57765_CLASS))
10012 tp->timer_offset = HZ;
10013 else
10014 tp->timer_offset = HZ / 10;
10016 BUG_ON(tp->timer_offset > HZ);
10017 tp->timer_counter = tp->timer_multiplier =
10018 (HZ / tp->timer_offset);
10019 tp->asf_counter = tp->asf_multiplier =
10020 ((HZ / tp->timer_offset) * 2);
10022 init_timer(&tp->timer);
10023 tp->timer.expires = jiffies + tp->timer_offset;
10024 tp->timer.data = (unsigned long) tp;
10025 tp->timer.function = tg3_timer;
10026 }
10028 tg3_full_unlock(tp);
10030 if (err)
10031 goto err_out3;
10033 if (tg3_flag(tp, USING_MSI)) {
10034 err = tg3_test_msi(tp);
10036 if (err) {
10037 tg3_full_lock(tp, 0);
10038 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10039 tg3_free_rings(tp);
10040 tg3_full_unlock(tp);
10042 goto err_out2;
10043 }
10045 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10046 u32 val = tr32(PCIE_TRANSACTION_CFG);
10048 tw32(PCIE_TRANSACTION_CFG,
10049 val | PCIE_TRANS_CFG_1SHOT_MSI);
10050 }
10051 }
10053 tg3_phy_start(tp);
10055 tg3_full_lock(tp, 0);
10057 add_timer(&tp->timer);
10058 tg3_flag_set(tp, INIT_COMPLETE);
10059 tg3_enable_ints(tp);
10061 tg3_full_unlock(tp);
10063 netif_tx_start_all_queues(dev);
10065 /*
10066 * Reset loopback feature if it was turned on while the device was down;
10067 * make sure that it's installed properly now.
10068 */
10069 if (dev->features & NETIF_F_LOOPBACK)
10070 tg3_set_loopback(dev, dev->features);
10072 return 0;
10074 err_out3:
10075 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10076 struct tg3_napi *tnapi = &tp->napi[i];
10077 free_irq(tnapi->irq_vec, tnapi);
10078 }
10080 err_out2:
10081 tg3_napi_disable(tp);
10082 tg3_napi_fini(tp);
10083 tg3_free_consistent(tp);
10085 err_out1:
10086 tg3_ints_fini(tp);
10087 tg3_frob_aux_power(tp, false);
10088 pci_set_power_state(tp->pdev, PCI_D3hot);
10089 return err;
10090 }
10092 static int tg3_close(struct net_device *dev)
10093 {
10094 int i;
10095 struct tg3 *tp = netdev_priv(dev);
10097 tg3_napi_disable(tp);
10098 tg3_reset_task_cancel(tp);
10100 netif_tx_stop_all_queues(dev);
10102 del_timer_sync(&tp->timer);
10104 tg3_phy_stop(tp);
10106 tg3_full_lock(tp, 1);
10108 tg3_disable_ints(tp);
10110 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10111 tg3_free_rings(tp);
10112 tg3_flag_clear(tp, INIT_COMPLETE);
10114 tg3_full_unlock(tp);
10116 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10117 struct tg3_napi *tnapi = &tp->napi[i];
10118 free_irq(tnapi->irq_vec, tnapi);
10119 }
10121 tg3_ints_fini(tp);
10123 /* Clear stats across close / open calls */
10124 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10125 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10127 tg3_napi_fini(tp);
10129 tg3_free_consistent(tp);
10131 tg3_power_down(tp);
10133 netif_carrier_off(tp->dev);
10135 return 0;
10136 }
10138 static inline u64 get_stat64(tg3_stat64_t *val)
10139 {
10140 return ((u64)val->high << 32) | ((u64)val->low);
10141 }
10143 static u64 calc_crc_errors(struct tg3 *tp)
10144 {
10145 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10147 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10148 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10149 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10150 u32 val;
10152 spin_lock_bh(&tp->lock);
10153 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10154 tg3_writephy(tp, MII_TG3_TEST1,
10155 val | MII_TG3_TEST1_CRC_EN);
10156 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10157 } else
10158 val = 0;
10159 spin_unlock_bh(&tp->lock);
10161 tp->phy_crc_errors += val;
10163 return tp->phy_crc_errors;
10164 }
10166 return get_stat64(&hw_stats->rx_fcs_errors);
10167 }
10169 #define ESTAT_ADD(member) \
10170 estats->member = old_estats->member + \
10171 get_stat64(&hw_stats->member)
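/* The ethtool counters survive chip resets: each one is reported as
 * the snapshot saved across the last reset (tp->estats_prev) plus
 * whatever the live hardware statistics block has accumulated since.
 */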
10173 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
10174 struct tg3_ethtool_stats *estats)
10175 {
10176 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10177 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10179 ESTAT_ADD(rx_octets);
10180 ESTAT_ADD(rx_fragments);
10181 ESTAT_ADD(rx_ucast_packets);
10182 ESTAT_ADD(rx_mcast_packets);
10183 ESTAT_ADD(rx_bcast_packets);
10184 ESTAT_ADD(rx_fcs_errors);
10185 ESTAT_ADD(rx_align_errors);
10186 ESTAT_ADD(rx_xon_pause_rcvd);
10187 ESTAT_ADD(rx_xoff_pause_rcvd);
10188 ESTAT_ADD(rx_mac_ctrl_rcvd);
10189 ESTAT_ADD(rx_xoff_entered);
10190 ESTAT_ADD(rx_frame_too_long_errors);
10191 ESTAT_ADD(rx_jabbers);
10192 ESTAT_ADD(rx_undersize_packets);
10193 ESTAT_ADD(rx_in_length_errors);
10194 ESTAT_ADD(rx_out_length_errors);
10195 ESTAT_ADD(rx_64_or_less_octet_packets);
10196 ESTAT_ADD(rx_65_to_127_octet_packets);
10197 ESTAT_ADD(rx_128_to_255_octet_packets);
10198 ESTAT_ADD(rx_256_to_511_octet_packets);
10199 ESTAT_ADD(rx_512_to_1023_octet_packets);
10200 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10201 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10202 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10203 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10204 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10206 ESTAT_ADD(tx_octets);
10207 ESTAT_ADD(tx_collisions);
10208 ESTAT_ADD(tx_xon_sent);
10209 ESTAT_ADD(tx_xoff_sent);
10210 ESTAT_ADD(tx_flow_control);
10211 ESTAT_ADD(tx_mac_errors);
10212 ESTAT_ADD(tx_single_collisions);
10213 ESTAT_ADD(tx_mult_collisions);
10214 ESTAT_ADD(tx_deferred);
10215 ESTAT_ADD(tx_excessive_collisions);
10216 ESTAT_ADD(tx_late_collisions);
10217 ESTAT_ADD(tx_collide_2times);
10218 ESTAT_ADD(tx_collide_3times);
10219 ESTAT_ADD(tx_collide_4times);
10220 ESTAT_ADD(tx_collide_5times);
10221 ESTAT_ADD(tx_collide_6times);
10222 ESTAT_ADD(tx_collide_7times);
10223 ESTAT_ADD(tx_collide_8times);
10224 ESTAT_ADD(tx_collide_9times);
10225 ESTAT_ADD(tx_collide_10times);
10226 ESTAT_ADD(tx_collide_11times);
10227 ESTAT_ADD(tx_collide_12times);
10228 ESTAT_ADD(tx_collide_13times);
10229 ESTAT_ADD(tx_collide_14times);
10230 ESTAT_ADD(tx_collide_15times);
10231 ESTAT_ADD(tx_ucast_packets);
10232 ESTAT_ADD(tx_mcast_packets);
10233 ESTAT_ADD(tx_bcast_packets);
10234 ESTAT_ADD(tx_carrier_sense_errors);
10235 ESTAT_ADD(tx_discards);
10236 ESTAT_ADD(tx_errors);
10238 ESTAT_ADD(dma_writeq_full);
10239 ESTAT_ADD(dma_write_prioq_full);
10240 ESTAT_ADD(rxbds_empty);
10241 ESTAT_ADD(rx_discards);
10242 ESTAT_ADD(rx_errors);
10243 ESTAT_ADD(rx_threshold_hit);
10245 ESTAT_ADD(dma_readq_full);
10246 ESTAT_ADD(dma_read_prioq_full);
10247 ESTAT_ADD(tx_comp_queue_full);
10249 ESTAT_ADD(ring_set_send_prod_index);
10250 ESTAT_ADD(ring_status_update);
10251 ESTAT_ADD(nic_irqs);
10252 ESTAT_ADD(nic_avoided_irqs);
10253 ESTAT_ADD(nic_tx_threshold_hit);
10255 ESTAT_ADD(mbuf_lwm_thresh_hit);
10257 return estats;
10258 }
10260 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
10261 struct rtnl_link_stats64 *stats)
10262 {
10263 struct tg3 *tp = netdev_priv(dev);
10264 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10265 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10267 if (!hw_stats)
10268 return old_stats;
10270 stats->rx_packets = old_stats->rx_packets +
10271 get_stat64(&hw_stats->rx_ucast_packets) +
10272 get_stat64(&hw_stats->rx_mcast_packets) +
10273 get_stat64(&hw_stats->rx_bcast_packets);
10275 stats->tx_packets = old_stats->tx_packets +
10276 get_stat64(&hw_stats->tx_ucast_packets) +
10277 get_stat64(&hw_stats->tx_mcast_packets) +
10278 get_stat64(&hw_stats->tx_bcast_packets);
10280 stats->rx_bytes = old_stats->rx_bytes +
10281 get_stat64(&hw_stats->rx_octets);
10282 stats->tx_bytes = old_stats->tx_bytes +
10283 get_stat64(&hw_stats->tx_octets);
10285 stats->rx_errors = old_stats->rx_errors +
10286 get_stat64(&hw_stats->rx_errors);
10287 stats->tx_errors = old_stats->tx_errors +
10288 get_stat64(&hw_stats->tx_errors) +
10289 get_stat64(&hw_stats->tx_mac_errors) +
10290 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10291 get_stat64(&hw_stats->tx_discards);
10293 stats->multicast = old_stats->multicast +
10294 get_stat64(&hw_stats->rx_mcast_packets);
10295 stats->collisions = old_stats->collisions +
10296 get_stat64(&hw_stats->tx_collisions);
10298 stats->rx_length_errors = old_stats->rx_length_errors +
10299 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10300 get_stat64(&hw_stats->rx_undersize_packets);
10302 stats->rx_over_errors = old_stats->rx_over_errors +
10303 get_stat64(&hw_stats->rxbds_empty);
10304 stats->rx_frame_errors = old_stats->rx_frame_errors +
10305 get_stat64(&hw_stats->rx_align_errors);
10306 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10307 get_stat64(&hw_stats->tx_discards);
10308 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10309 get_stat64(&hw_stats->tx_carrier_sense_errors);
10311 stats->rx_crc_errors = old_stats->rx_crc_errors +
10312 calc_crc_errors(tp);
10314 stats->rx_missed_errors = old_stats->rx_missed_errors +
10315 get_stat64(&hw_stats->rx_discards);
10317 stats->rx_dropped = tp->rx_dropped;
10318 stats->tx_dropped = tp->tx_dropped;
10320 return stats;
10323 static int tg3_get_regs_len(struct net_device *dev)
10325 return TG3_REG_BLK_SIZE;
10328 static void tg3_get_regs(struct net_device *dev,
10329 struct ethtool_regs *regs, void *_p)
10331 struct tg3 *tp = netdev_priv(dev);
10333 regs->version = 0;
10335 memset(_p, 0, TG3_REG_BLK_SIZE);
10337 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10338 return;
10340 tg3_full_lock(tp, 0);
10342 tg3_dump_legacy_regs(tp, (u32 *)_p);
10344 tg3_full_unlock(tp);
10347 static int tg3_get_eeprom_len(struct net_device *dev)
10349 struct tg3 *tp = netdev_priv(dev);
10351 return tp->nvram_size;
10354 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10356 struct tg3 *tp = netdev_priv(dev);
10357 int ret;
10358 u8 *pd;
10359 u32 i, offset, len, b_offset, b_count;
10360 __be32 val;
10362 if (tg3_flag(tp, NO_NVRAM))
10363 return -EINVAL;
10365 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10366 return -EAGAIN;
10368 offset = eeprom->offset;
10369 len = eeprom->len;
10370 eeprom->len = 0;
10372 eeprom->magic = TG3_EEPROM_MAGIC;
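/* NVRAM is read in 32-bit words, so the request is handled in three
 * parts: an unaligned head, whole 4-byte words, then an unaligned
 * tail.  E.g. offset=1 len=9 reads the words at 0, 4 and 8 and
 * copies out bytes 1 through 9.
 */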
10374 if (offset & 3) {
10375 /* adjustments to start on required 4 byte boundary */
10376 b_offset = offset & 3;
10377 b_count = 4 - b_offset;
10378 if (b_count > len) {
10379 /* i.e. offset=1 len=2 */
10380 b_count = len;
10382 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10383 if (ret)
10384 return ret;
10385 memcpy(data, ((char *)&val) + b_offset, b_count);
10386 len -= b_count;
10387 offset += b_count;
10388 eeprom->len += b_count;
10391 /* read bytes up to the last 4 byte boundary */
10392 pd = &data[eeprom->len];
10393 for (i = 0; i < (len - (len & 3)); i += 4) {
10394 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10395 if (ret) {
10396 eeprom->len += i;
10397 return ret;
10399 memcpy(pd + i, &val, 4);
10401 eeprom->len += i;
10403 if (len & 3) {
10404 /* read last bytes not ending on 4 byte boundary */
10405 pd = &data[eeprom->len];
10406 b_count = len & 3;
10407 b_offset = offset + len - b_count;
10408 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10409 if (ret)
10410 return ret;
10411 memcpy(pd, &val, b_count);
10412 eeprom->len += b_count;
10414 return 0;
10417 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10419 struct tg3 *tp = netdev_priv(dev);
10420 int ret;
10421 u32 offset, len, b_offset, odd_len;
10422 u8 *buf;
10423 __be32 start, end;
10425 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10426 return -EAGAIN;
10428 if (tg3_flag(tp, NO_NVRAM) ||
10429 eeprom->magic != TG3_EEPROM_MAGIC)
10430 return -EINVAL;
10432 offset = eeprom->offset;
10433 len = eeprom->len;
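/* NVRAM writes must also cover whole 32-bit words.  If the request
 * is unaligned at either end, read back the boundary word(s) first
 * and merge them with the caller's data below, so that neighboring
 * bytes are preserved (a read-modify-write).
 */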
10435 if ((b_offset = (offset & 3))) {
10436 /* adjustments to start on required 4 byte boundary */
10437 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10438 if (ret)
10439 return ret;
10440 len += b_offset;
10441 offset &= ~3;
10442 if (len < 4)
10443 len = 4;
10446 odd_len = 0;
10447 if (len & 3) {
10448 /* adjustments to end on required 4 byte boundary */
10449 odd_len = 1;
10450 len = (len + 3) & ~3;
10451 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10452 if (ret)
10453 return ret;
10456 buf = data;
10457 if (b_offset || odd_len) {
10458 buf = kmalloc(len, GFP_KERNEL);
10459 if (!buf)
10460 return -ENOMEM;
10461 if (b_offset)
10462 memcpy(buf, &start, 4);
10463 if (odd_len)
10464 memcpy(buf+len-4, &end, 4);
10465 memcpy(buf + b_offset, data, eeprom->len);
10468 ret = tg3_nvram_write_block(tp, offset, len, buf);
10470 if (buf != data)
10471 kfree(buf);
10473 return ret;
10476 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10478 struct tg3 *tp = netdev_priv(dev);
10480 if (tg3_flag(tp, USE_PHYLIB)) {
10481 struct phy_device *phydev;
10482 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10483 return -EAGAIN;
10484 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10485 return phy_ethtool_gset(phydev, cmd);
10488 cmd->supported = (SUPPORTED_Autoneg);
10490 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10491 cmd->supported |= (SUPPORTED_1000baseT_Half |
10492 SUPPORTED_1000baseT_Full);
10494 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10495 cmd->supported |= (SUPPORTED_100baseT_Half |
10496 SUPPORTED_100baseT_Full |
10497 SUPPORTED_10baseT_Half |
10498 SUPPORTED_10baseT_Full |
10499 SUPPORTED_TP);
10500 cmd->port = PORT_TP;
10501 } else {
10502 cmd->supported |= SUPPORTED_FIBRE;
10503 cmd->port = PORT_FIBRE;
10506 cmd->advertising = tp->link_config.advertising;
10507 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10508 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10509 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10510 cmd->advertising |= ADVERTISED_Pause;
10511 } else {
10512 cmd->advertising |= ADVERTISED_Pause |
10513 ADVERTISED_Asym_Pause;
10515 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10516 cmd->advertising |= ADVERTISED_Asym_Pause;
10519 if (netif_running(dev) && netif_carrier_ok(dev)) {
10520 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10521 cmd->duplex = tp->link_config.active_duplex;
10522 cmd->lp_advertising = tp->link_config.rmt_adv;
10523 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10524 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10525 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10526 else
10527 cmd->eth_tp_mdix = ETH_TP_MDI;
10529 } else {
10530 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10531 cmd->duplex = DUPLEX_INVALID;
10532 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10534 cmd->phy_address = tp->phy_addr;
10535 cmd->transceiver = XCVR_INTERNAL;
10536 cmd->autoneg = tp->link_config.autoneg;
10537 cmd->maxtxpkt = 0;
10538 cmd->maxrxpkt = 0;
10539 return 0;
10542 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10544 struct tg3 *tp = netdev_priv(dev);
10545 u32 speed = ethtool_cmd_speed(cmd);
10547 if (tg3_flag(tp, USE_PHYLIB)) {
10548 struct phy_device *phydev;
10549 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10550 return -EAGAIN;
10551 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10552 return phy_ethtool_sset(phydev, cmd);
10555 if (cmd->autoneg != AUTONEG_ENABLE &&
10556 cmd->autoneg != AUTONEG_DISABLE)
10557 return -EINVAL;
10559 if (cmd->autoneg == AUTONEG_DISABLE &&
10560 cmd->duplex != DUPLEX_FULL &&
10561 cmd->duplex != DUPLEX_HALF)
10562 return -EINVAL;
10564 if (cmd->autoneg == AUTONEG_ENABLE) {
10565 u32 mask = ADVERTISED_Autoneg |
10566 ADVERTISED_Pause |
10567 ADVERTISED_Asym_Pause;
10569 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10570 mask |= ADVERTISED_1000baseT_Half |
10571 ADVERTISED_1000baseT_Full;
10573 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10574 mask |= ADVERTISED_100baseT_Half |
10575 ADVERTISED_100baseT_Full |
10576 ADVERTISED_10baseT_Half |
10577 ADVERTISED_10baseT_Full |
10578 ADVERTISED_TP;
10579 else
10580 mask |= ADVERTISED_FIBRE;
10582 if (cmd->advertising & ~mask)
10583 return -EINVAL;
10585 mask &= (ADVERTISED_1000baseT_Half |
10586 ADVERTISED_1000baseT_Full |
10587 ADVERTISED_100baseT_Half |
10588 ADVERTISED_100baseT_Full |
10589 ADVERTISED_10baseT_Half |
10590 ADVERTISED_10baseT_Full);
10592 cmd->advertising &= mask;
10593 } else {
10594 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10595 if (speed != SPEED_1000)
10596 return -EINVAL;
10598 if (cmd->duplex != DUPLEX_FULL)
10599 return -EINVAL;
10600 } else {
10601 if (speed != SPEED_100 &&
10602 speed != SPEED_10)
10603 return -EINVAL;
10607 tg3_full_lock(tp, 0);
10609 tp->link_config.autoneg = cmd->autoneg;
10610 if (cmd->autoneg == AUTONEG_ENABLE) {
10611 tp->link_config.advertising = (cmd->advertising |
10612 ADVERTISED_Autoneg);
10613 tp->link_config.speed = SPEED_INVALID;
10614 tp->link_config.duplex = DUPLEX_INVALID;
10615 } else {
10616 tp->link_config.advertising = 0;
10617 tp->link_config.speed = speed;
10618 tp->link_config.duplex = cmd->duplex;
10621 tp->link_config.orig_speed = tp->link_config.speed;
10622 tp->link_config.orig_duplex = tp->link_config.duplex;
10623 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10625 if (netif_running(dev))
10626 tg3_setup_phy(tp, 1);
10628 tg3_full_unlock(tp);
10630 return 0;
10633 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10635 struct tg3 *tp = netdev_priv(dev);
10637 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10638 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10639 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10640 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10643 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10645 struct tg3 *tp = netdev_priv(dev);
10647 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10648 wol->supported = WAKE_MAGIC;
10649 else
10650 wol->supported = 0;
10651 wol->wolopts = 0;
10652 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10653 wol->wolopts = WAKE_MAGIC;
10654 memset(&wol->sopass, 0, sizeof(wol->sopass));
10657 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10659 struct tg3 *tp = netdev_priv(dev);
10660 struct device *dp = &tp->pdev->dev;
10662 if (wol->wolopts & ~WAKE_MAGIC)
10663 return -EINVAL;
10664 if ((wol->wolopts & WAKE_MAGIC) &&
10665 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10666 return -EINVAL;
10668 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10670 spin_lock_bh(&tp->lock);
10671 if (device_may_wakeup(dp))
10672 tg3_flag_set(tp, WOL_ENABLE);
10673 else
10674 tg3_flag_clear(tp, WOL_ENABLE);
10675 spin_unlock_bh(&tp->lock);
10677 return 0;
10680 static u32 tg3_get_msglevel(struct net_device *dev)
10682 struct tg3 *tp = netdev_priv(dev);
10683 return tp->msg_enable;
10686 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10688 struct tg3 *tp = netdev_priv(dev);
10689 tp->msg_enable = value;
10692 static int tg3_nway_reset(struct net_device *dev)
10694 struct tg3 *tp = netdev_priv(dev);
10695 int r;
10697 if (!netif_running(dev))
10698 return -EAGAIN;
10700 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10701 return -EINVAL;
10703 if (tg3_flag(tp, USE_PHYLIB)) {
10704 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10705 return -EAGAIN;
10706 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10707 } else {
10708 u32 bmcr;
10710 spin_lock_bh(&tp->lock);
10711 r = -EINVAL;
10712 tg3_readphy(tp, MII_BMCR, &bmcr);
10713 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10714 ((bmcr & BMCR_ANENABLE) ||
10715 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10716 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10717 BMCR_ANENABLE);
10718 r = 0;
10720 spin_unlock_bh(&tp->lock);
10723 return r;
10726 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10728 struct tg3 *tp = netdev_priv(dev);
10730 ering->rx_max_pending = tp->rx_std_ring_mask;
10731 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10732 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10733 else
10734 ering->rx_jumbo_max_pending = 0;
10736 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10738 ering->rx_pending = tp->rx_pending;
10739 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10740 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10741 else
10742 ering->rx_jumbo_pending = 0;
10744 ering->tx_pending = tp->napi[0].tx_pending;
10747 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10749 struct tg3 *tp = netdev_priv(dev);
10750 int i, irq_sync = 0, err = 0;
10752 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10753 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10754 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10755 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10756 (tg3_flag(tp, TSO_BUG) &&
10757 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10758 return -EINVAL;
10760 if (netif_running(dev)) {
10761 tg3_phy_stop(tp);
10762 tg3_netif_stop(tp);
10763 irq_sync = 1;
10766 tg3_full_lock(tp, irq_sync);
10768 tp->rx_pending = ering->rx_pending;
10770 if (tg3_flag(tp, MAX_RXPEND_64) &&
10771 tp->rx_pending > 63)
10772 tp->rx_pending = 63;
10773 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10775 for (i = 0; i < tp->irq_max; i++)
10776 tp->napi[i].tx_pending = ering->tx_pending;
10778 if (netif_running(dev)) {
10779 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10780 err = tg3_restart_hw(tp, 1);
10781 if (!err)
10782 tg3_netif_start(tp);
10785 tg3_full_unlock(tp);
10787 if (irq_sync && !err)
10788 tg3_phy_start(tp);
10790 return err;
10793 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10795 struct tg3 *tp = netdev_priv(dev);
10797 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10799 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10800 epause->rx_pause = 1;
10801 else
10802 epause->rx_pause = 0;
10804 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10805 epause->tx_pause = 1;
10806 else
10807 epause->tx_pause = 0;
10810 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10812 struct tg3 *tp = netdev_priv(dev);
10813 int err = 0;
10815 if (tg3_flag(tp, USE_PHYLIB)) {
10816 u32 newadv;
10817 struct phy_device *phydev;
10819 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10821 if (!(phydev->supported & SUPPORTED_Pause) ||
10822 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10823 (epause->rx_pause != epause->tx_pause)))
10824 return -EINVAL;
10826 tp->link_config.flowctrl = 0;
10827 if (epause->rx_pause) {
10828 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10830 if (epause->tx_pause) {
10831 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10832 newadv = ADVERTISED_Pause;
10833 } else
10834 newadv = ADVERTISED_Pause |
10835 ADVERTISED_Asym_Pause;
10836 } else if (epause->tx_pause) {
10837 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10838 newadv = ADVERTISED_Asym_Pause;
10839 } else
10840 newadv = 0;
10842 if (epause->autoneg)
10843 tg3_flag_set(tp, PAUSE_AUTONEG);
10844 else
10845 tg3_flag_clear(tp, PAUSE_AUTONEG);
10847 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10848 u32 oldadv = phydev->advertising &
10849 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10850 if (oldadv != newadv) {
10851 phydev->advertising &=
10852 ~(ADVERTISED_Pause |
10853 ADVERTISED_Asym_Pause);
10854 phydev->advertising |= newadv;
10855 if (phydev->autoneg) {
10856 /*
10857 * Always renegotiate the link to
10858 * inform our link partner of our
10859 * flow control settings, even if the
10860 * flow control is forced. Let
10861 * tg3_adjust_link() do the final
10862 * flow control setup.
10863 */
10864 return phy_start_aneg(phydev);
10868 if (!epause->autoneg)
10869 tg3_setup_flow_control(tp, 0, 0);
10870 } else {
10871 tp->link_config.orig_advertising &=
10872 ~(ADVERTISED_Pause |
10873 ADVERTISED_Asym_Pause);
10874 tp->link_config.orig_advertising |= newadv;
10876 } else {
10877 int irq_sync = 0;
10879 if (netif_running(dev)) {
10880 tg3_netif_stop(tp);
10881 irq_sync = 1;
10884 tg3_full_lock(tp, irq_sync);
10886 if (epause->autoneg)
10887 tg3_flag_set(tp, PAUSE_AUTONEG);
10888 else
10889 tg3_flag_clear(tp, PAUSE_AUTONEG);
10890 if (epause->rx_pause)
10891 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10892 else
10893 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10894 if (epause->tx_pause)
10895 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10896 else
10897 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10899 if (netif_running(dev)) {
10900 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10901 err = tg3_restart_hw(tp, 1);
10902 if (!err)
10903 tg3_netif_start(tp);
10906 tg3_full_unlock(tp);
10909 return err;
10912 static int tg3_get_sset_count(struct net_device *dev, int sset)
10914 switch (sset) {
10915 case ETH_SS_TEST:
10916 return TG3_NUM_TEST;
10917 case ETH_SS_STATS:
10918 return TG3_NUM_STATS;
10919 default:
10920 return -EOPNOTSUPP;
10924 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10925 u32 *rules __always_unused)
10927 struct tg3 *tp = netdev_priv(dev);
10929 if (!tg3_flag(tp, SUPPORT_MSIX))
10930 return -EOPNOTSUPP;
10932 switch (info->cmd) {
10933 case ETHTOOL_GRXRINGS:
10934 if (netif_running(tp->dev))
10935 info->data = tp->irq_cnt;
10936 else {
10937 info->data = num_online_cpus();
10938 if (info->data > TG3_IRQ_MAX_VECS_RSS)
10939 info->data = TG3_IRQ_MAX_VECS_RSS;
10942 /* The first interrupt vector only
10943 * handles link interrupts.
10944 */
10945 info->data -= 1;
10946 return 0;
10948 default:
10949 return -EOPNOTSUPP;
10953 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10955 u32 size = 0;
10956 struct tg3 *tp = netdev_priv(dev);
10958 if (tg3_flag(tp, SUPPORT_MSIX))
10959 size = TG3_RSS_INDIR_TBL_SIZE;
10961 return size;
10964 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10966 struct tg3 *tp = netdev_priv(dev);
10967 int i;
10969 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10970 indir[i] = tp->rss_ind_tbl[i];
10972 return 0;
10975 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10977 struct tg3 *tp = netdev_priv(dev);
10978 size_t i;
10980 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10981 tp->rss_ind_tbl[i] = indir[i];
10983 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10984 return 0;
10986 /* It is legal to write the indirection
10987 * table while the device is running.
10988 */
10989 tg3_full_lock(tp, 0);
10990 tg3_rss_write_indir_tbl(tp);
10991 tg3_full_unlock(tp);
10993 return 0;
10996 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10998 switch (stringset) {
10999 case ETH_SS_STATS:
11000 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11001 break;
11002 case ETH_SS_TEST:
11003 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11004 break;
11005 default:
11006 WARN_ON(1); /* we need a WARN() */
11007 break;
11011 static int tg3_set_phys_id(struct net_device *dev,
11012 enum ethtool_phys_id_state state)
11014 struct tg3 *tp = netdev_priv(dev);
11016 if (!netif_running(tp->dev))
11017 return -EAGAIN;
11019 switch (state) {
11020 case ETHTOOL_ID_ACTIVE:
11021 return 1; /* cycle on/off once per second */
11023 case ETHTOOL_ID_ON:
11024 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11025 LED_CTRL_1000MBPS_ON |
11026 LED_CTRL_100MBPS_ON |
11027 LED_CTRL_10MBPS_ON |
11028 LED_CTRL_TRAFFIC_OVERRIDE |
11029 LED_CTRL_TRAFFIC_BLINK |
11030 LED_CTRL_TRAFFIC_LED);
11031 break;
11033 case ETHTOOL_ID_OFF:
11034 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11035 LED_CTRL_TRAFFIC_OVERRIDE);
11036 break;
11038 case ETHTOOL_ID_INACTIVE:
11039 tw32(MAC_LED_CTRL, tp->led_ctrl);
11040 break;
11043 return 0;
11046 static void tg3_get_ethtool_stats(struct net_device *dev,
11047 struct ethtool_stats *estats, u64 *tmp_stats)
11049 struct tg3 *tp = netdev_priv(dev);
11051 if (tp->hw_stats)
11052 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11053 else
11054 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11057 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11059 int i;
11060 __be32 *buf;
11061 u32 offset = 0, len = 0;
11062 u32 magic, val;
11064 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11065 return NULL;
11067 if (magic == TG3_EEPROM_MAGIC) {
11068 for (offset = TG3_NVM_DIR_START;
11069 offset < TG3_NVM_DIR_END;
11070 offset += TG3_NVM_DIRENT_SIZE) {
11071 if (tg3_nvram_read(tp, offset, &val))
11072 return NULL;
11074 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11075 TG3_NVM_DIRTYPE_EXTVPD)
11076 break;
11079 if (offset != TG3_NVM_DIR_END) {
11080 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11081 if (tg3_nvram_read(tp, offset + 4, &offset))
11082 return NULL;
11084 offset = tg3_nvram_logical_addr(tp, offset);
11088 if (!offset || !len) {
11089 offset = TG3_NVM_VPD_OFF;
11090 len = TG3_NVM_VPD_LEN;
11093 buf = kmalloc(len, GFP_KERNEL);
11094 if (buf == NULL)
11095 return NULL;
11097 if (magic == TG3_EEPROM_MAGIC) {
11098 for (i = 0; i < len; i += 4) {
11099 /* The data is in little-endian format in NVRAM.
11100 * Use the big-endian read routines to preserve
11101 * the byte order as it exists in NVRAM.
11102 */
11103 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11104 goto error;
11106 } else {
11107 u8 *ptr;
11108 ssize_t cnt;
11109 unsigned int pos = 0;
11111 ptr = (u8 *)&buf[0];
11112 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11113 cnt = pci_read_vpd(tp->pdev, pos,
11114 len - pos, ptr);
11115 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11116 cnt = 0;
11117 else if (cnt < 0)
11118 goto error;
11120 if (pos != len)
11121 goto error;
11124 *vpdlen = len;
11126 return buf;
11128 error:
11129 kfree(buf);
11130 return NULL;
11133 #define NVRAM_TEST_SIZE 0x100
11134 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11135 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11136 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11137 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11138 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11139 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11140 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11141 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11143 static int tg3_test_nvram(struct tg3 *tp)
11145 u32 csum, magic, len;
11146 __be32 *buf;
11147 int i, j, k, err = 0, size;
11149 if (tg3_flag(tp, NO_NVRAM))
11150 return 0;
11152 if (tg3_nvram_read(tp, 0, &magic) != 0)
11153 return -EIO;
11155 if (magic == TG3_EEPROM_MAGIC)
11156 size = NVRAM_TEST_SIZE;
11157 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11158 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11159 TG3_EEPROM_SB_FORMAT_1) {
11160 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11161 case TG3_EEPROM_SB_REVISION_0:
11162 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11163 break;
11164 case TG3_EEPROM_SB_REVISION_2:
11165 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11166 break;
11167 case TG3_EEPROM_SB_REVISION_3:
11168 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11169 break;
11170 case TG3_EEPROM_SB_REVISION_4:
11171 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11172 break;
11173 case TG3_EEPROM_SB_REVISION_5:
11174 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11175 break;
11176 case TG3_EEPROM_SB_REVISION_6:
11177 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11178 break;
11179 default:
11180 return -EIO;
11182 } else
11183 return 0;
11184 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11185 size = NVRAM_SELFBOOT_HW_SIZE;
11186 else
11187 return -EIO;
11189 buf = kmalloc(size, GFP_KERNEL);
11190 if (buf == NULL)
11191 return -ENOMEM;
11193 err = -EIO;
11194 for (i = 0, j = 0; i < size; i += 4, j++) {
11195 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11196 if (err)
11197 break;
11199 if (i < size)
11200 goto out;
11202 /* Selfboot format */
11203 magic = be32_to_cpu(buf[0]);
11204 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11205 TG3_EEPROM_MAGIC_FW) {
11206 u8 *buf8 = (u8 *) buf, csum8 = 0;
11208 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11209 TG3_EEPROM_SB_REVISION_2) {
11210 /* For rev 2, the csum doesn't include the MBA. */
11211 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11212 csum8 += buf8[i];
11213 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11214 csum8 += buf8[i];
11215 } else {
11216 for (i = 0; i < size; i++)
11217 csum8 += buf8[i];
11220 if (csum8 == 0) {
11221 err = 0;
11222 goto out;
11225 err = -EIO;
11226 goto out;
11229 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11230 TG3_EEPROM_MAGIC_HW) {
11231 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11232 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11233 u8 *buf8 = (u8 *) buf;
11235 /* Separate the parity bits and the data bytes. */
11236 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11237 if ((i == 0) || (i == 8)) {
11238 int l;
11239 u8 msk;
11241 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11242 parity[k++] = buf8[i] & msk;
11243 i++;
11244 } else if (i == 16) {
11245 int l;
11246 u8 msk;
11248 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11249 parity[k++] = buf8[i] & msk;
11250 i++;
11252 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11253 parity[k++] = buf8[i] & msk;
11254 i++;
11256 data[j++] = buf8[i];
11259 err = -EIO;
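/* Each data byte combined with its parity bit must have odd parity:
 * a byte with an odd number of set bits needs a clear parity bit,
 * and a byte with an even number needs a set one.
 */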
11260 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11261 u8 hw8 = hweight8(data[i]);
11263 if ((hw8 & 0x1) && parity[i])
11264 goto out;
11265 else if (!(hw8 & 0x1) && !parity[i])
11266 goto out;
11268 err = 0;
11269 goto out;
11272 err = -EIO;
11274 /* Bootstrap checksum at offset 0x10 */
11275 csum = calc_crc((unsigned char *) buf, 0x10);
11276 if (csum != le32_to_cpu(buf[0x10/4]))
11277 goto out;
11279 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11280 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11281 if (csum != le32_to_cpu(buf[0xfc/4]))
11282 goto out;
11284 kfree(buf);
11286 buf = tg3_vpd_readblock(tp, &len);
11287 if (!buf)
11288 return -ENOMEM;
11290 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11291 if (i > 0) {
11292 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11293 if (j < 0)
11294 goto out;
11296 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11297 goto out;
11299 i += PCI_VPD_LRDT_TAG_SIZE;
11300 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11301 PCI_VPD_RO_KEYWORD_CHKSUM);
11302 if (j > 0) {
11303 u8 csum8 = 0;
11305 j += PCI_VPD_INFO_FLD_HDR_SIZE;
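/* Per the PCI VPD spec, the checksum byte is chosen so that all
 * bytes from the start of the VPD block up to and including it
 * sum to zero (mod 256).
 */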
11307 for (i = 0; i <= j; i++)
11308 csum8 += ((u8 *)buf)[i];
11310 if (csum8)
11311 goto out;
11315 err = 0;
11317 out:
11318 kfree(buf);
11319 return err;
11322 #define TG3_SERDES_TIMEOUT_SEC 2
11323 #define TG3_COPPER_TIMEOUT_SEC 6
11325 static int tg3_test_link(struct tg3 *tp)
11327 int i, max;
11329 if (!netif_running(tp->dev))
11330 return -ENODEV;
11332 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11333 max = TG3_SERDES_TIMEOUT_SEC;
11334 else
11335 max = TG3_COPPER_TIMEOUT_SEC;
11337 for (i = 0; i < max; i++) {
11338 if (netif_carrier_ok(tp->dev))
11339 return 0;
11341 if (msleep_interruptible(1000))
11342 break;
11345 return -EIO;
11348 /* Only test the commonly used registers */
11349 static int tg3_test_registers(struct tg3 *tp)
11351 int i, is_5705, is_5750;
11352 u32 offset, read_mask, write_mask, val, save_val, read_val;
11353 static struct {
11354 u16 offset;
11355 u16 flags;
11356 #define TG3_FL_5705 0x1
11357 #define TG3_FL_NOT_5705 0x2
11358 #define TG3_FL_NOT_5788 0x4
11359 #define TG3_FL_NOT_5750 0x8
11360 u32 read_mask;
11361 u32 write_mask;
11362 } reg_tbl[] = {
11363 /* MAC Control Registers */
11364 { MAC_MODE, TG3_FL_NOT_5705,
11365 0x00000000, 0x00ef6f8c },
11366 { MAC_MODE, TG3_FL_5705,
11367 0x00000000, 0x01ef6b8c },
11368 { MAC_STATUS, TG3_FL_NOT_5705,
11369 0x03800107, 0x00000000 },
11370 { MAC_STATUS, TG3_FL_5705,
11371 0x03800100, 0x00000000 },
11372 { MAC_ADDR_0_HIGH, 0x0000,
11373 0x00000000, 0x0000ffff },
11374 { MAC_ADDR_0_LOW, 0x0000,
11375 0x00000000, 0xffffffff },
11376 { MAC_RX_MTU_SIZE, 0x0000,
11377 0x00000000, 0x0000ffff },
11378 { MAC_TX_MODE, 0x0000,
11379 0x00000000, 0x00000070 },
11380 { MAC_TX_LENGTHS, 0x0000,
11381 0x00000000, 0x00003fff },
11382 { MAC_RX_MODE, TG3_FL_NOT_5705,
11383 0x00000000, 0x000007fc },
11384 { MAC_RX_MODE, TG3_FL_5705,
11385 0x00000000, 0x000007dc },
11386 { MAC_HASH_REG_0, 0x0000,
11387 0x00000000, 0xffffffff },
11388 { MAC_HASH_REG_1, 0x0000,
11389 0x00000000, 0xffffffff },
11390 { MAC_HASH_REG_2, 0x0000,
11391 0x00000000, 0xffffffff },
11392 { MAC_HASH_REG_3, 0x0000,
11393 0x00000000, 0xffffffff },
11395 /* Receive Data and Receive BD Initiator Control Registers. */
11396 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11397 0x00000000, 0xffffffff },
11398 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11399 0x00000000, 0xffffffff },
11400 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11401 0x00000000, 0x00000003 },
11402 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11403 0x00000000, 0xffffffff },
11404 { RCVDBDI_STD_BD+0, 0x0000,
11405 0x00000000, 0xffffffff },
11406 { RCVDBDI_STD_BD+4, 0x0000,
11407 0x00000000, 0xffffffff },
11408 { RCVDBDI_STD_BD+8, 0x0000,
11409 0x00000000, 0xffff0002 },
11410 { RCVDBDI_STD_BD+0xc, 0x0000,
11411 0x00000000, 0xffffffff },
11413 /* Receive BD Initiator Control Registers. */
11414 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11415 0x00000000, 0xffffffff },
11416 { RCVBDI_STD_THRESH, TG3_FL_5705,
11417 0x00000000, 0x000003ff },
11418 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11419 0x00000000, 0xffffffff },
11421 /* Host Coalescing Control Registers. */
11422 { HOSTCC_MODE, TG3_FL_NOT_5705,
11423 0x00000000, 0x00000004 },
11424 { HOSTCC_MODE, TG3_FL_5705,
11425 0x00000000, 0x000000f6 },
11426 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11427 0x00000000, 0xffffffff },
11428 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11429 0x00000000, 0x000003ff },
11430 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11431 0x00000000, 0xffffffff },
11432 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11433 0x00000000, 0x000003ff },
11434 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11435 0x00000000, 0xffffffff },
11436 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11437 0x00000000, 0x000000ff },
11438 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11439 0x00000000, 0xffffffff },
11440 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11441 0x00000000, 0x000000ff },
11442 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11443 0x00000000, 0xffffffff },
11444 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11445 0x00000000, 0xffffffff },
11446 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11447 0x00000000, 0xffffffff },
11448 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11449 0x00000000, 0x000000ff },
11450 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11451 0x00000000, 0xffffffff },
11452 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11453 0x00000000, 0x000000ff },
11454 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11455 0x00000000, 0xffffffff },
11456 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11457 0x00000000, 0xffffffff },
11458 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11459 0x00000000, 0xffffffff },
11460 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11461 0x00000000, 0xffffffff },
11462 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11463 0x00000000, 0xffffffff },
11464 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11465 0xffffffff, 0x00000000 },
11466 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11467 0xffffffff, 0x00000000 },
11469 /* Buffer Manager Control Registers. */
11470 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11471 0x00000000, 0x007fff80 },
11472 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11473 0x00000000, 0x007fffff },
11474 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11475 0x00000000, 0x0000003f },
11476 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11477 0x00000000, 0x000001ff },
11478 { BUFMGR_MB_HIGH_WATER, 0x0000,
11479 0x00000000, 0x000001ff },
11480 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11481 0xffffffff, 0x00000000 },
11482 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11483 0xffffffff, 0x00000000 },
11485 /* Mailbox Registers */
11486 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11487 0x00000000, 0x000001ff },
11488 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11489 0x00000000, 0x000001ff },
11490 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11491 0x00000000, 0x000007ff },
11492 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11493 0x00000000, 0x000001ff },
11495 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11498 is_5705 = is_5750 = 0;
11499 if (tg3_flag(tp, 5705_PLUS)) {
11500 is_5705 = 1;
11501 if (tg3_flag(tp, 5750_PLUS))
11502 is_5750 = 1;
11505 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11506 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11507 continue;
11509 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11510 continue;
11512 if (tg3_flag(tp, IS_5788) &&
11513 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11514 continue;
11516 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11517 continue;
11519 offset = (u32) reg_tbl[i].offset;
11520 read_mask = reg_tbl[i].read_mask;
11521 write_mask = reg_tbl[i].write_mask;
11523 /* Save the original register content */
11524 save_val = tr32(offset);
11526 /* Determine the read-only value. */
11527 read_val = save_val & read_mask;
11529 /* Write zero to the register, then make sure the read-only bits
11530 * are not changed and the read/write bits are all zeros.
11531 */
11532 tw32(offset, 0);
11534 val = tr32(offset);
11536 /* Test the read-only and read/write bits. */
11537 if (((val & read_mask) != read_val) || (val & write_mask))
11538 goto out;
11540 /* Write ones to all the bits defined by RdMask and WrMask, then
11541 * make sure the read-only bits are not changed and the
11542 * read/write bits are all ones.
11543 */
11544 tw32(offset, read_mask | write_mask);
11546 val = tr32(offset);
11548 /* Test the read-only bits. */
11549 if ((val & read_mask) != read_val)
11550 goto out;
11552 /* Test the read/write bits. */
11553 if ((val & write_mask) != write_mask)
11554 goto out;
11556 tw32(offset, save_val);
11559 return 0;
11561 out:
11562 if (netif_msg_hw(tp))
11563 netdev_err(tp->dev,
11564 "Register test failed at offset %x\n", offset);
11565 tw32(offset, save_val);
11566 return -EIO;
11569 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11571 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11572 int i;
11573 u32 j;
11575 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11576 for (j = 0; j < len; j += 4) {
11577 u32 val;
11579 tg3_write_mem(tp, offset + j, test_pattern[i]);
11580 tg3_read_mem(tp, offset + j, &val);
11581 if (val != test_pattern[i])
11582 return -EIO;
11585 return 0;
11588 static int tg3_test_memory(struct tg3 *tp)
11590 static struct mem_entry {
11591 u32 offset;
11592 u32 len;
11593 } mem_tbl_570x[] = {
11594 { 0x00000000, 0x00b50},
11595 { 0x00002000, 0x1c000},
11596 { 0xffffffff, 0x00000}
11597 }, mem_tbl_5705[] = {
11598 { 0x00000100, 0x0000c},
11599 { 0x00000200, 0x00008},
11600 { 0x00004000, 0x00800},
11601 { 0x00006000, 0x01000},
11602 { 0x00008000, 0x02000},
11603 { 0x00010000, 0x0e000},
11604 { 0xffffffff, 0x00000}
11605 }, mem_tbl_5755[] = {
11606 { 0x00000200, 0x00008},
11607 { 0x00004000, 0x00800},
11608 { 0x00006000, 0x00800},
11609 { 0x00008000, 0x02000},
11610 { 0x00010000, 0x0c000},
11611 { 0xffffffff, 0x00000}
11612 }, mem_tbl_5906[] = {
11613 { 0x00000200, 0x00008},
11614 { 0x00004000, 0x00400},
11615 { 0x00006000, 0x00400},
11616 { 0x00008000, 0x01000},
11617 { 0x00010000, 0x01000},
11618 { 0xffffffff, 0x00000}
11619 }, mem_tbl_5717[] = {
11620 { 0x00000200, 0x00008},
11621 { 0x00010000, 0x0a000},
11622 { 0x00020000, 0x13c00},
11623 { 0xffffffff, 0x00000}
11624 }, mem_tbl_57765[] = {
11625 { 0x00000200, 0x00008},
11626 { 0x00004000, 0x00800},
11627 { 0x00006000, 0x09800},
11628 { 0x00010000, 0x0a000},
11629 { 0xffffffff, 0x00000}
11631 struct mem_entry *mem_tbl;
11632 int err = 0;
11633 int i;
11635 if (tg3_flag(tp, 5717_PLUS))
11636 mem_tbl = mem_tbl_5717;
11637 else if (tg3_flag(tp, 57765_CLASS))
11638 mem_tbl = mem_tbl_57765;
11639 else if (tg3_flag(tp, 5755_PLUS))
11640 mem_tbl = mem_tbl_5755;
11641 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11642 mem_tbl = mem_tbl_5906;
11643 else if (tg3_flag(tp, 5705_PLUS))
11644 mem_tbl = mem_tbl_5705;
11645 else
11646 mem_tbl = mem_tbl_570x;
11648 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11649 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11650 if (err)
11651 break;
11654 return err;
11657 #define TG3_TSO_MSS 500
11659 #define TG3_TSO_IP_HDR_LEN 20
11660 #define TG3_TSO_TCP_HDR_LEN 20
11661 #define TG3_TSO_TCP_OPT_LEN 12
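/* Canned loopback frame: a 2-byte IPv4 Ethertype, a 20-byte IP
 * header (10.0.0.1 -> 10.0.0.2, proto TCP) and a 32-byte TCP header
 * (20 bytes plus a 12-byte NOP/NOP/timestamp option block, matching
 * the TG3_TSO_*_LEN constants above).  The IP total length field is
 * filled in at run time.
 */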
11663 static const u8 tg3_tso_header[] = {
11664 0x08, 0x00,
11665 0x45, 0x00, 0x00, 0x00,
11666 0x00, 0x00, 0x40, 0x00,
11667 0x40, 0x06, 0x00, 0x00,
11668 0x0a, 0x00, 0x00, 0x01,
11669 0x0a, 0x00, 0x00, 0x02,
11670 0x0d, 0x00, 0xe0, 0x00,
11671 0x00, 0x00, 0x01, 0x00,
11672 0x00, 0x00, 0x02, 0x00,
11673 0x80, 0x10, 0x10, 0x00,
11674 0x14, 0x09, 0x00, 0x00,
11675 0x01, 0x01, 0x08, 0x0a,
11676 0x11, 0x11, 0x11, 0x11,
11677 0x11, 0x11, 0x11, 0x11,
11680 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11682 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11683 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11684 u32 budget;
11685 struct sk_buff *skb;
11686 u8 *tx_data, *rx_data;
11687 dma_addr_t map;
11688 int num_pkts, tx_len, rx_len, i, err;
11689 struct tg3_rx_buffer_desc *desc;
11690 struct tg3_napi *tnapi, *rnapi;
11691 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
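/* When multiple vectors are in use, vector 0 services only link
 * interrupts, so the test traffic runs through napi[1] for
 * whichever of rx (RSS) and tx (TSS) is multi-queue.
 */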
11693 tnapi = &tp->napi[0];
11694 rnapi = &tp->napi[0];
11695 if (tp->irq_cnt > 1) {
11696 if (tg3_flag(tp, ENABLE_RSS))
11697 rnapi = &tp->napi[1];
11698 if (tg3_flag(tp, ENABLE_TSS))
11699 tnapi = &tp->napi[1];
11701 coal_now = tnapi->coal_now | rnapi->coal_now;
11703 err = -EIO;
11705 tx_len = pktsz;
11706 skb = netdev_alloc_skb(tp->dev, tx_len);
11707 if (!skb)
11708 return -ENOMEM;
11710 tx_data = skb_put(skb, tx_len);
11711 memcpy(tx_data, tp->dev->dev_addr, 6);
11712 memset(tx_data + 6, 0x0, 8);
11714 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11716 if (tso_loopback) {
11717 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11719 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11720 TG3_TSO_TCP_OPT_LEN;
11722 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11723 sizeof(tg3_tso_header));
11724 mss = TG3_TSO_MSS;
11726 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11727 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11729 /* Set the total length field in the IP header */
11730 iph->tot_len = htons((u16)(mss + hdr_len));
11732 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11733 TXD_FLAG_CPU_POST_DMA);
11735 if (tg3_flag(tp, HW_TSO_1) ||
11736 tg3_flag(tp, HW_TSO_2) ||
11737 tg3_flag(tp, HW_TSO_3)) {
11738 struct tcphdr *th;
11739 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11740 th = (struct tcphdr *)&tx_data[val];
11741 th->check = 0;
11742 } else
11743 base_flags |= TXD_FLAG_TCPUDP_CSUM;
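/* Tell the hardware how long the LSO header is; the encoding
 * differs by TSO generation.  HW_TSO_3 splits the length between
 * the mss field and base_flags, HW_TSO_2 packs it into the upper
 * mss bits, and older devices encode only the TCP option length.
 */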
11745 if (tg3_flag(tp, HW_TSO_3)) {
11746 mss |= (hdr_len & 0xc) << 12;
11747 if (hdr_len & 0x10)
11748 base_flags |= 0x00000010;
11749 base_flags |= (hdr_len & 0x3e0) << 5;
11750 } else if (tg3_flag(tp, HW_TSO_2))
11751 mss |= hdr_len << 9;
11752 else if (tg3_flag(tp, HW_TSO_1) ||
11753 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11754 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11755 } else {
11756 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11759 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11760 } else {
11761 num_pkts = 1;
11762 data_off = ETH_HLEN;
11765 for (i = data_off; i < tx_len; i++)
11766 tx_data[i] = (u8) (i & 0xff);
11768 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11769 if (pci_dma_mapping_error(tp->pdev, map)) {
11770 dev_kfree_skb(skb);
11771 return -EIO;
11774 val = tnapi->tx_prod;
11775 tnapi->tx_buffers[val].skb = skb;
11776 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11778 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11779 rnapi->coal_now);
11781 udelay(10);
11783 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11785 budget = tg3_tx_avail(tnapi);
11786 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11787 base_flags | TXD_FLAG_END, mss, 0)) {
11788 tnapi->tx_buffers[val].skb = NULL;
11789 dev_kfree_skb(skb);
11790 return -EIO;
11793 tnapi->tx_prod++;
11795 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11796 tr32_mailbox(tnapi->prodmbox);
11798 udelay(10);
11800 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11801 for (i = 0; i < 35; i++) {
11802 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11803 coal_now);
11805 udelay(10);
11807 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11808 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11809 if ((tx_idx == tnapi->tx_prod) &&
11810 (rx_idx == (rx_start_idx + num_pkts)))
11811 break;
11814 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11815 dev_kfree_skb(skb);
11817 if (tx_idx != tnapi->tx_prod)
11818 goto out;
11820 if (rx_idx != rx_start_idx + num_pkts)
11821 goto out;
11823 val = data_off;
11824 while (rx_idx != rx_start_idx) {
11825 desc = &rnapi->rx_rcb[rx_start_idx++];
11826 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11827 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11829 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11830 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11831 goto out;
11833 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11834 - ETH_FCS_LEN;
11836 if (!tso_loopback) {
11837 if (rx_len != tx_len)
11838 goto out;
11840 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11841 if (opaque_key != RXD_OPAQUE_RING_STD)
11842 goto out;
11843 } else {
11844 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11845 goto out;
11847 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11848 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11849 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11850 goto out;
11853 if (opaque_key == RXD_OPAQUE_RING_STD) {
11854 rx_data = tpr->rx_std_buffers[desc_idx].data;
11855 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11856 mapping);
11857 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11858 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11859 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11860 mapping);
11861 } else
11862 goto out;
11864 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11865 PCI_DMA_FROMDEVICE);
11867 rx_data += TG3_RX_OFFSET(tp);
11868 for (i = data_off; i < rx_len; i++, val++) {
11869 if (*(rx_data + i) != (u8) (val & 0xff))
11870 goto out;
11874 err = 0;
11876 /* tg3_free_rings will unmap and free the rx_data */
11877 out:
11878 return err;
11881 #define TG3_STD_LOOPBACK_FAILED 1
11882 #define TG3_JMB_LOOPBACK_FAILED 2
11883 #define TG3_TSO_LOOPBACK_FAILED 4
11884 #define TG3_LOOPBACK_FAILED \
11885 (TG3_STD_LOOPBACK_FAILED | \
11886 TG3_JMB_LOOPBACK_FAILED | \
11887 TG3_TSO_LOOPBACK_FAILED)
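/* Each data[] slot reports one loopback mode (0 = MAC, 1 = PHY
 * internal, 2 = PHY external) as a bitmask of the failed packet
 * types above.
 */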
11889 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11891 int err = -EIO;
11892 u32 eee_cap;
11894 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11895 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11897 if (!netif_running(tp->dev)) {
11898 data[0] = TG3_LOOPBACK_FAILED;
11899 data[1] = TG3_LOOPBACK_FAILED;
11900 if (do_extlpbk)
11901 data[2] = TG3_LOOPBACK_FAILED;
11902 goto done;
11905 err = tg3_reset_hw(tp, 1);
11906 if (err) {
11907 data[0] = TG3_LOOPBACK_FAILED;
11908 data[1] = TG3_LOOPBACK_FAILED;
11909 if (do_extlpbk)
11910 data[2] = TG3_LOOPBACK_FAILED;
11911 goto done;
11914 if (tg3_flag(tp, ENABLE_RSS)) {
11915 int i;
11917 /* Reroute all rx packets to the 1st queue */
11918 for (i = MAC_RSS_INDIR_TBL_0;
11919 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11920 tw32(i, 0x0);
11923 /* HW errata - mac loopback fails in some cases on 5780.
11924 * Normal traffic and PHY loopback are not affected by
11925 * errata. Also, the MAC loopback test is deprecated for
11926 * all newer ASIC revisions.
11927 */
11928 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11929 !tg3_flag(tp, CPMU_PRESENT)) {
11930 tg3_mac_loopback(tp, true);
11932 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11933 data[0] |= TG3_STD_LOOPBACK_FAILED;
11935 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11936 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11937 data[0] |= TG3_JMB_LOOPBACK_FAILED;
11939 tg3_mac_loopback(tp, false);
11942 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11943 !tg3_flag(tp, USE_PHYLIB)) {
11944 int i;
11946 tg3_phy_lpbk_set(tp, 0, false);
11948 /* Wait for link */
11949 for (i = 0; i < 100; i++) {
11950 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11951 break;
11952 mdelay(1);
11955 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11956 data[1] |= TG3_STD_LOOPBACK_FAILED;
11957 if (tg3_flag(tp, TSO_CAPABLE) &&
11958 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11959 data[1] |= TG3_TSO_LOOPBACK_FAILED;
11960 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11961 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11962 data[1] |= TG3_JMB_LOOPBACK_FAILED;
11964 if (do_extlpbk) {
11965 tg3_phy_lpbk_set(tp, 0, true);
11967 /* All link indications report up, but the hardware
11968 * isn't really ready for about 20 msec. Double it
11969 * to be sure.
11970 */
11971 mdelay(40);
11973 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11974 data[2] |= TG3_STD_LOOPBACK_FAILED;
11975 if (tg3_flag(tp, TSO_CAPABLE) &&
11976 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11977 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11978 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11979 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11980 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11983 /* Re-enable gphy autopowerdown. */
11984 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11985 tg3_phy_toggle_apd(tp, true);
11988 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11990 done:
11991 tp->phy_flags |= eee_cap;
11993 return err;
11996 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11997 u64 *data)
11999 struct tg3 *tp = netdev_priv(dev);
12000 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12002 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12003 tg3_power_up(tp)) {
12004 etest->flags |= ETH_TEST_FL_FAILED;
12005 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12006 return;
12009 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12011 if (tg3_test_nvram(tp) != 0) {
12012 etest->flags |= ETH_TEST_FL_FAILED;
12013 data[0] = 1;
12015 if (!doextlpbk && tg3_test_link(tp)) {
12016 etest->flags |= ETH_TEST_FL_FAILED;
12017 data[1] = 1;
12019 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12020 int err, err2 = 0, irq_sync = 0;
12022 if (netif_running(dev)) {
12023 tg3_phy_stop(tp);
12024 tg3_netif_stop(tp);
12025 irq_sync = 1;
12028 tg3_full_lock(tp, irq_sync);
12030 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12031 err = tg3_nvram_lock(tp);
12032 tg3_halt_cpu(tp, RX_CPU_BASE);
12033 if (!tg3_flag(tp, 5705_PLUS))
12034 tg3_halt_cpu(tp, TX_CPU_BASE);
12035 if (!err)
12036 tg3_nvram_unlock(tp);
12038 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12039 tg3_phy_reset(tp);
12041 if (tg3_test_registers(tp) != 0) {
12042 etest->flags |= ETH_TEST_FL_FAILED;
12043 data[2] = 1;
12046 if (tg3_test_memory(tp) != 0) {
12047 etest->flags |= ETH_TEST_FL_FAILED;
12048 data[3] = 1;
12051 if (doextlpbk)
12052 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12054 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12055 etest->flags |= ETH_TEST_FL_FAILED;
12057 tg3_full_unlock(tp);
12059 if (tg3_test_interrupt(tp) != 0) {
12060 etest->flags |= ETH_TEST_FL_FAILED;
12061 data[7] = 1;
12064 tg3_full_lock(tp, 0);
12066 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12067 if (netif_running(dev)) {
12068 tg3_flag_set(tp, INIT_COMPLETE);
12069 err2 = tg3_restart_hw(tp, 1);
12070 if (!err2)
12071 tg3_netif_start(tp);
12074 tg3_full_unlock(tp);
12076 if (irq_sync && !err2)
12077 tg3_phy_start(tp);
12079 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12080 tg3_power_down(tp);
12084 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12086 struct mii_ioctl_data *data = if_mii(ifr);
12087 struct tg3 *tp = netdev_priv(dev);
12088 int err;
12090 if (tg3_flag(tp, USE_PHYLIB)) {
12091 struct phy_device *phydev;
12092 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12093 return -EAGAIN;
12094 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12095 return phy_mii_ioctl(phydev, ifr, cmd);
12098 switch (cmd) {
12099 case SIOCGMIIPHY:
12100 data->phy_id = tp->phy_addr;
12102 /* fallthru */
12103 case SIOCGMIIREG: {
12104 u32 mii_regval;
12106 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12107 break; /* We have no PHY */
12109 if (!netif_running(dev))
12110 return -EAGAIN;
12112 spin_lock_bh(&tp->lock);
12113 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12114 spin_unlock_bh(&tp->lock);
12116 data->val_out = mii_regval;
12118 return err;
12121 case SIOCSMIIREG:
12122 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12123 break; /* We have no PHY */
12125 if (!netif_running(dev))
12126 return -EAGAIN;
12128 spin_lock_bh(&tp->lock);
12129 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12130 spin_unlock_bh(&tp->lock);
12132 return err;
12134 default:
12135 /* do nothing */
12136 break;
12138 return -EOPNOTSUPP;
12141 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12143 struct tg3 *tp = netdev_priv(dev);
12145 memcpy(ec, &tp->coal, sizeof(*ec));
12146 return 0;
12149 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12151 struct tg3 *tp = netdev_priv(dev);
12152 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12153 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12155 if (!tg3_flag(tp, 5705_PLUS)) {
12156 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12157 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12158 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12159 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12162 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12163 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12164 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12165 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12166 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12167 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12168 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12169 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12170 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12171 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12172 return -EINVAL;
12174 /* No rx interrupts will be generated if both are zero */
12175 if ((ec->rx_coalesce_usecs == 0) &&
12176 (ec->rx_max_coalesced_frames == 0))
12177 return -EINVAL;
12179 /* No tx interrupts will be generated if both are zero */
12180 if ((ec->tx_coalesce_usecs == 0) &&
12181 (ec->tx_max_coalesced_frames == 0))
12182 return -EINVAL;
12184 /* Only copy relevant parameters, ignore all others. */
12185 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12186 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12187 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12188 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12189 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12190 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12191 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12192 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12193 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12195 if (netif_running(dev)) {
12196 tg3_full_lock(tp, 0);
12197 __tg3_set_coalesce(tp, &tp->coal);
12198 tg3_full_unlock(tp);
12200 return 0;
12203 static const struct ethtool_ops tg3_ethtool_ops = {
12204 .get_settings = tg3_get_settings,
12205 .set_settings = tg3_set_settings,
12206 .get_drvinfo = tg3_get_drvinfo,
12207 .get_regs_len = tg3_get_regs_len,
12208 .get_regs = tg3_get_regs,
12209 .get_wol = tg3_get_wol,
12210 .set_wol = tg3_set_wol,
12211 .get_msglevel = tg3_get_msglevel,
12212 .set_msglevel = tg3_set_msglevel,
12213 .nway_reset = tg3_nway_reset,
12214 .get_link = ethtool_op_get_link,
12215 .get_eeprom_len = tg3_get_eeprom_len,
12216 .get_eeprom = tg3_get_eeprom,
12217 .set_eeprom = tg3_set_eeprom,
12218 .get_ringparam = tg3_get_ringparam,
12219 .set_ringparam = tg3_set_ringparam,
12220 .get_pauseparam = tg3_get_pauseparam,
12221 .set_pauseparam = tg3_set_pauseparam,
12222 .self_test = tg3_self_test,
12223 .get_strings = tg3_get_strings,
12224 .set_phys_id = tg3_set_phys_id,
12225 .get_ethtool_stats = tg3_get_ethtool_stats,
12226 .get_coalesce = tg3_get_coalesce,
12227 .set_coalesce = tg3_set_coalesce,
12228 .get_sset_count = tg3_get_sset_count,
12229 .get_rxnfc = tg3_get_rxnfc,
12230 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12231 .get_rxfh_indir = tg3_get_rxfh_indir,
12232 .set_rxfh_indir = tg3_set_rxfh_indir,
12235 static void tg3_set_rx_mode(struct net_device *dev)
12237 struct tg3 *tp = netdev_priv(dev);
12239 if (!netif_running(dev))
12240 return;
12242 tg3_full_lock(tp, 0);
12243 __tg3_set_rx_mode(dev);
12244 tg3_full_unlock(tp);
12247 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12248 int new_mtu)
12250 dev->mtu = new_mtu;
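/* 5780-class chips apparently cannot run TSO and jumbo rings
 * together: TSO capability is dropped while a jumbo MTU is set and
 * restored once the MTU falls back to standard size.
 */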
12252 if (new_mtu > ETH_DATA_LEN) {
12253 if (tg3_flag(tp, 5780_CLASS)) {
12254 netdev_update_features(dev);
12255 tg3_flag_clear(tp, TSO_CAPABLE);
12256 } else {
12257 tg3_flag_set(tp, JUMBO_RING_ENABLE);
12259 } else {
12260 if (tg3_flag(tp, 5780_CLASS)) {
12261 tg3_flag_set(tp, TSO_CAPABLE);
12262 netdev_update_features(dev);
12264 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12268 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12270 struct tg3 *tp = netdev_priv(dev);
12271 int err;
12273 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12274 return -EINVAL;
12276 if (!netif_running(dev)) {
12277 /* We'll just catch it later when the
12278 * device is up'd.
12279 */
12280 tg3_set_mtu(dev, tp, new_mtu);
12281 return 0;
12284 tg3_phy_stop(tp);
12286 tg3_netif_stop(tp);
12288 tg3_full_lock(tp, 1);
12290 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12292 tg3_set_mtu(dev, tp, new_mtu);
12294 err = tg3_restart_hw(tp, 0);
12296 if (!err)
12297 tg3_netif_start(tp);
12299 tg3_full_unlock(tp);
12301 if (!err)
12302 tg3_phy_start(tp);
12304 return err;
12307 static const struct net_device_ops tg3_netdev_ops = {
12308 .ndo_open = tg3_open,
12309 .ndo_stop = tg3_close,
12310 .ndo_start_xmit = tg3_start_xmit,
12311 .ndo_get_stats64 = tg3_get_stats64,
12312 .ndo_validate_addr = eth_validate_addr,
12313 .ndo_set_rx_mode = tg3_set_rx_mode,
12314 .ndo_set_mac_address = tg3_set_mac_addr,
12315 .ndo_do_ioctl = tg3_ioctl,
12316 .ndo_tx_timeout = tg3_tx_timeout,
12317 .ndo_change_mtu = tg3_change_mtu,
12318 .ndo_fix_features = tg3_fix_features,
12319 .ndo_set_features = tg3_set_features,
12320 #ifdef CONFIG_NET_POLL_CONTROLLER
12321 .ndo_poll_controller = tg3_poll_controller,
12322 #endif
12325 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12327 u32 cursize, val, magic;
12329 tp->nvram_size = EEPROM_CHIP_SIZE;
12331 if (tg3_nvram_read(tp, 0, &magic) != 0)
12332 return;
12334 if ((magic != TG3_EEPROM_MAGIC) &&
12335 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12336 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12337 return;
12339 /*
12340 * Size the chip by reading offsets at increasing powers of two.
12341 * When we encounter our validation signature, we know the addressing
12342 * has wrapped around, and thus have our chip size.
12343 */
12344 cursize = 0x10;
12346 while (cursize < tp->nvram_size) {
12347 if (tg3_nvram_read(tp, cursize, &val) != 0)
12348 return;
12350 if (val == magic)
12351 break;
12353 cursize <<= 1;
12356 tp->nvram_size = cursize;
12359 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12361 u32 val;
12363 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12364 return;
12366 /* Selfboot format */
12367 if (val != TG3_EEPROM_MAGIC) {
12368 tg3_get_eeprom_size(tp);
12369 return;
12372 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12373 if (val != 0) {
12374 /* This is confusing. We want to operate on the
12375 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12376 * call will read from NVRAM and byteswap the data
12377 * according to the byteswapping settings for all
12378 * other register accesses. This ensures the data we
12379 * want will always reside in the lower 16-bits.
12380 * However, the data in NVRAM is in LE format, which
12381 * means the data from the NVRAM read will always be
12382 * opposite the endianness of the CPU. The 16-bit
12383 * byteswap then brings the data to CPU endianness.
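* For example, if the size field holds 0x0200 (512KB), the
* byteswapped read can come back as 0x0002, and swab16()
* restores 0x0200 before the multiply by 1024 below.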
12385 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12386 return;
12389 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
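/* Default NVRAM_CFG1 decoder for chips without a dedicated handler
 * below: record the flash vendor, page size, and buffering mode.
 */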
12392 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12394 u32 nvcfg1;
12396 nvcfg1 = tr32(NVRAM_CFG1);
12397 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12398 tg3_flag_set(tp, FLASH);
12399 } else {
12400 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12401 tw32(NVRAM_CFG1, nvcfg1);
12404 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12405 tg3_flag(tp, 5780_CLASS)) {
12406 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12407 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12408 tp->nvram_jedecnum = JEDEC_ATMEL;
12409 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12410 tg3_flag_set(tp, NVRAM_BUFFERED);
12411 break;
12412 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12413 tp->nvram_jedecnum = JEDEC_ATMEL;
12414 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12415 break;
12416 case FLASH_VENDOR_ATMEL_EEPROM:
12417 tp->nvram_jedecnum = JEDEC_ATMEL;
12418 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12419 tg3_flag_set(tp, NVRAM_BUFFERED);
12420 break;
12421 case FLASH_VENDOR_ST:
12422 tp->nvram_jedecnum = JEDEC_ST;
12423 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12424 tg3_flag_set(tp, NVRAM_BUFFERED);
12425 break;
12426 case FLASH_VENDOR_SAIFUN:
12427 tp->nvram_jedecnum = JEDEC_SAIFUN;
12428 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12429 break;
12430 case FLASH_VENDOR_SST_SMALL:
12431 case FLASH_VENDOR_SST_LARGE:
12432 tp->nvram_jedecnum = JEDEC_SST;
12433 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12434 break;
12436 } else {
12437 tp->nvram_jedecnum = JEDEC_ATMEL;
12438 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12439 tg3_flag_set(tp, NVRAM_BUFFERED);
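/* Translate the 5752-style page size field of NVRAM_CFG1 into bytes. */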
12443 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12445 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12446 case FLASH_5752PAGE_SIZE_256:
12447 tp->nvram_pagesize = 256;
12448 break;
12449 case FLASH_5752PAGE_SIZE_512:
12450 tp->nvram_pagesize = 512;
12451 break;
12452 case FLASH_5752PAGE_SIZE_1K:
12453 tp->nvram_pagesize = 1024;
12454 break;
12455 case FLASH_5752PAGE_SIZE_2K:
12456 tp->nvram_pagesize = 2048;
12457 break;
12458 case FLASH_5752PAGE_SIZE_4K:
12459 tp->nvram_pagesize = 4096;
12460 break;
12461 case FLASH_5752PAGE_SIZE_264:
12462 tp->nvram_pagesize = 264;
12463 break;
12464 case FLASH_5752PAGE_SIZE_528:
12465 tp->nvram_pagesize = 528;
12466 break;
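/* Per-ASIC NVRAM_CFG1 decoders follow. Each records the JEDEC vendor,
 * buffered/unbuffered mode, flash vs. EEPROM, page size, and, where
 * the strapping encodes it, the total NVRAM size.
 */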
12470 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12472 u32 nvcfg1;
12474 nvcfg1 = tr32(NVRAM_CFG1);
12476 /* NVRAM protection for TPM */
12477 if (nvcfg1 & (1 << 27))
12478 tg3_flag_set(tp, PROTECTED_NVRAM);
12480 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12481 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12482 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12483 tp->nvram_jedecnum = JEDEC_ATMEL;
12484 tg3_flag_set(tp, NVRAM_BUFFERED);
12485 break;
12486 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12487 tp->nvram_jedecnum = JEDEC_ATMEL;
12488 tg3_flag_set(tp, NVRAM_BUFFERED);
12489 tg3_flag_set(tp, FLASH);
12490 break;
12491 case FLASH_5752VENDOR_ST_M45PE10:
12492 case FLASH_5752VENDOR_ST_M45PE20:
12493 case FLASH_5752VENDOR_ST_M45PE40:
12494 tp->nvram_jedecnum = JEDEC_ST;
12495 tg3_flag_set(tp, NVRAM_BUFFERED);
12496 tg3_flag_set(tp, FLASH);
12497 break;
12500 if (tg3_flag(tp, FLASH)) {
12501 tg3_nvram_get_pagesize(tp, nvcfg1);
12502 } else {
12503 /* For eeprom, set pagesize to maximum eeprom size */
12504 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12506 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12507 tw32(NVRAM_CFG1, nvcfg1);
12511 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12513 u32 nvcfg1, protect = 0;
12515 nvcfg1 = tr32(NVRAM_CFG1);
12517 /* NVRAM protection for TPM */
12518 if (nvcfg1 & (1 << 27)) {
12519 tg3_flag_set(tp, PROTECTED_NVRAM);
12520 protect = 1;
12523 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12524 switch (nvcfg1) {
12525 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12526 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12527 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12528 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12529 tp->nvram_jedecnum = JEDEC_ATMEL;
12530 tg3_flag_set(tp, NVRAM_BUFFERED);
12531 tg3_flag_set(tp, FLASH);
12532 tp->nvram_pagesize = 264;
12533 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12534 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12535 tp->nvram_size = (protect ? 0x3e200 :
12536 TG3_NVRAM_SIZE_512KB);
12537 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12538 tp->nvram_size = (protect ? 0x1f200 :
12539 TG3_NVRAM_SIZE_256KB);
12540 else
12541 tp->nvram_size = (protect ? 0x1f200 :
12542 TG3_NVRAM_SIZE_128KB);
12543 break;
12544 case FLASH_5752VENDOR_ST_M45PE10:
12545 case FLASH_5752VENDOR_ST_M45PE20:
12546 case FLASH_5752VENDOR_ST_M45PE40:
12547 tp->nvram_jedecnum = JEDEC_ST;
12548 tg3_flag_set(tp, NVRAM_BUFFERED);
12549 tg3_flag_set(tp, FLASH);
12550 tp->nvram_pagesize = 256;
12551 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12552 tp->nvram_size = (protect ?
12553 TG3_NVRAM_SIZE_64KB :
12554 TG3_NVRAM_SIZE_128KB);
12555 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12556 tp->nvram_size = (protect ?
12557 TG3_NVRAM_SIZE_64KB :
12558 TG3_NVRAM_SIZE_256KB);
12559 else
12560 tp->nvram_size = (protect ?
12561 TG3_NVRAM_SIZE_128KB :
12562 TG3_NVRAM_SIZE_512KB);
12563 break;
12567 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12569 u32 nvcfg1;
12571 nvcfg1 = tr32(NVRAM_CFG1);
12573 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12574 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12575 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12576 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12577 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12578 tp->nvram_jedecnum = JEDEC_ATMEL;
12579 tg3_flag_set(tp, NVRAM_BUFFERED);
12580 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12582 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12583 tw32(NVRAM_CFG1, nvcfg1);
12584 break;
12585 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12586 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12587 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12588 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12589 tp->nvram_jedecnum = JEDEC_ATMEL;
12590 tg3_flag_set(tp, NVRAM_BUFFERED);
12591 tg3_flag_set(tp, FLASH);
12592 tp->nvram_pagesize = 264;
12593 break;
12594 case FLASH_5752VENDOR_ST_M45PE10:
12595 case FLASH_5752VENDOR_ST_M45PE20:
12596 case FLASH_5752VENDOR_ST_M45PE40:
12597 tp->nvram_jedecnum = JEDEC_ST;
12598 tg3_flag_set(tp, NVRAM_BUFFERED);
12599 tg3_flag_set(tp, FLASH);
12600 tp->nvram_pagesize = 256;
12601 break;
12605 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12607 u32 nvcfg1, protect = 0;
12609 nvcfg1 = tr32(NVRAM_CFG1);
12611 /* NVRAM protection for TPM */
12612 if (nvcfg1 & (1 << 27)) {
12613 tg3_flag_set(tp, PROTECTED_NVRAM);
12614 protect = 1;
12617 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12618 switch (nvcfg1) {
12619 case FLASH_5761VENDOR_ATMEL_ADB021D:
12620 case FLASH_5761VENDOR_ATMEL_ADB041D:
12621 case FLASH_5761VENDOR_ATMEL_ADB081D:
12622 case FLASH_5761VENDOR_ATMEL_ADB161D:
12623 case FLASH_5761VENDOR_ATMEL_MDB021D:
12624 case FLASH_5761VENDOR_ATMEL_MDB041D:
12625 case FLASH_5761VENDOR_ATMEL_MDB081D:
12626 case FLASH_5761VENDOR_ATMEL_MDB161D:
12627 tp->nvram_jedecnum = JEDEC_ATMEL;
12628 tg3_flag_set(tp, NVRAM_BUFFERED);
12629 tg3_flag_set(tp, FLASH);
12630 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12631 tp->nvram_pagesize = 256;
12632 break;
12633 case FLASH_5761VENDOR_ST_A_M45PE20:
12634 case FLASH_5761VENDOR_ST_A_M45PE40:
12635 case FLASH_5761VENDOR_ST_A_M45PE80:
12636 case FLASH_5761VENDOR_ST_A_M45PE16:
12637 case FLASH_5761VENDOR_ST_M_M45PE20:
12638 case FLASH_5761VENDOR_ST_M_M45PE40:
12639 case FLASH_5761VENDOR_ST_M_M45PE80:
12640 case FLASH_5761VENDOR_ST_M_M45PE16:
12641 tp->nvram_jedecnum = JEDEC_ST;
12642 tg3_flag_set(tp, NVRAM_BUFFERED);
12643 tg3_flag_set(tp, FLASH);
12644 tp->nvram_pagesize = 256;
12645 break;
12648 if (protect) {
12649 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12650 } else {
12651 switch (nvcfg1) {
12652 case FLASH_5761VENDOR_ATMEL_ADB161D:
12653 case FLASH_5761VENDOR_ATMEL_MDB161D:
12654 case FLASH_5761VENDOR_ST_A_M45PE16:
12655 case FLASH_5761VENDOR_ST_M_M45PE16:
12656 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12657 break;
12658 case FLASH_5761VENDOR_ATMEL_ADB081D:
12659 case FLASH_5761VENDOR_ATMEL_MDB081D:
12660 case FLASH_5761VENDOR_ST_A_M45PE80:
12661 case FLASH_5761VENDOR_ST_M_M45PE80:
12662 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12663 break;
12664 case FLASH_5761VENDOR_ATMEL_ADB041D:
12665 case FLASH_5761VENDOR_ATMEL_MDB041D:
12666 case FLASH_5761VENDOR_ST_A_M45PE40:
12667 case FLASH_5761VENDOR_ST_M_M45PE40:
12668 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12669 break;
12670 case FLASH_5761VENDOR_ATMEL_ADB021D:
12671 case FLASH_5761VENDOR_ATMEL_MDB021D:
12672 case FLASH_5761VENDOR_ST_A_M45PE20:
12673 case FLASH_5761VENDOR_ST_M_M45PE20:
12674 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12675 break;
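/* 5906: always a buffered Atmel EEPROM; nothing to probe. */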
12680 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12682 tp->nvram_jedecnum = JEDEC_ATMEL;
12683 tg3_flag_set(tp, NVRAM_BUFFERED);
12684 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12687 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12689 u32 nvcfg1;
12691 nvcfg1 = tr32(NVRAM_CFG1);
12693 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12694 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12695 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12696 tp->nvram_jedecnum = JEDEC_ATMEL;
12697 tg3_flag_set(tp, NVRAM_BUFFERED);
12698 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12700 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12701 tw32(NVRAM_CFG1, nvcfg1);
12702 return;
12703 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12704 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12705 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12706 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12707 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12708 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12709 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12710 tp->nvram_jedecnum = JEDEC_ATMEL;
12711 tg3_flag_set(tp, NVRAM_BUFFERED);
12712 tg3_flag_set(tp, FLASH);
12714 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12715 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12716 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12717 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12718 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12719 break;
12720 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12721 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12722 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12723 break;
12724 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12725 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12726 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12727 break;
12729 break;
12730 case FLASH_5752VENDOR_ST_M45PE10:
12731 case FLASH_5752VENDOR_ST_M45PE20:
12732 case FLASH_5752VENDOR_ST_M45PE40:
12733 tp->nvram_jedecnum = JEDEC_ST;
12734 tg3_flag_set(tp, NVRAM_BUFFERED);
12735 tg3_flag_set(tp, FLASH);
12737 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12738 case FLASH_5752VENDOR_ST_M45PE10:
12739 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12740 break;
12741 case FLASH_5752VENDOR_ST_M45PE20:
12742 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12743 break;
12744 case FLASH_5752VENDOR_ST_M45PE40:
12745 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12746 break;
12748 break;
12749 default:
12750 tg3_flag_set(tp, NO_NVRAM);
12751 return;
12754 tg3_nvram_get_pagesize(tp, nvcfg1);
12755 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12756 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12760 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12762 u32 nvcfg1;
12764 nvcfg1 = tr32(NVRAM_CFG1);
12766 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12767 case FLASH_5717VENDOR_ATMEL_EEPROM:
12768 case FLASH_5717VENDOR_MICRO_EEPROM:
12769 tp->nvram_jedecnum = JEDEC_ATMEL;
12770 tg3_flag_set(tp, NVRAM_BUFFERED);
12771 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12773 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12774 tw32(NVRAM_CFG1, nvcfg1);
12775 return;
12776 case FLASH_5717VENDOR_ATMEL_MDB011D:
12777 case FLASH_5717VENDOR_ATMEL_ADB011B:
12778 case FLASH_5717VENDOR_ATMEL_ADB011D:
12779 case FLASH_5717VENDOR_ATMEL_MDB021D:
12780 case FLASH_5717VENDOR_ATMEL_ADB021B:
12781 case FLASH_5717VENDOR_ATMEL_ADB021D:
12782 case FLASH_5717VENDOR_ATMEL_45USPT:
12783 tp->nvram_jedecnum = JEDEC_ATMEL;
12784 tg3_flag_set(tp, NVRAM_BUFFERED);
12785 tg3_flag_set(tp, FLASH);
12787 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12788 case FLASH_5717VENDOR_ATMEL_MDB021D:
12789 /* Detect size with tg3_get_nvram_size() */
12790 break;
12791 case FLASH_5717VENDOR_ATMEL_ADB021B:
12792 case FLASH_5717VENDOR_ATMEL_ADB021D:
12793 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12794 break;
12795 default:
12796 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12797 break;
12799 break;
12800 case FLASH_5717VENDOR_ST_M_M25PE10:
12801 case FLASH_5717VENDOR_ST_A_M25PE10:
12802 case FLASH_5717VENDOR_ST_M_M45PE10:
12803 case FLASH_5717VENDOR_ST_A_M45PE10:
12804 case FLASH_5717VENDOR_ST_M_M25PE20:
12805 case FLASH_5717VENDOR_ST_A_M25PE20:
12806 case FLASH_5717VENDOR_ST_M_M45PE20:
12807 case FLASH_5717VENDOR_ST_A_M45PE20:
12808 case FLASH_5717VENDOR_ST_25USPT:
12809 case FLASH_5717VENDOR_ST_45USPT:
12810 tp->nvram_jedecnum = JEDEC_ST;
12811 tg3_flag_set(tp, NVRAM_BUFFERED);
12812 tg3_flag_set(tp, FLASH);
12814 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12815 case FLASH_5717VENDOR_ST_M_M25PE20:
12816 case FLASH_5717VENDOR_ST_M_M45PE20:
12817 /* Detect size with tg3_get_nvram_size() */
12818 break;
12819 case FLASH_5717VENDOR_ST_A_M25PE20:
12820 case FLASH_5717VENDOR_ST_A_M45PE20:
12821 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12822 break;
12823 default:
12824 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12825 break;
12827 break;
12828 default:
12829 tg3_flag_set(tp, NO_NVRAM);
12830 return;
12833 tg3_nvram_get_pagesize(tp, nvcfg1);
12834 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12835 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12838 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12840 u32 nvcfg1, nvmpinstrp;
12842 nvcfg1 = tr32(NVRAM_CFG1);
12843 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12845 switch (nvmpinstrp) {
12846 case FLASH_5720_EEPROM_HD:
12847 case FLASH_5720_EEPROM_LD:
12848 tp->nvram_jedecnum = JEDEC_ATMEL;
12849 tg3_flag_set(tp, NVRAM_BUFFERED);
12851 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12852 tw32(NVRAM_CFG1, nvcfg1);
12853 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12854 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12855 else
12856 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12857 return;
12858 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12859 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12860 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12861 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12862 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12863 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12864 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12865 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12866 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12867 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12868 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12869 case FLASH_5720VENDOR_ATMEL_45USPT:
12870 tp->nvram_jedecnum = JEDEC_ATMEL;
12871 tg3_flag_set(tp, NVRAM_BUFFERED);
12872 tg3_flag_set(tp, FLASH);
12874 switch (nvmpinstrp) {
12875 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12876 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12877 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12878 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12879 break;
12880 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12881 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12882 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12883 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12884 break;
12885 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12886 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12887 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12888 break;
12889 default:
12890 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12891 break;
12893 break;
12894 case FLASH_5720VENDOR_M_ST_M25PE10:
12895 case FLASH_5720VENDOR_M_ST_M45PE10:
12896 case FLASH_5720VENDOR_A_ST_M25PE10:
12897 case FLASH_5720VENDOR_A_ST_M45PE10:
12898 case FLASH_5720VENDOR_M_ST_M25PE20:
12899 case FLASH_5720VENDOR_M_ST_M45PE20:
12900 case FLASH_5720VENDOR_A_ST_M25PE20:
12901 case FLASH_5720VENDOR_A_ST_M45PE20:
12902 case FLASH_5720VENDOR_M_ST_M25PE40:
12903 case FLASH_5720VENDOR_M_ST_M45PE40:
12904 case FLASH_5720VENDOR_A_ST_M25PE40:
12905 case FLASH_5720VENDOR_A_ST_M45PE40:
12906 case FLASH_5720VENDOR_M_ST_M25PE80:
12907 case FLASH_5720VENDOR_M_ST_M45PE80:
12908 case FLASH_5720VENDOR_A_ST_M25PE80:
12909 case FLASH_5720VENDOR_A_ST_M45PE80:
12910 case FLASH_5720VENDOR_ST_25USPT:
12911 case FLASH_5720VENDOR_ST_45USPT:
12912 tp->nvram_jedecnum = JEDEC_ST;
12913 tg3_flag_set(tp, NVRAM_BUFFERED);
12914 tg3_flag_set(tp, FLASH);
12916 switch (nvmpinstrp) {
12917 case FLASH_5720VENDOR_M_ST_M25PE20:
12918 case FLASH_5720VENDOR_M_ST_M45PE20:
12919 case FLASH_5720VENDOR_A_ST_M25PE20:
12920 case FLASH_5720VENDOR_A_ST_M45PE20:
12921 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12922 break;
12923 case FLASH_5720VENDOR_M_ST_M25PE40:
12924 case FLASH_5720VENDOR_M_ST_M45PE40:
12925 case FLASH_5720VENDOR_A_ST_M25PE40:
12926 case FLASH_5720VENDOR_A_ST_M45PE40:
12927 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12928 break;
12929 case FLASH_5720VENDOR_M_ST_M25PE80:
12930 case FLASH_5720VENDOR_M_ST_M45PE80:
12931 case FLASH_5720VENDOR_A_ST_M25PE80:
12932 case FLASH_5720VENDOR_A_ST_M45PE80:
12933 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12934 break;
12935 default:
12936 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12937 break;
12939 break;
12940 default:
12941 tg3_flag_set(tp, NO_NVRAM);
12942 return;
12945 tg3_nvram_get_pagesize(tp, nvcfg1);
12946 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12947 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12950 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12951 static void __devinit tg3_nvram_init(struct tg3 *tp)
12953 tw32_f(GRC_EEPROM_ADDR,
12954 (EEPROM_ADDR_FSM_RESET |
12955 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12956 EEPROM_ADDR_CLKPERD_SHIFT)));
12958 msleep(1);
12960 /* Enable serial EEPROM accesses. */
12961 tw32_f(GRC_LOCAL_CTRL,
12962 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12963 udelay(100);
12965 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12966 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12967 tg3_flag_set(tp, NVRAM);
12969 if (tg3_nvram_lock(tp)) {
12970 netdev_warn(tp->dev,
12971 "Cannot get nvram lock, %s failed\n",
12972 __func__);
12973 return;
12975 tg3_enable_nvram_access(tp);
12977 tp->nvram_size = 0;
12979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12980 tg3_get_5752_nvram_info(tp);
12981 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12982 tg3_get_5755_nvram_info(tp);
12983 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12984 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12985 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12986 tg3_get_5787_nvram_info(tp);
12987 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12988 tg3_get_5761_nvram_info(tp);
12989 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12990 tg3_get_5906_nvram_info(tp);
12991 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12992 tg3_flag(tp, 57765_CLASS))
12993 tg3_get_57780_nvram_info(tp);
12994 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12995 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12996 tg3_get_5717_nvram_info(tp);
12997 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12998 tg3_get_5720_nvram_info(tp);
12999 else
13000 tg3_get_nvram_info(tp);
13002 if (tp->nvram_size == 0)
13003 tg3_get_nvram_size(tp);
13005 tg3_disable_nvram_access(tp);
13006 tg3_nvram_unlock(tp);
13008 } else {
13009 tg3_flag_clear(tp, NVRAM);
13010 tg3_flag_clear(tp, NVRAM_BUFFERED);
13012 tg3_get_eeprom_size(tp);
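/* Fallback PHY ID lookup, keyed by PCI subsystem IDs. Used by
 * tg3_phy_probe() when neither the PHY registers nor the EEPROM
 * yield a usable PHY ID.
 */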
13016 struct subsys_tbl_ent {
13017 u16 subsys_vendor, subsys_devid;
13018 u32 phy_id;
13021 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13022 /* Broadcom boards. */
13023 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13024 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13025 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13026 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13027 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13028 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13029 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13030 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13031 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13032 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13033 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13034 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13035 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13036 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13037 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13038 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13039 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13040 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13041 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13042 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13043 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13044 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13046 /* 3com boards. */
13047 { TG3PCI_SUBVENDOR_ID_3COM,
13048 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13049 { TG3PCI_SUBVENDOR_ID_3COM,
13050 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13051 { TG3PCI_SUBVENDOR_ID_3COM,
13052 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13053 { TG3PCI_SUBVENDOR_ID_3COM,
13054 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13055 { TG3PCI_SUBVENDOR_ID_3COM,
13056 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13058 /* DELL boards. */
13059 { TG3PCI_SUBVENDOR_ID_DELL,
13060 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13061 { TG3PCI_SUBVENDOR_ID_DELL,
13062 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13063 { TG3PCI_SUBVENDOR_ID_DELL,
13064 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13065 { TG3PCI_SUBVENDOR_ID_DELL,
13066 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13068 /* Compaq boards. */
13069 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13070 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13071 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13072 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13073 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13074 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13075 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13076 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13077 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13078 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13080 /* IBM boards. */
13081 { TG3PCI_SUBVENDOR_ID_IBM,
13082 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13085 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13087 int i;
13089 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13090 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13091 tp->pdev->subsystem_vendor) &&
13092 (subsys_id_to_phy_id[i].subsys_devid ==
13093 tp->pdev->subsystem_device))
13094 return &subsys_id_to_phy_id[i];
13096 return NULL;
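/* Pull the hardware configuration block out of NIC SRAM (placed there
 * by bootcode from NVRAM): PHY ID, LED mode, WOL capability, ASF/APE
 * enables, and assorted per-board workaround flags.
 */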
13099 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13101 u32 val;
13103 tp->phy_id = TG3_PHY_ID_INVALID;
13104 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13106 /* Assume an onboard device and WOL capable by default. */
13107 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13108 tg3_flag_set(tp, WOL_CAP);
13110 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13111 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13112 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13113 tg3_flag_set(tp, IS_NIC);
13115 val = tr32(VCPU_CFGSHDW);
13116 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13117 tg3_flag_set(tp, ASPM_WORKAROUND);
13118 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13119 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13120 tg3_flag_set(tp, WOL_ENABLE);
13121 device_set_wakeup_enable(&tp->pdev->dev, true);
13123 goto done;
13126 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13127 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13128 u32 nic_cfg, led_cfg;
13129 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13130 int eeprom_phy_serdes = 0;
13132 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13133 tp->nic_sram_data_cfg = nic_cfg;
13135 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13136 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13137 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13138 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13139 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13140 (ver > 0) && (ver < 0x100))
13141 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13143 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13144 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13146 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13147 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13148 eeprom_phy_serdes = 1;
13150 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
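/* Repack the SRAM PHY ID words into the driver's internal
 * OUI/model/revision layout; tg3_phy_probe() uses the same
 * packing for the MII_PHYSID registers.
 */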
13151 if (nic_phy_id != 0) {
13152 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13153 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13155 eeprom_phy_id = (id1 >> 16) << 10;
13156 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13157 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13158 } else
13159 eeprom_phy_id = 0;
13161 tp->phy_id = eeprom_phy_id;
13162 if (eeprom_phy_serdes) {
13163 if (!tg3_flag(tp, 5705_PLUS))
13164 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13165 else
13166 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13169 if (tg3_flag(tp, 5750_PLUS))
13170 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13171 SHASTA_EXT_LED_MODE_MASK);
13172 else
13173 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13175 switch (led_cfg) {
13176 default:
13177 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13178 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13179 break;
13181 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13182 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13183 break;
13185 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13186 tp->led_ctrl = LED_CTRL_MODE_MAC;
13188 /* Default to PHY_1_MODE if 0 (MAC_MODE) is read;
13189 * some older 5700/5701 bootcode reports 0 here.
13191 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13192 ASIC_REV_5700 ||
13193 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13194 ASIC_REV_5701)
13195 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13197 break;
13199 case SHASTA_EXT_LED_SHARED:
13200 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13201 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13202 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13203 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13204 LED_CTRL_MODE_PHY_2);
13205 break;
13207 case SHASTA_EXT_LED_MAC:
13208 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13209 break;
13211 case SHASTA_EXT_LED_COMBO:
13212 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13213 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13214 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13215 LED_CTRL_MODE_PHY_2);
13216 break;
13220 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13221 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13222 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13223 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13225 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13226 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13228 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13229 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13230 if ((tp->pdev->subsystem_vendor ==
13231 PCI_VENDOR_ID_ARIMA) &&
13232 (tp->pdev->subsystem_device == 0x205a ||
13233 tp->pdev->subsystem_device == 0x2063))
13234 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13235 } else {
13236 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13237 tg3_flag_set(tp, IS_NIC);
13240 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13241 tg3_flag_set(tp, ENABLE_ASF);
13242 if (tg3_flag(tp, 5750_PLUS))
13243 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13246 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13247 tg3_flag(tp, 5750_PLUS))
13248 tg3_flag_set(tp, ENABLE_APE);
13250 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13251 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13252 tg3_flag_clear(tp, WOL_CAP);
13254 if (tg3_flag(tp, WOL_CAP) &&
13255 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13256 tg3_flag_set(tp, WOL_ENABLE);
13257 device_set_wakeup_enable(&tp->pdev->dev, true);
13260 if (cfg2 & (1 << 17))
13261 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13263 /* serdes signal pre-emphasis in register 0x590 is set
13264 * by the bootcode if bit 18 is set */
13265 if (cfg2 & (1 << 18))
13266 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13268 if ((tg3_flag(tp, 57765_PLUS) ||
13269 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13270 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13271 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13272 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13274 if (tg3_flag(tp, PCI_EXPRESS) &&
13275 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13276 !tg3_flag(tp, 57765_PLUS)) {
13277 u32 cfg3;
13279 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13280 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13281 tg3_flag_set(tp, ASPM_WORKAROUND);
13284 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13285 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13286 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13287 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13288 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13289 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13291 done:
13292 if (tg3_flag(tp, WOL_CAP))
13293 device_set_wakeup_enable(&tp->pdev->dev,
13294 tg3_flag(tp, WOL_ENABLE));
13295 else
13296 device_set_wakeup_capable(&tp->pdev->dev, false);
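/* Issue a single OTP controller command and poll for completion. */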
13299 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13301 int i;
13302 u32 val;
13304 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13305 tw32(OTP_CTRL, cmd);
13307 /* Wait for up to 1 ms for command to execute. */
13308 for (i = 0; i < 100; i++) {
13309 val = tr32(OTP_STATUS);
13310 if (val & OTP_STATUS_CMD_DONE)
13311 break;
13312 udelay(10);
13315 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13318 /* Read the gphy configuration from the OTP region of the chip. The gphy
13319 * configuration is a 32-bit value that straddles the alignment boundary.
13320 * We do two 32-bit reads and then shift and merge the results.
13322 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13324 u32 bhalf_otp, thalf_otp;
13326 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13328 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13329 return 0;
13331 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13333 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13334 return 0;
13336 thalf_otp = tr32(OTP_READ_DATA);
13338 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13340 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13341 return 0;
13343 bhalf_otp = tr32(OTP_READ_DATA);
13345 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
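/* Establish the default link configuration: advertise every mode the
 * PHY type allows and leave autonegotiation enabled.
 */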
13348 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13350 u32 adv = ADVERTISED_Autoneg;
13352 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13353 adv |= ADVERTISED_1000baseT_Half |
13354 ADVERTISED_1000baseT_Full;
13356 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13357 adv |= ADVERTISED_100baseT_Half |
13358 ADVERTISED_100baseT_Full |
13359 ADVERTISED_10baseT_Half |
13360 ADVERTISED_10baseT_Full |
13361 ADVERTISED_TP;
13362 else
13363 adv |= ADVERTISED_FIBRE;
13365 tp->link_config.advertising = adv;
13366 tp->link_config.speed = SPEED_INVALID;
13367 tp->link_config.duplex = DUPLEX_INVALID;
13368 tp->link_config.autoneg = AUTONEG_ENABLE;
13369 tp->link_config.active_speed = SPEED_INVALID;
13370 tp->link_config.active_duplex = DUPLEX_INVALID;
13371 tp->link_config.orig_speed = SPEED_INVALID;
13372 tp->link_config.orig_duplex = DUPLEX_INVALID;
13373 tp->link_config.orig_autoneg = AUTONEG_INVALID;
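/* Identify the PHY. When ASF/APE firmware owns the PHY we avoid
 * touching it and rely on the EEPROM or subsystem-table ID instead;
 * otherwise the ID registers are read directly. Also performs the
 * initial reset and autonegotiation setup for copper PHYs.
 */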
13376 static int __devinit tg3_phy_probe(struct tg3 *tp)
13378 u32 hw_phy_id_1, hw_phy_id_2;
13379 u32 hw_phy_id, hw_phy_id_masked;
13380 int err;
13382 /* flow control autonegotiation is default behavior */
13383 tg3_flag_set(tp, PAUSE_AUTONEG);
13384 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13386 if (tg3_flag(tp, USE_PHYLIB))
13387 return tg3_phy_init(tp);
13389 /* Reading the PHY ID register can conflict with ASF
13390 * firmware access to the PHY hardware.
13392 err = 0;
13393 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13394 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13395 } else {
13396 /* Now read the physical PHY_ID from the chip and verify
13397 * that it is sane. If it doesn't look good, we fall back
13398 * to the PHY ID found in the eeprom area or, failing
13399 * that, the hard-coded subsystem device table.
13401 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13402 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13404 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13405 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13406 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13408 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13411 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13412 tp->phy_id = hw_phy_id;
13413 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13414 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13415 else
13416 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13417 } else {
13418 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13419 /* Do nothing, phy ID already set up in
13420 * tg3_get_eeprom_hw_cfg().
13422 } else {
13423 struct subsys_tbl_ent *p;
13425 /* No eeprom signature? Try the hardcoded
13426 * subsys device table.
13428 p = tg3_lookup_by_subsys(tp);
13429 if (!p)
13430 return -ENODEV;
13432 tp->phy_id = p->phy_id;
13433 if (!tp->phy_id ||
13434 tp->phy_id == TG3_PHY_ID_BCM8002)
13435 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13439 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13440 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13442 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13443 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13444 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13445 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13446 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13448 tg3_phy_init_link_config(tp);
13450 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13451 !tg3_flag(tp, ENABLE_APE) &&
13452 !tg3_flag(tp, ENABLE_ASF)) {
13453 u32 bmsr, dummy;
13455 tg3_readphy(tp, MII_BMSR, &bmsr);
13456 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13457 (bmsr & BMSR_LSTATUS))
13458 goto skip_phy_reset;
13460 err = tg3_phy_reset(tp);
13461 if (err)
13462 return err;
13464 tg3_phy_set_wirespeed(tp);
13466 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13467 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13468 tp->link_config.flowctrl);
13470 tg3_writephy(tp, MII_BMCR,
13471 BMCR_ANENABLE | BMCR_ANRESTART);
13475 skip_phy_reset:
13476 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13477 err = tg3_init_5401phy_dsp(tp);
13478 if (err)
13479 return err;
13481 err = tg3_init_5401phy_dsp(tp);
13484 return err;
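/* Extract the board part number, and on Dell boards (VPD MFR_ID
 * "1028") the firmware version string, from the PCI VPD block.
 * Falls back to names derived from the PCI device ID.
 */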
13487 static void __devinit tg3_read_vpd(struct tg3 *tp)
13489 u8 *vpd_data;
13490 unsigned int block_end, rosize, len;
13491 u32 vpdlen;
13492 int j, i = 0;
13494 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13495 if (!vpd_data)
13496 goto out_no_vpd;
13498 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13499 if (i < 0)
13500 goto out_not_found;
13502 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13503 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13504 i += PCI_VPD_LRDT_TAG_SIZE;
13506 if (block_end > vpdlen)
13507 goto out_not_found;
13509 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13510 PCI_VPD_RO_KEYWORD_MFR_ID);
13511 if (j > 0) {
13512 len = pci_vpd_info_field_size(&vpd_data[j]);
13514 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13515 if (j + len > block_end || len != 4 ||
13516 memcmp(&vpd_data[j], "1028", 4))
13517 goto partno;
13519 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13520 PCI_VPD_RO_KEYWORD_VENDOR0);
13521 if (j < 0)
13522 goto partno;
13524 len = pci_vpd_info_field_size(&vpd_data[j]);
13526 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13527 if (j + len > block_end)
13528 goto partno;
13530 memcpy(tp->fw_ver, &vpd_data[j], len);
13531 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13534 partno:
13535 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13536 PCI_VPD_RO_KEYWORD_PARTNO);
13537 if (i < 0)
13538 goto out_not_found;
13540 len = pci_vpd_info_field_size(&vpd_data[i]);
13542 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13543 if (len > TG3_BPN_SIZE ||
13544 (len + i) > vpdlen)
13545 goto out_not_found;
13547 memcpy(tp->board_part_number, &vpd_data[i], len);
13549 out_not_found:
13550 kfree(vpd_data);
13551 if (tp->board_part_number[0])
13552 return;
13554 out_no_vpd:
13555 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13556 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13557 strcpy(tp->board_part_number, "BCM5717");
13558 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13559 strcpy(tp->board_part_number, "BCM5718");
13560 else
13561 goto nomatch;
13562 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13563 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13564 strcpy(tp->board_part_number, "BCM57780");
13565 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13566 strcpy(tp->board_part_number, "BCM57760");
13567 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13568 strcpy(tp->board_part_number, "BCM57790");
13569 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13570 strcpy(tp->board_part_number, "BCM57788");
13571 else
13572 goto nomatch;
13573 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13574 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13575 strcpy(tp->board_part_number, "BCM57761");
13576 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13577 strcpy(tp->board_part_number, "BCM57765");
13578 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13579 strcpy(tp->board_part_number, "BCM57781");
13580 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13581 strcpy(tp->board_part_number, "BCM57785");
13582 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13583 strcpy(tp->board_part_number, "BCM57791");
13584 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13585 strcpy(tp->board_part_number, "BCM57795");
13586 else
13587 goto nomatch;
13588 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13589 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13590 strcpy(tp->board_part_number, "BCM57762");
13591 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13592 strcpy(tp->board_part_number, "BCM57766");
13593 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13594 strcpy(tp->board_part_number, "BCM57782");
13595 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13596 strcpy(tp->board_part_number, "BCM57786");
13597 else
13598 goto nomatch;
13599 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13600 strcpy(tp->board_part_number, "BCM95906");
13601 } else {
13602 nomatch:
13603 strcpy(tp->board_part_number, "none");
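/* A firmware image is valid if its first word carries the 0x0c000000
 * signature bits and the second word is zero.
 */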
13607 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13609 u32 val;
13611 if (tg3_nvram_read(tp, offset, &val) ||
13612 (val & 0xfc000000) != 0x0c000000 ||
13613 tg3_nvram_read(tp, offset + 4, &val) ||
13614 val != 0)
13615 return 0;
13617 return 1;
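/* Append the bootcode version to tp->fw_ver. Newer images embed a
 * 16-byte version string; older ones keep major/minor numbers in the
 * NVRAM pointer area.
 */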
13620 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13622 u32 val, offset, start, ver_offset;
13623 int i, dst_off;
13624 bool newver = false;
13626 if (tg3_nvram_read(tp, 0xc, &offset) ||
13627 tg3_nvram_read(tp, 0x4, &start))
13628 return;
13630 offset = tg3_nvram_logical_addr(tp, offset);
13632 if (tg3_nvram_read(tp, offset, &val))
13633 return;
13635 if ((val & 0xfc000000) == 0x0c000000) {
13636 if (tg3_nvram_read(tp, offset + 4, &val))
13637 return;
13639 if (val == 0)
13640 newver = true;
13643 dst_off = strlen(tp->fw_ver);
13645 if (newver) {
13646 if (TG3_VER_SIZE - dst_off < 16 ||
13647 tg3_nvram_read(tp, offset + 8, &ver_offset))
13648 return;
13650 offset = offset + ver_offset - start;
13651 for (i = 0; i < 16; i += 4) {
13652 __be32 v;
13653 if (tg3_nvram_read_be32(tp, offset + i, &v))
13654 return;
13656 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13658 } else {
13659 u32 major, minor;
13661 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13662 return;
13664 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13665 TG3_NVM_BCVER_MAJSFT;
13666 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13667 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13668 "v%d.%02d", major, minor);
13672 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13674 u32 val, major, minor;
13676 /* Use native endian representation */
13677 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13678 return;
13680 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13681 TG3_NVM_HWSB_CFG1_MAJSFT;
13682 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13683 TG3_NVM_HWSB_CFG1_MINSFT;
13685 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
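/* Selfboot images: the version word's location depends on the image
 * format revision.
 */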
13688 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13690 u32 offset, major, minor, build;
13692 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13694 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13695 return;
13697 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13698 case TG3_EEPROM_SB_REVISION_0:
13699 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13700 break;
13701 case TG3_EEPROM_SB_REVISION_2:
13702 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13703 break;
13704 case TG3_EEPROM_SB_REVISION_3:
13705 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13706 break;
13707 case TG3_EEPROM_SB_REVISION_4:
13708 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13709 break;
13710 case TG3_EEPROM_SB_REVISION_5:
13711 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13712 break;
13713 case TG3_EEPROM_SB_REVISION_6:
13714 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13715 break;
13716 default:
13717 return;
13720 if (tg3_nvram_read(tp, offset, &val))
13721 return;
13723 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13724 TG3_EEPROM_SB_EDH_BLD_SHFT;
13725 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13726 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13727 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13729 if (minor > 99 || build > 26)
13730 return;
13732 offset = strlen(tp->fw_ver);
13733 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13734 " v%d.%02d", major, minor);
13736 if (build > 0) {
13737 offset = strlen(tp->fw_ver);
13738 if (offset < TG3_VER_SIZE - 1)
13739 tp->fw_ver[offset] = 'a' + build - 1;
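/* Locate the ASF management firmware image through the NVRAM
 * directory and append its 16-byte version string to tp->fw_ver.
 */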
13743 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13745 u32 val, offset, start;
13746 int i, vlen;
13748 for (offset = TG3_NVM_DIR_START;
13749 offset < TG3_NVM_DIR_END;
13750 offset += TG3_NVM_DIRENT_SIZE) {
13751 if (tg3_nvram_read(tp, offset, &val))
13752 return;
13754 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13755 break;
13758 if (offset == TG3_NVM_DIR_END)
13759 return;
13761 if (!tg3_flag(tp, 5705_PLUS))
13762 start = 0x08000000;
13763 else if (tg3_nvram_read(tp, offset - 4, &start))
13764 return;
13766 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13767 !tg3_fw_img_is_valid(tp, offset) ||
13768 tg3_nvram_read(tp, offset + 8, &val))
13769 return;
13771 offset += val - start;
13773 vlen = strlen(tp->fw_ver);
13775 tp->fw_ver[vlen++] = ',';
13776 tp->fw_ver[vlen++] = ' ';
13778 for (i = 0; i < 4; i++) {
13779 __be32 v;
13780 if (tg3_nvram_read_be32(tp, offset, &v))
13781 return;
13783 offset += sizeof(v);
13785 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13786 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13787 break;
13790 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13791 vlen += sizeof(v);
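/* Query the APE firmware type (NCSI or DASH) and append its version. */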
13795 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13797 int vlen;
13798 u32 apedata;
13799 char *fwtype;
13801 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13802 return;
13804 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13805 if (apedata != APE_SEG_SIG_MAGIC)
13806 return;
13808 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13809 if (!(apedata & APE_FW_STATUS_READY))
13810 return;
13812 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13814 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13815 tg3_flag_set(tp, APE_HAS_NCSI);
13816 fwtype = "NCSI";
13817 } else {
13818 fwtype = "DASH";
13821 vlen = strlen(tp->fw_ver);
13823 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13824 fwtype,
13825 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13826 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13827 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13828 (apedata & APE_FW_VERSION_BLDMSK));
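/* Top-level firmware version assembly: dispatch on the NVRAM magic to
 * the bootcode/selfboot readers above; the ASF/APE firmware version is
 * only appended when the VPD did not already supply a version.
 */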
13831 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13833 u32 val;
13834 bool vpd_vers = false;
13836 if (tp->fw_ver[0] != 0)
13837 vpd_vers = true;
13839 if (tg3_flag(tp, NO_NVRAM)) {
13840 strcat(tp->fw_ver, "sb");
13841 return;
13844 if (tg3_nvram_read(tp, 0, &val))
13845 return;
13847 if (val == TG3_EEPROM_MAGIC)
13848 tg3_read_bc_ver(tp);
13849 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13850 tg3_read_sb_ver(tp, val);
13851 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13852 tg3_read_hwsb_ver(tp);
13853 else
13854 return;
13856 if (vpd_vers)
13857 goto done;
13859 if (tg3_flag(tp, ENABLE_APE)) {
13860 if (tg3_flag(tp, ENABLE_ASF))
13861 tg3_read_dash_ver(tp);
13862 } else if (tg3_flag(tp, ENABLE_ASF)) {
13863 tg3_read_mgmtfw_ver(tp);
13866 done:
13867 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13870 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13872 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13873 return TG3_RX_RET_MAX_SIZE_5717;
13874 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13875 return TG3_RX_RET_MAX_SIZE_5700;
13876 else
13877 return TG3_RX_RET_MAX_SIZE_5705;
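/* Host bridges known to reorder posted writes; see the
 * MBOX_WRITE_REORDER handling in tg3_get_invariants() below.
 */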
13880 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13881 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13882 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13883 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13884 { },
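/* 5704/5714 devices are dual-port: find the PCI function that shares
 * this device's slot.
 */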
13887 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13889 struct pci_dev *peer;
13890 unsigned int func, devnr = tp->pdev->devfn & ~7;
13892 for (func = 0; func < 8; func++) {
13893 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13894 if (peer && peer != tp->pdev)
13895 break;
13896 pci_dev_put(peer);
13898 /* 5704 can be configured in single-port mode, set peer to
13899 * tp->pdev in that case.
13901 if (!peer) {
13902 peer = tp->pdev;
13903 return peer;
13907 /* We don't need to keep the refcount elevated; there's no way
13908 * to remove one half of this device without removing the other
13910 pci_dev_put(peer);
13912 return peer;
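/* Derive pci_chip_rev_id. Older chips expose it through
 * MISC_HOST_CTRL; newer ones use a product ID register whose location
 * depends on the PCI device ID. The chip-class flags (5705_PLUS,
 * 5755_PLUS, 57765_CLASS, ...) that gate feature handling throughout
 * the driver are derived here as well.
 */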
13915 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
13917 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
13918 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13919 u32 reg;
13921 /* All devices that use the alternate
13922 * ASIC REV location have a CPMU.
13924 tg3_flag_set(tp, CPMU_PRESENT);
13926 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13927 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13928 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13929 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13930 reg = TG3PCI_GEN2_PRODID_ASICREV;
13931 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13932 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13933 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13934 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13935 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13936 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13937 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13938 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13939 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13940 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13941 reg = TG3PCI_GEN15_PRODID_ASICREV;
13942 else
13943 reg = TG3PCI_PRODID_ASICREV;
13945 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
13948 /* Wrong chip ID in 5752 A0. This code can be removed later
13949 * as A0 is not in production.
13951 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13952 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13954 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13955 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13956 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13957 tg3_flag_set(tp, 5717_PLUS);
13959 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13960 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13961 tg3_flag_set(tp, 57765_CLASS);
13963 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
13964 tg3_flag_set(tp, 57765_PLUS);
13966 /* Intentionally exclude ASIC_REV_5906 */
13967 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13968 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13969 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13970 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13973 tg3_flag(tp, 57765_PLUS))
13974 tg3_flag_set(tp, 5755_PLUS);
13976 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13977 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13978 tg3_flag_set(tp, 5780_CLASS);
13980 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13981 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13982 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13983 tg3_flag(tp, 5755_PLUS) ||
13984 tg3_flag(tp, 5780_CLASS))
13985 tg3_flag_set(tp, 5750_PLUS);
13987 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13988 tg3_flag(tp, 5750_PLUS))
13989 tg3_flag_set(tp, 5705_PLUS);
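/* One-time, probe-time discovery of chip capabilities, hardware bugs,
 * and bus configuration.
 */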
13992 static int __devinit tg3_get_invariants(struct tg3 *tp)
13994 u32 misc_ctrl_reg;
13995 u32 pci_state_reg, grc_misc_cfg;
13996 u32 val;
13997 u16 pci_cmd;
13998 int err;
14000 /* Force memory write invalidate off. If we leave it on,
14001 * then on 5700_BX chips we have to enable a workaround.
14002 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14003 * to match the cacheline size. The Broadcom driver has this
14004 * workaround but turns MWI off all the time and so never uses
14005 * it, which suggests that the workaround is insufficient.
14007 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14008 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14009 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14011 /* Important! -- Make sure register accesses are byteswapped
14012 * correctly. Also, for those chips that require it, make
14013 * sure that indirect register accesses are enabled before
14014 * the first operation.
14016 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14017 &misc_ctrl_reg);
14018 tp->misc_host_ctrl |= (misc_ctrl_reg &
14019 MISC_HOST_CTRL_CHIPREV);
14020 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14021 tp->misc_host_ctrl);
14023 tg3_detect_asic_rev(tp, misc_ctrl_reg);
14025 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14026 * we need to disable memory and use config. cycles
14027 * only to access all registers. The 5702/03 chips
14028 * can mistakenly decode the special cycles from the
14029 * ICH chipsets as memory write cycles, causing corruption
14030 * of register and memory space. Only certain ICH bridges
14031 * will drive special cycles with non-zero data during the
14032 * address phase which can fall within the 5703's address
14033 * range. This is not an ICH bug as the PCI spec allows
14034 * non-zero address during special cycles. However, only
14035 * these ICH bridges are known to drive non-zero addresses
14036 * during special cycles.
14038 * Since special cycles do not cross PCI bridges, we only
14039 * enable this workaround if the 5703 is on the secondary
14040 * bus of these ICH bridges.
14042 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14043 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14044 static struct tg3_dev_id {
14045 u32 vendor;
14046 u32 device;
14047 u32 rev;
14048 } ich_chipsets[] = {
14049 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14050 PCI_ANY_ID },
14051 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14052 PCI_ANY_ID },
14053 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14054 0xa },
14055 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14056 PCI_ANY_ID },
14057 { },
14059 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14060 struct pci_dev *bridge = NULL;
14062 while (pci_id->vendor != 0) {
14063 bridge = pci_get_device(pci_id->vendor, pci_id->device,
14064 bridge);
14065 if (!bridge) {
14066 pci_id++;
14067 continue;
14069 if (pci_id->rev != PCI_ANY_ID) {
14070 if (bridge->revision > pci_id->rev)
14071 continue;
14073 if (bridge->subordinate &&
14074 (bridge->subordinate->number ==
14075 tp->pdev->bus->number)) {
14076 tg3_flag_set(tp, ICH_WORKAROUND);
14077 pci_dev_put(bridge);
14078 break;
14083 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14084 static struct tg3_dev_id {
14085 u32 vendor;
14086 u32 device;
14087 } bridge_chipsets[] = {
14088 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14089 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14090 { },
14092 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14093 struct pci_dev *bridge = NULL;
14095 while (pci_id->vendor != 0) {
14096 bridge = pci_get_device(pci_id->vendor,
14097 pci_id->device,
14098 bridge);
14099 if (!bridge) {
14100 pci_id++;
14101 continue;
14103 if (bridge->subordinate &&
14104 (bridge->subordinate->number <=
14105 tp->pdev->bus->number) &&
14106 (bridge->subordinate->subordinate >=
14107 tp->pdev->bus->number)) {
14108 tg3_flag_set(tp, 5701_DMA_BUG);
14109 pci_dev_put(bridge);
14110 break;
14115 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14116 * DMA addresses > 40-bit. This bridge may have additional
14117 * 57xx devices behind it, in some 4-port NIC designs for example.
14118 * Any tg3 device found behind the bridge will also need the 40-bit
14119 * DMA workaround.
14121 if (tg3_flag(tp, 5780_CLASS)) {
14122 tg3_flag_set(tp, 40BIT_DMA_BUG);
14123 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14124 } else {
14125 struct pci_dev *bridge = NULL;
14127 do {
14128 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14129 PCI_DEVICE_ID_SERVERWORKS_EPB,
14130 bridge);
14131 if (bridge && bridge->subordinate &&
14132 (bridge->subordinate->number <=
14133 tp->pdev->bus->number) &&
14134 (bridge->subordinate->subordinate >=
14135 tp->pdev->bus->number)) {
14136 tg3_flag_set(tp, 40BIT_DMA_BUG);
14137 pci_dev_put(bridge);
14138 break;
14140 } while (bridge);
14143 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14144 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14145 tp->pdev_peer = tg3_find_peer(tp);
14147 /* Determine TSO capabilities */
14148 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14149 ; /* Do nothing. HW bug. */
14150 else if (tg3_flag(tp, 57765_PLUS))
14151 tg3_flag_set(tp, HW_TSO_3);
14152 else if (tg3_flag(tp, 5755_PLUS) ||
14153 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14154 tg3_flag_set(tp, HW_TSO_2);
14155 else if (tg3_flag(tp, 5750_PLUS)) {
14156 tg3_flag_set(tp, HW_TSO_1);
14157 tg3_flag_set(tp, TSO_BUG);
14158 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14159 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14160 tg3_flag_clear(tp, TSO_BUG);
14161 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14162 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14163 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14164 tg3_flag_set(tp, TSO_BUG);
14165 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14166 tp->fw_needed = FIRMWARE_TG3TSO5;
14167 else
14168 tp->fw_needed = FIRMWARE_TG3TSO;
14171 /* Selectively allow TSO based on operating conditions */
14172 if (tg3_flag(tp, HW_TSO_1) ||
14173 tg3_flag(tp, HW_TSO_2) ||
14174 tg3_flag(tp, HW_TSO_3) ||
14175 tp->fw_needed) {
14176 /* For firmware TSO, assume ASF is disabled.
14177 * We'll disable TSO later, in tg3_get_eeprom_hw_cfg(),
14178 * if we discover that ASF is enabled.
14180 tg3_flag_set(tp, TSO_CAPABLE);
14181 } else {
14182 tg3_flag_clear(tp, TSO_CAPABLE);
14183 tg3_flag_clear(tp, TSO_BUG);
14184 tp->fw_needed = NULL;
14187 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14188 tp->fw_needed = FIRMWARE_TG3;
14190 tp->irq_max = 1;
14192 if (tg3_flag(tp, 5750_PLUS)) {
14193 tg3_flag_set(tp, SUPPORT_MSI);
14194 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14195 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14196 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14197 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14198 tp->pdev_peer == tp->pdev))
14199 tg3_flag_clear(tp, SUPPORT_MSI);
14201 if (tg3_flag(tp, 5755_PLUS) ||
14202 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14203 tg3_flag_set(tp, 1SHOT_MSI);
14206 if (tg3_flag(tp, 57765_PLUS)) {
14207 tg3_flag_set(tp, SUPPORT_MSIX);
14208 tp->irq_max = TG3_IRQ_MAX_VECS;
14209 tg3_rss_init_dflt_indir_tbl(tp);
14213 if (tg3_flag(tp, 5755_PLUS))
14214 tg3_flag_set(tp, SHORT_DMA_BUG);
14216 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14217 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14219 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14221 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14222 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14224 if (tg3_flag(tp, 57765_PLUS) &&
14225 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14226 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14228 if (!tg3_flag(tp, 5705_PLUS) ||
14229 tg3_flag(tp, 5780_CLASS) ||
14230 tg3_flag(tp, USE_JUMBO_BDFLAG))
14231 tg3_flag_set(tp, JUMBO_CAPABLE);
14233 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14234 &pci_state_reg);
14236 if (pci_is_pcie(tp->pdev)) {
14237 u16 lnkctl;
14239 tg3_flag_set(tp, PCI_EXPRESS);
14241 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14242 int readrq = pcie_get_readrq(tp->pdev);
14243 if (readrq > 2048)
14244 pcie_set_readrq(tp->pdev, 2048);
14245 }
14247 pci_read_config_word(tp->pdev,
14248 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14249 &lnkctl);
14250 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14251 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14252 ASIC_REV_5906) {
14253 tg3_flag_clear(tp, HW_TSO_2);
14254 tg3_flag_clear(tp, TSO_CAPABLE);
14255 }
14256 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14257 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14258 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14259 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14260 tg3_flag_set(tp, CLKREQ_BUG);
14261 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14262 tg3_flag_set(tp, L1PLLPD_EN);
14263 }
14264 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14265 /* BCM5785 devices are effectively PCIe devices, and should
14266 * follow PCIe codepaths, but do not have a PCIe capabilities
14267 * section.
14268 */
14269 tg3_flag_set(tp, PCI_EXPRESS);
14270 } else if (!tg3_flag(tp, 5705_PLUS) ||
14271 tg3_flag(tp, 5780_CLASS)) {
14272 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14273 if (!tp->pcix_cap) {
14274 dev_err(&tp->pdev->dev,
14275 "Cannot find PCI-X capability, aborting\n");
14276 return -EIO;
14277 }
14279 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14280 tg3_flag_set(tp, PCIX_MODE);
14281 }
14283 /* If we have an AMD 762 or VIA K8T800 chipset, write
14284 * reordering to the mailbox registers done by the host
14285 * controller can cause major troubles. We read back from
14286 * every mailbox register write to force the writes to be
14287 * posted to the chip in order.
14288 */
14289 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14290 !tg3_flag(tp, PCI_EXPRESS))
14291 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14293 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14294 &tp->pci_cacheline_sz);
14295 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14296 &tp->pci_lat_timer);
14297 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14298 tp->pci_lat_timer < 64) {
14299 tp->pci_lat_timer = 64;
14300 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14301 tp->pci_lat_timer);
14302 }
14304 /* Important! -- It is critical that the PCI-X hw workaround
14305 * situation is decided before the first MMIO register access.
14306 */
14307 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14308 /* 5700 BX chips need to have their TX producer index
14309 * mailboxes written twice to work around a bug.
14310 */
14311 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14312 }
14313 /* If we are in PCI-X mode, enable register write workaround.
14315 * The workaround is to use indirect register accesses
14316 * for all chip writes not to mailbox registers.
14317 */
14318 if (tg3_flag(tp, PCIX_MODE)) {
14319 u32 pm_reg;
14321 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14323 /* The chip can have its power management PCI config
14324 * space registers clobbered due to this bug.
14325 * So explicitly force the chip into D0 here.
14326 */
14327 pci_read_config_dword(tp->pdev,
14328 tp->pm_cap + PCI_PM_CTRL,
14329 &pm_reg);
14330 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14331 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14332 pci_write_config_dword(tp->pdev,
14333 tp->pm_cap + PCI_PM_CTRL,
14334 pm_reg);
14336 /* Also, force SERR#/PERR# in PCI command. */
14337 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14338 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14339 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14340 }
14343 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14344 tg3_flag_set(tp, PCI_HIGH_SPEED);
14345 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14346 tg3_flag_set(tp, PCI_32BIT);
14348 /* Chip-specific fixup from Broadcom driver */
14349 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14350 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14351 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14352 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14353 }
14355 /* Default fast path register access methods */
14356 tp->read32 = tg3_read32;
14357 tp->write32 = tg3_write32;
14358 tp->read32_mbox = tg3_read32;
14359 tp->write32_mbox = tg3_write32;
14360 tp->write32_tx_mbox = tg3_write32;
14361 tp->write32_rx_mbox = tg3_write32;
14363 /* Various workaround register access methods */
14364 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14365 tp->write32 = tg3_write_indirect_reg32;
14366 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14367 (tg3_flag(tp, PCI_EXPRESS) &&
14368 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14369 /*
14370 * Back-to-back register writes can cause problems on these
14371 * chips, the workaround is to read back all reg writes
14372 * except those to mailbox regs.
14374 * See tg3_write_indirect_reg32().
14375 */
14376 tp->write32 = tg3_write_flush_reg32;
14377 }
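/* tg3_write_flush_reg32() reads the register back immediately after
 * writing it, which forces the posted write out to the chip before
 * any subsequent register access.
 */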
14379 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14380 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14381 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14382 tp->write32_rx_mbox = tg3_write_flush_reg32;
14383 }
14385 if (tg3_flag(tp, ICH_WORKAROUND)) {
14386 tp->read32 = tg3_read_indirect_reg32;
14387 tp->write32 = tg3_write_indirect_reg32;
14388 tp->read32_mbox = tg3_read_indirect_mbox;
14389 tp->write32_mbox = tg3_write_indirect_mbox;
14390 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14391 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14393 iounmap(tp->regs);
14394 tp->regs = NULL;
14396 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14397 pci_cmd &= ~PCI_COMMAND_MEMORY;
14398 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14399 }
14400 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14401 tp->read32_mbox = tg3_read32_mbox_5906;
14402 tp->write32_mbox = tg3_write32_mbox_5906;
14403 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14404 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14405 }
14407 if (tp->write32 == tg3_write_indirect_reg32 ||
14408 (tg3_flag(tp, PCIX_MODE) &&
14409 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14410 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14411 tg3_flag_set(tp, SRAM_USE_CONFIG);
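/* With SRAM_USE_CONFIG set, tg3_read_mem()/tg3_write_mem() reach NIC
 * SRAM through the PCI config-space memory window instead of the
 * MMIO aperture, avoiding the direct-access hazards noted above.
 */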
14413 /* The memory arbiter has to be enabled in order for SRAM accesses
14414 * to succeed. Normally on powerup the tg3 chip firmware will make
14415 * sure it is enabled, but other entities such as system netboot
14416 * code might disable it.
14417 */
14418 val = tr32(MEMARB_MODE);
14419 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14421 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14422 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14423 tg3_flag(tp, 5780_CLASS)) {
14424 if (tg3_flag(tp, PCIX_MODE)) {
14425 pci_read_config_dword(tp->pdev,
14426 tp->pcix_cap + PCI_X_STATUS,
14427 &val);
14428 tp->pci_fn = val & 0x7;
14429 }
14430 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14431 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14432 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14433 NIC_SRAM_CPMUSTAT_SIG) {
14434 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14435 tp->pci_fn = tp->pci_fn ? 1 : 0;
14436 }
14437 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14438 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14439 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14440 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14441 NIC_SRAM_CPMUSTAT_SIG) {
14442 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14443 TG3_CPMU_STATUS_FSHFT_5719;
14444 }
14445 }
14447 /* Get eeprom hw config before calling tg3_set_power_state().
14448 * In particular, the TG3_FLAG_IS_NIC flag must be
14449 * determined before calling tg3_set_power_state() so that
14450 * we know whether or not to switch out of Vaux power.
14451 * When the flag is set, it means that GPIO1 is used for eeprom
14452 * write protect and also implies that it is a LOM where GPIOs
14453 * are not used to switch power.
14454 */
14455 tg3_get_eeprom_hw_cfg(tp);
14457 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14458 tg3_flag_clear(tp, TSO_CAPABLE);
14459 tg3_flag_clear(tp, TSO_BUG);
14460 tp->fw_needed = NULL;
14461 }
14463 if (tg3_flag(tp, ENABLE_APE)) {
14464 /* Allow reads and writes to the
14465 * APE register and memory space.
14466 */
14467 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14468 PCISTATE_ALLOW_APE_SHMEM_WR |
14469 PCISTATE_ALLOW_APE_PSPACE_WR;
14470 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14471 pci_state_reg);
14473 tg3_ape_lock_init(tp);
14474 }
14476 /* Set up tp->grc_local_ctrl before calling
14477 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14478 * will bring 5700's external PHY out of reset.
14479 * It is also used as eeprom write protect on LOMs.
14480 */
14481 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14482 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14483 tg3_flag(tp, EEPROM_WRITE_PROT))
14484 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14485 GRC_LCLCTRL_GPIO_OUTPUT1);
14486 /* Unused GPIO3 must be driven as output on 5752 because there
14487 * are no pull-up resistors on unused GPIO pins.
14488 */
14489 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14490 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14492 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14493 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14494 tg3_flag(tp, 57765_CLASS))
14495 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14497 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14498 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14499 /* Turn off the debug UART. */
14500 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14501 if (tg3_flag(tp, IS_NIC))
14502 /* Keep VMain power. */
14503 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14504 GRC_LCLCTRL_GPIO_OUTPUT0;
14505 }
14507 /* Switch out of Vaux if it is a NIC */
14508 tg3_pwrsrc_switch_to_vmain(tp);
14510 /* Derive initial jumbo mode from MTU assigned in
14511 * ether_setup() via the alloc_etherdev() call
14512 */
14513 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14514 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14516 /* Determine WakeOnLan speed to use. */
14517 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14518 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14519 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14520 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14521 tg3_flag_clear(tp, WOL_SPEED_100MB);
14522 } else {
14523 tg3_flag_set(tp, WOL_SPEED_100MB);
14524 }
14526 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14527 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14529 /* A few boards don't want Ethernet@WireSpeed phy feature */
14530 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14531 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14532 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14533 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14534 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14535 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14536 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14538 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14539 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14540 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14541 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14542 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14544 if (tg3_flag(tp, 5705_PLUS) &&
14545 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14546 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14547 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14548 !tg3_flag(tp, 57765_PLUS)) {
14549 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14550 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14551 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14552 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14553 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14554 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14555 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14556 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14557 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14558 } else
14559 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14560 }
14562 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14563 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14564 tp->phy_otp = tg3_read_otp_phycfg(tp);
14565 if (tp->phy_otp == 0)
14566 tp->phy_otp = TG3_OTP_DEFAULT;
14567 }
14569 if (tg3_flag(tp, CPMU_PRESENT))
14570 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14571 else
14572 tp->mi_mode = MAC_MI_MODE_BASE;
14574 tp->coalesce_mode = 0;
14575 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14576 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14577 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14579 /* Set these bits to enable statistics workaround. */
14580 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14581 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14582 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14583 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14584 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14585 }
14587 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14588 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14589 tg3_flag_set(tp, USE_PHYLIB);
14591 err = tg3_mdio_init(tp);
14592 if (err)
14593 return err;
14595 /* Initialize data/descriptor byte/word swapping. */
14596 val = tr32(GRC_MODE);
14597 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14598 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14599 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14600 GRC_MODE_B2HRX_ENABLE |
14601 GRC_MODE_HTX2B_ENABLE |
14602 GRC_MODE_HOST_STACKUP);
14603 else
14604 val &= GRC_MODE_HOST_STACKUP;
14606 tw32(GRC_MODE, val | tp->grc_mode);
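/* On the 5720 the byte/word-swap and B2HRX bits already programmed
 * (presumably by bootcode) are carried over from the current GRC_MODE
 * value; on all other chips only HOST_STACKUP survives, and the rest
 * is rebuilt from tp->grc_mode.
 */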
14608 tg3_switch_clocks(tp);
14610 /* Clear this out for sanity. */
14611 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14613 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14614 &pci_state_reg);
14615 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14616 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14617 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14619 if (chiprevid == CHIPREV_ID_5701_A0 ||
14620 chiprevid == CHIPREV_ID_5701_B0 ||
14621 chiprevid == CHIPREV_ID_5701_B2 ||
14622 chiprevid == CHIPREV_ID_5701_B5) {
14623 void __iomem *sram_base;
14625 /* Write some dummy words into the SRAM status block
14626 * area, see if it reads back correctly. If the return
14627 * value is bad, force-enable the PCIX workaround.
14628 */
14629 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14631 writel(0x00000000, sram_base);
14632 writel(0x00000000, sram_base + 4);
14633 writel(0xffffffff, sram_base + 4);
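/* If the first word no longer reads back as the zero written above,
 * the write to the adjacent word bled into it -- taken here as the
 * signature of the PCI-X target hardware bug.
 */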
14634 if (readl(sram_base) != 0x00000000)
14635 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14636 }
14637 }
14639 udelay(50);
14640 tg3_nvram_init(tp);
14642 grc_misc_cfg = tr32(GRC_MISC_CFG);
14643 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14645 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14646 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14647 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14648 tg3_flag_set(tp, IS_5788);
14650 if (!tg3_flag(tp, IS_5788) &&
14651 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14652 tg3_flag_set(tp, TAGGED_STATUS);
14653 if (tg3_flag(tp, TAGGED_STATUS)) {
14654 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14655 HOSTCC_MODE_CLRTICK_TXBD);
14657 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14658 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14659 tp->misc_host_ctrl);
14660 }
14662 /* Preserve the APE MAC_MODE bits */
14663 if (tg3_flag(tp, ENABLE_APE))
14664 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14665 else
14666 tp->mac_mode = 0;
14668 /* these are limited to 10/100 only */
14669 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14670 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14671 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14672 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14673 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14674 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14675 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14676 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14677 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14678 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14679 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14680 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14681 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14682 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14683 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14684 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14686 err = tg3_phy_probe(tp);
14687 if (err) {
14688 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14689 /* ... but do not return immediately ... */
14690 tg3_mdio_fini(tp);
14691 }
14693 tg3_read_vpd(tp);
14694 tg3_read_fw_ver(tp);
14696 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14697 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14698 } else {
14699 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14700 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14701 else
14702 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14703 }
14705 /* 5700 {AX,BX} chips have a broken status block link
14706 * change bit implementation, so we must use the
14707 * status register in those cases.
14708 */
14709 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14710 tg3_flag_set(tp, USE_LINKCHG_REG);
14711 else
14712 tg3_flag_clear(tp, USE_LINKCHG_REG);
14714 /* The led_ctrl is set during tg3_phy_probe, here we might
14715 * have to force the link status polling mechanism based
14716 * upon subsystem IDs.
14717 */
14718 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14719 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14720 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14721 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14722 tg3_flag_set(tp, USE_LINKCHG_REG);
14723 }
14725 /* For all SERDES we poll the MAC status register. */
14726 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14727 tg3_flag_set(tp, POLL_SERDES);
14728 else
14729 tg3_flag_clear(tp, POLL_SERDES);
14731 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14732 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14733 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14734 tg3_flag(tp, PCIX_MODE)) {
14735 tp->rx_offset = NET_SKB_PAD;
14736 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14737 tp->rx_copy_thresh = ~(u16)0;
14738 #endif
14739 }
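/* With rx_offset reduced to NET_SKB_PAD, received IP headers are no
 * longer 4-byte aligned; on architectures without efficient unaligned
 * access, a copy threshold of ~(u16)0 makes the driver copy every
 * packet into a properly aligned buffer instead.
 */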
14741 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14742 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14743 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14745 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14747 /* Increment the rx prod index on the rx std ring by at most
14748 * 8 for these chips to work around hw errata.
14749 */
14750 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14751 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14752 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14753 tp->rx_std_max_post = 8;
14755 if (tg3_flag(tp, ASPM_WORKAROUND))
14756 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14757 PCIE_PWR_MGMT_L1_THRESH_MSK;
14759 return err;
14760 }
14762 #ifdef CONFIG_SPARC
14763 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14764 {
14765 struct net_device *dev = tp->dev;
14766 struct pci_dev *pdev = tp->pdev;
14767 struct device_node *dp = pci_device_to_OF_node(pdev);
14768 const unsigned char *addr;
14769 int len;
14771 addr = of_get_property(dp, "local-mac-address", &len);
14772 if (addr && len == 6) {
14773 memcpy(dev->dev_addr, addr, 6);
14774 memcpy(dev->perm_addr, dev->dev_addr, 6);
14775 return 0;
14776 }
14777 return -ENODEV;
14780 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14781 {
14782 struct net_device *dev = tp->dev;
14784 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14785 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14786 return 0;
14787 }
14788 #endif
14790 static int __devinit tg3_get_device_address(struct tg3 *tp)
14791 {
14792 struct net_device *dev = tp->dev;
14793 u32 hi, lo, mac_offset;
14794 int addr_ok = 0;
14796 #ifdef CONFIG_SPARC
14797 if (!tg3_get_macaddr_sparc(tp))
14798 return 0;
14799 #endif
14801 mac_offset = 0x7c;
14802 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14803 tg3_flag(tp, 5780_CLASS)) {
14804 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14805 mac_offset = 0xcc;
14806 if (tg3_nvram_lock(tp))
14807 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14808 else
14809 tg3_nvram_unlock(tp);
14810 } else if (tg3_flag(tp, 5717_PLUS)) {
14811 if (tp->pci_fn & 1)
14812 mac_offset = 0xcc;
14813 if (tp->pci_fn > 1)
14814 mac_offset += 0x18c;
14815 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14816 mac_offset = 0x10;
14818 /* First try to get it from MAC address mailbox. */
14819 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14820 if ((hi >> 16) == 0x484b) {
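/* 0x484b is ASCII "HK", apparently a signature the bootcode leaves
 * in the high mailbox word to mark a valid MAC address in SRAM.
 */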
14821 dev->dev_addr[0] = (hi >> 8) & 0xff;
14822 dev->dev_addr[1] = (hi >> 0) & 0xff;
14824 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14825 dev->dev_addr[2] = (lo >> 24) & 0xff;
14826 dev->dev_addr[3] = (lo >> 16) & 0xff;
14827 dev->dev_addr[4] = (lo >> 8) & 0xff;
14828 dev->dev_addr[5] = (lo >> 0) & 0xff;
14830 /* Some old bootcode may report a 0 MAC address in SRAM */
14831 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14832 }
14833 if (!addr_ok) {
14834 /* Next, try NVRAM. */
14835 if (!tg3_flag(tp, NO_NVRAM) &&
14836 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14837 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14838 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14839 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14840 }
14841 /* Finally just fetch it out of the MAC control regs. */
14842 else {
14843 hi = tr32(MAC_ADDR_0_HIGH);
14844 lo = tr32(MAC_ADDR_0_LOW);
14846 dev->dev_addr[5] = lo & 0xff;
14847 dev->dev_addr[4] = (lo >> 8) & 0xff;
14848 dev->dev_addr[3] = (lo >> 16) & 0xff;
14849 dev->dev_addr[2] = (lo >> 24) & 0xff;
14850 dev->dev_addr[1] = hi & 0xff;
14851 dev->dev_addr[0] = (hi >> 8) & 0xff;
14852 }
14853 }
14855 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14856 #ifdef CONFIG_SPARC
14857 if (!tg3_get_default_macaddr_sparc(tp))
14858 return 0;
14859 #endif
14860 return -EINVAL;
14861 }
14862 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14863 return 0;
14864 }
14866 #define BOUNDARY_SINGLE_CACHELINE 1
14867 #define BOUNDARY_MULTI_CACHELINE 2
14869 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14870 {
14871 int cacheline_size;
14872 u8 byte;
14873 int goal;
14875 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14876 if (byte == 0)
14877 cacheline_size = 1024;
14878 else
14879 cacheline_size = (int) byte * 4;
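/* PCI_CACHE_LINE_SIZE is encoded in 32-bit words, hence the factor
 * of four; a value of zero means firmware never programmed it, so
 * the 1024-byte worst case is assumed.
 */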
14881 /* On 5703 and later chips, the boundary bits have no
14882 * effect.
14883 */
14884 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14885 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14886 !tg3_flag(tp, PCI_EXPRESS))
14887 goto out;
14889 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14890 goal = BOUNDARY_MULTI_CACHELINE;
14891 #else
14892 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14893 goal = BOUNDARY_SINGLE_CACHELINE;
14894 #else
14895 goal = 0;
14896 #endif
14897 #endif
14899 if (tg3_flag(tp, 57765_PLUS)) {
14900 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14901 goto out;
14902 }
14904 if (!goal)
14905 goto out;
14907 /* PCI controllers on most RISC systems tend to disconnect
14908 * when a device tries to burst across a cache-line boundary.
14909 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14911 * Unfortunately, for PCI-E there are only limited
14912 * write-side controls for this, and thus for reads
14913 * we will still get the disconnects. We'll also waste
14914 * these PCI cycles for both read and write for chips
14915 * other than 5700 and 5701 which do not implement the
14916 * boundary bits.
14917 */
14918 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14919 switch (cacheline_size) {
14920 case 16:
14921 case 32:
14922 case 64:
14923 case 128:
14924 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14925 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14926 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14927 } else {
14928 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14929 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14930 }
14931 break;
14933 case 256:
14934 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14935 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14936 break;
14938 default:
14939 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14940 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14941 break;
14942 }
14943 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14944 switch (cacheline_size) {
14945 case 16:
14946 case 32:
14947 case 64:
14948 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14949 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14950 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14951 break;
14952 }
14953 /* fallthrough */
14954 case 128:
14955 default:
14956 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14957 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14958 break;
14959 }
14960 } else {
14961 switch (cacheline_size) {
14962 case 16:
14963 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14964 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14965 DMA_RWCTRL_WRITE_BNDRY_16);
14966 break;
14967 }
14968 /* fallthrough */
14969 case 32:
14970 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14971 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14972 DMA_RWCTRL_WRITE_BNDRY_32);
14973 break;
14974 }
14975 /* fallthrough */
14976 case 64:
14977 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14978 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14979 DMA_RWCTRL_WRITE_BNDRY_64);
14980 break;
14981 }
14982 /* fallthrough */
14983 case 128:
14984 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14985 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14986 DMA_RWCTRL_WRITE_BNDRY_128);
14987 break;
14988 }
14989 /* fallthrough */
14990 case 256:
14991 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14992 DMA_RWCTRL_WRITE_BNDRY_256);
14993 break;
14994 case 512:
14995 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14996 DMA_RWCTRL_WRITE_BNDRY_512);
14997 break;
14998 case 1024:
14999 default:
15000 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15001 DMA_RWCTRL_WRITE_BNDRY_1024);
15002 break;
15003 }
15004 }
15006 out:
15007 return val;
15008 }
15010 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15011 {
15012 struct tg3_internal_buffer_desc test_desc;
15013 u32 sram_dma_descs;
15014 int i, ret;
15016 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15018 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15019 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15020 tw32(RDMAC_STATUS, 0);
15021 tw32(WDMAC_STATUS, 0);
15023 tw32(BUFMGR_MODE, 0);
15024 tw32(FTQ_RESET, 0);
15026 test_desc.addr_hi = ((u64) buf_dma) >> 32;
15027 test_desc.addr_lo = buf_dma & 0xffffffff;
15028 test_desc.nic_mbuf = 0x00002100;
15029 test_desc.len = size;
15031 /*
15032 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
15033 * the *second* time the tg3 driver was getting loaded after an
15034 * initial scan.
15036 * Broadcom tells me:
15037 * ...the DMA engine is connected to the GRC block and a DMA
15038 * reset may affect the GRC block in some unpredictable way...
15039 * The behavior of resets to individual blocks has not been tested.
15041 * Broadcom noted the GRC reset will also reset all sub-components.
15042 */
15043 if (to_device) {
15044 test_desc.cqid_sqid = (13 << 8) | 2;
15046 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15047 udelay(40);
15048 } else {
15049 test_desc.cqid_sqid = (16 << 8) | 7;
15051 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15052 udelay(40);
15053 }
15054 test_desc.flags = 0x00000005;
15056 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15057 u32 val;
15059 val = *(((u32 *)&test_desc) + i);
15060 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15061 sram_dma_descs + (i * sizeof(u32)));
15062 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15063 }
15064 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
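/* The descriptor now sits in NIC SRAM, copied one 32-bit word at a
 * time through the config-space memory window; next, kick the DMA
 * engine by enqueueing the descriptor's SRAM address on the read or
 * write FIFO.
 */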
15066 if (to_device)
15067 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15068 else
15069 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15071 ret = -ENODEV;
15072 for (i = 0; i < 40; i++) {
15073 u32 val;
15075 if (to_device)
15076 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15077 else
15078 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15079 if ((val & 0xffff) == sram_dma_descs) {
15080 ret = 0;
15081 break;
15082 }
15084 udelay(100);
15085 }
15087 return ret;
15088 }
15090 #define TEST_BUFFER_SIZE 0x2000
15092 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15093 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15094 { },
15097 static int __devinit tg3_test_dma(struct tg3 *tp)
15098 {
15099 dma_addr_t buf_dma;
15100 u32 *buf, saved_dma_rwctrl;
15101 int ret = 0;
15103 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15104 &buf_dma, GFP_KERNEL);
15105 if (!buf) {
15106 ret = -ENOMEM;
15107 goto out_nofree;
15108 }
15110 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15111 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15113 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15115 if (tg3_flag(tp, 57765_PLUS))
15116 goto out;
15118 if (tg3_flag(tp, PCI_EXPRESS)) {
15119 /* DMA read watermark not used on PCIE */
15120 tp->dma_rwctrl |= 0x00180000;
15121 } else if (!tg3_flag(tp, PCIX_MODE)) {
15122 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15123 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15124 tp->dma_rwctrl |= 0x003f0000;
15125 else
15126 tp->dma_rwctrl |= 0x003f000f;
15127 } else {
15128 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15129 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15130 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15131 u32 read_water = 0x7;
15133 /* If the 5704 is behind the EPB bridge, we can
15134 * do the less restrictive ONE_DMA workaround for
15135 * better performance.
15136 */
15137 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15138 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15139 tp->dma_rwctrl |= 0x8000;
15140 else if (ccval == 0x6 || ccval == 0x7)
15141 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15143 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15144 read_water = 4;
15145 /* Set bit 23 to enable PCIX hw bug fix */
15146 tp->dma_rwctrl |=
15147 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15148 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15149 (1 << 23);
15150 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15151 /* 5780 always in PCIX mode */
15152 tp->dma_rwctrl |= 0x00144000;
15153 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15154 /* 5714 always in PCIX mode */
15155 tp->dma_rwctrl |= 0x00148000;
15156 } else {
15157 tp->dma_rwctrl |= 0x001b000f;
15158 }
15159 }
15161 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15162 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15163 tp->dma_rwctrl &= 0xfffffff0;
15165 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15166 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15167 /* Remove this if it causes problems for some boards. */
15168 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15170 /* On 5700/5701 chips, we need to set this bit.
15171 * Otherwise the chip will issue cacheline transactions
15172 * to streamable DMA memory with not all the byte
15173 * enables turned on. This is an error on several
15174 * RISC PCI controllers, in particular sparc64.
15176 * On 5703/5704 chips, this bit has been reassigned
15177 * a different meaning. In particular, it is used
15178 * on those chips to enable a PCI-X workaround.
15179 */
15180 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15181 }
15183 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15185 #if 0
15186 /* Unneeded, already done by tg3_get_invariants. */
15187 tg3_switch_clocks(tp);
15188 #endif
15190 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15191 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15192 goto out;
15194 /* It is best to perform DMA test with maximum write burst size
15195 * to expose the 5700/5701 write DMA bug.
15196 */
15197 saved_dma_rwctrl = tp->dma_rwctrl;
15198 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15199 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15201 while (1) {
15202 u32 *p = buf, i;
15204 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15205 p[i] = i;
15207 /* Send the buffer to the chip. */
15208 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15209 if (ret) {
15210 dev_err(&tp->pdev->dev,
15211 "%s: Buffer write failed. err = %d\n",
15212 __func__, ret);
15213 break;
15214 }
15216 #if 0
15217 /* validate data reached card RAM correctly. */
15218 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15219 u32 val;
15220 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15221 if (le32_to_cpu(val) != p[i]) {
15222 dev_err(&tp->pdev->dev,
15223 "%s: Buffer corrupted on device! "
15224 "(%d != %d)\n", __func__, val, i);
15225 /* ret = -ENODEV here? */
15226 }
15227 p[i] = 0;
15228 }
15229 #endif
15230 /* Now read it back. */
15231 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15232 if (ret) {
15233 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15234 "err = %d\n", __func__, ret);
15235 break;
15236 }
15238 /* Verify it. */
15239 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15240 if (p[i] == i)
15241 continue;
15243 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15244 DMA_RWCTRL_WRITE_BNDRY_16) {
15245 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15246 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15247 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15248 break;
15249 } else {
15250 dev_err(&tp->pdev->dev,
15251 "%s: Buffer corrupted on read back! "
15252 "(%d != %d)\n", __func__, p[i], i);
15253 ret = -ENODEV;
15254 goto out;
15255 }
15256 }
15258 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15259 /* Success. */
15260 ret = 0;
15261 break;
15262 }
15263 }
15264 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15265 DMA_RWCTRL_WRITE_BNDRY_16) {
15266 /* DMA test passed without adjusting DMA boundary,
15267 * now look for chipsets that are known to expose the
15268 * DMA bug without failing the test.
15269 */
15270 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15271 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15272 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15273 } else {
15274 /* Safe to use the calculated DMA boundary. */
15275 tp->dma_rwctrl = saved_dma_rwctrl;
15276 }
15278 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15279 }
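/* Net effect of the loop above: a corrupted read back first tightens
 * the write boundary to 16 bytes and retries; corruption with the
 * 16-byte boundary already in force is treated as a fatal -ENODEV.
 */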
15281 out:
15282 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15283 out_nofree:
15284 return ret;
15285 }
15287 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15288 {
15289 if (tg3_flag(tp, 57765_PLUS)) {
15290 tp->bufmgr_config.mbuf_read_dma_low_water =
15291 DEFAULT_MB_RDMA_LOW_WATER_5705;
15292 tp->bufmgr_config.mbuf_mac_rx_low_water =
15293 DEFAULT_MB_MACRX_LOW_WATER_57765;
15294 tp->bufmgr_config.mbuf_high_water =
15295 DEFAULT_MB_HIGH_WATER_57765;
15297 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15298 DEFAULT_MB_RDMA_LOW_WATER_5705;
15299 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15300 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15301 tp->bufmgr_config.mbuf_high_water_jumbo =
15302 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15303 } else if (tg3_flag(tp, 5705_PLUS)) {
15304 tp->bufmgr_config.mbuf_read_dma_low_water =
15305 DEFAULT_MB_RDMA_LOW_WATER_5705;
15306 tp->bufmgr_config.mbuf_mac_rx_low_water =
15307 DEFAULT_MB_MACRX_LOW_WATER_5705;
15308 tp->bufmgr_config.mbuf_high_water =
15309 DEFAULT_MB_HIGH_WATER_5705;
15310 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15311 tp->bufmgr_config.mbuf_mac_rx_low_water =
15312 DEFAULT_MB_MACRX_LOW_WATER_5906;
15313 tp->bufmgr_config.mbuf_high_water =
15314 DEFAULT_MB_HIGH_WATER_5906;
15315 }
15317 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15318 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15319 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15320 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15321 tp->bufmgr_config.mbuf_high_water_jumbo =
15322 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15323 } else {
15324 tp->bufmgr_config.mbuf_read_dma_low_water =
15325 DEFAULT_MB_RDMA_LOW_WATER;
15326 tp->bufmgr_config.mbuf_mac_rx_low_water =
15327 DEFAULT_MB_MACRX_LOW_WATER;
15328 tp->bufmgr_config.mbuf_high_water =
15329 DEFAULT_MB_HIGH_WATER;
15331 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15332 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15333 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15334 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15335 tp->bufmgr_config.mbuf_high_water_jumbo =
15336 DEFAULT_MB_HIGH_WATER_JUMBO;
15337 }
15339 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15340 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15341 }
15343 static char * __devinit tg3_phy_string(struct tg3 *tp)
15344 {
15345 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15346 case TG3_PHY_ID_BCM5400: return "5400";
15347 case TG3_PHY_ID_BCM5401: return "5401";
15348 case TG3_PHY_ID_BCM5411: return "5411";
15349 case TG3_PHY_ID_BCM5701: return "5701";
15350 case TG3_PHY_ID_BCM5703: return "5703";
15351 case TG3_PHY_ID_BCM5704: return "5704";
15352 case TG3_PHY_ID_BCM5705: return "5705";
15353 case TG3_PHY_ID_BCM5750: return "5750";
15354 case TG3_PHY_ID_BCM5752: return "5752";
15355 case TG3_PHY_ID_BCM5714: return "5714";
15356 case TG3_PHY_ID_BCM5780: return "5780";
15357 case TG3_PHY_ID_BCM5755: return "5755";
15358 case TG3_PHY_ID_BCM5787: return "5787";
15359 case TG3_PHY_ID_BCM5784: return "5784";
15360 case TG3_PHY_ID_BCM5756: return "5722/5756";
15361 case TG3_PHY_ID_BCM5906: return "5906";
15362 case TG3_PHY_ID_BCM5761: return "5761";
15363 case TG3_PHY_ID_BCM5718C: return "5718C";
15364 case TG3_PHY_ID_BCM5718S: return "5718S";
15365 case TG3_PHY_ID_BCM57765: return "57765";
15366 case TG3_PHY_ID_BCM5719C: return "5719C";
15367 case TG3_PHY_ID_BCM5720C: return "5720C";
15368 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15369 case 0: return "serdes";
15370 default: return "unknown";
15371 }
15372 }
15374 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15375 {
15376 if (tg3_flag(tp, PCI_EXPRESS)) {
15377 strcpy(str, "PCI Express");
15378 return str;
15379 } else if (tg3_flag(tp, PCIX_MODE)) {
15380 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15382 strcpy(str, "PCIX:");
15384 if ((clock_ctrl == 7) ||
15385 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15386 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15387 strcat(str, "133MHz");
15388 else if (clock_ctrl == 0)
15389 strcat(str, "33MHz");
15390 else if (clock_ctrl == 2)
15391 strcat(str, "50MHz");
15392 else if (clock_ctrl == 4)
15393 strcat(str, "66MHz");
15394 else if (clock_ctrl == 6)
15395 strcat(str, "100MHz");
15396 } else {
15397 strcpy(str, "PCI:");
15398 if (tg3_flag(tp, PCI_HIGH_SPEED))
15399 strcat(str, "66MHz");
15400 else
15401 strcat(str, "33MHz");
15402 }
15403 if (tg3_flag(tp, PCI_32BIT))
15404 strcat(str, ":32-bit");
15405 else
15406 strcat(str, ":64-bit");
15407 return str;
15408 }
15410 static void __devinit tg3_init_coal(struct tg3 *tp)
15411 {
15412 struct ethtool_coalesce *ec = &tp->coal;
15414 memset(ec, 0, sizeof(*ec));
15415 ec->cmd = ETHTOOL_GCOALESCE;
15416 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15417 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15418 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15419 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15420 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15421 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15422 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15423 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15424 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15426 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15427 HOSTCC_MODE_CLRTICK_TXBD)) {
15428 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15429 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15430 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15431 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15432 }
15434 if (tg3_flag(tp, 5705_PLUS)) {
15435 ec->rx_coalesce_usecs_irq = 0;
15436 ec->tx_coalesce_usecs_irq = 0;
15437 ec->stats_block_coalesce_usecs = 0;
15438 }
15439 }
15441 static int __devinit tg3_init_one(struct pci_dev *pdev,
15442 const struct pci_device_id *ent)
15443 {
15444 struct net_device *dev;
15445 struct tg3 *tp;
15446 int i, err, pm_cap;
15447 u32 sndmbx, rcvmbx, intmbx;
15448 char str[40];
15449 u64 dma_mask, persist_dma_mask;
15450 netdev_features_t features = 0;
15452 printk_once(KERN_INFO "%s\n", version);
15454 err = pci_enable_device(pdev);
15455 if (err) {
15456 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15457 return err;
15458 }
15460 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15461 if (err) {
15462 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15463 goto err_out_disable_pdev;
15464 }
15466 pci_set_master(pdev);
15468 /* Find power-management capability. */
15469 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15470 if (pm_cap == 0) {
15471 dev_err(&pdev->dev,
15472 "Cannot find Power Management capability, aborting\n");
15473 err = -EIO;
15474 goto err_out_free_res;
15475 }
15477 err = pci_set_power_state(pdev, PCI_D0);
15478 if (err) {
15479 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15480 goto err_out_free_res;
15481 }
15483 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15484 if (!dev) {
15485 err = -ENOMEM;
15486 goto err_out_power_down;
15487 }
15489 SET_NETDEV_DEV(dev, &pdev->dev);
15491 tp = netdev_priv(dev);
15492 tp->pdev = pdev;
15493 tp->dev = dev;
15494 tp->pm_cap = pm_cap;
15495 tp->rx_mode = TG3_DEF_RX_MODE;
15496 tp->tx_mode = TG3_DEF_TX_MODE;
15498 if (tg3_debug > 0)
15499 tp->msg_enable = tg3_debug;
15500 else
15501 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15503 /* The word/byte swap controls here control register access byte
15504 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15505 * setting below.
15506 */
15507 tp->misc_host_ctrl =
15508 MISC_HOST_CTRL_MASK_PCI_INT |
15509 MISC_HOST_CTRL_WORD_SWAP |
15510 MISC_HOST_CTRL_INDIR_ACCESS |
15511 MISC_HOST_CTRL_PCISTATE_RW;
15513 /* The NONFRM (non-frame) byte/word swap controls take effect
15514 * on descriptor entries, anything which isn't packet data.
15516 * The StrongARM chips on the board (one for tx, one for rx)
15517 * are running in big-endian mode.
15518 */
15519 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15520 GRC_MODE_WSWAP_NONFRM_DATA);
15521 #ifdef __BIG_ENDIAN
15522 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15523 #endif
15524 spin_lock_init(&tp->lock);
15525 spin_lock_init(&tp->indirect_lock);
15526 INIT_WORK(&tp->reset_task, tg3_reset_task);
15528 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15529 if (!tp->regs) {
15530 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15531 err = -ENOMEM;
15532 goto err_out_free_dev;
15533 }
15535 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15536 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15537 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15538 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15539 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15540 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15541 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15542 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15543 tg3_flag_set(tp, ENABLE_APE);
15544 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15545 if (!tp->aperegs) {
15546 dev_err(&pdev->dev,
15547 "Cannot map APE registers, aborting\n");
15548 err = -ENOMEM;
15549 goto err_out_iounmap;
15550 }
15551 }
15553 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15554 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15556 dev->ethtool_ops = &tg3_ethtool_ops;
15557 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15558 dev->netdev_ops = &tg3_netdev_ops;
15559 dev->irq = pdev->irq;
15561 err = tg3_get_invariants(tp);
15562 if (err) {
15563 dev_err(&pdev->dev,
15564 "Problem fetching invariants of chip, aborting\n");
15565 goto err_out_apeunmap;
15566 }
15568 /* The EPB bridge inside 5714, 5715, and 5780 and any
15569 * device behind the EPB cannot support DMA addresses > 40-bit.
15570 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15571 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15572 * do DMA address check in tg3_start_xmit().
15573 */
15574 if (tg3_flag(tp, IS_5788))
15575 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15576 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15577 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15578 #ifdef CONFIG_HIGHMEM
15579 dma_mask = DMA_BIT_MASK(64);
15580 #endif
15581 } else
15582 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15584 /* Configure DMA attributes. */
15585 if (dma_mask > DMA_BIT_MASK(32)) {
15586 err = pci_set_dma_mask(pdev, dma_mask);
15587 if (!err) {
15588 features |= NETIF_F_HIGHDMA;
15589 err = pci_set_consistent_dma_mask(pdev,
15590 persist_dma_mask);
15591 if (err < 0) {
15592 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15593 "DMA for consistent allocations\n");
15594 goto err_out_apeunmap;
15595 }
15596 }
15597 }
15598 if (err || dma_mask == DMA_BIT_MASK(32)) {
15599 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15600 if (err) {
15601 dev_err(&pdev->dev,
15602 "No usable DMA configuration, aborting\n");
15603 goto err_out_apeunmap;
15604 }
15605 }
15607 tg3_init_bufmgr_config(tp);
15609 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15611 /* 5700 B0 chips do not support checksumming correctly due
15612 * to hardware bugs.
15613 */
15614 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15615 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15617 if (tg3_flag(tp, 5755_PLUS))
15618 features |= NETIF_F_IPV6_CSUM;
15619 }
15621 /* TSO is on by default on chips that support hardware TSO.
15622 * Firmware TSO on older chips gives lower performance, so it
15623 * is off by default, but can be enabled using ethtool.
15624 */
15625 if ((tg3_flag(tp, HW_TSO_1) ||
15626 tg3_flag(tp, HW_TSO_2) ||
15627 tg3_flag(tp, HW_TSO_3)) &&
15628 (features & NETIF_F_IP_CSUM))
15629 features |= NETIF_F_TSO;
15630 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15631 if (features & NETIF_F_IPV6_CSUM)
15632 features |= NETIF_F_TSO6;
15633 if (tg3_flag(tp, HW_TSO_3) ||
15634 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15635 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15636 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15637 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15638 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15639 features |= NETIF_F_TSO_ECN;
15640 }
15642 dev->features |= features;
15643 dev->vlan_features |= features;
15645 /*
15646 * Add loopback capability only for a subset of devices that support
15647 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15648 * loopback for the remaining devices.
15649 */
15650 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15651 !tg3_flag(tp, CPMU_PRESENT))
15652 /* Add the loopback capability */
15653 features |= NETIF_F_LOOPBACK;
15655 dev->hw_features |= features;
15657 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15658 !tg3_flag(tp, TSO_CAPABLE) &&
15659 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15660 tg3_flag_set(tp, MAX_RXPEND_64);
15661 tp->rx_pending = 63;
15662 }
15664 err = tg3_get_device_address(tp);
15665 if (err) {
15666 dev_err(&pdev->dev,
15667 "Could not obtain valid ethernet address, aborting\n");
15668 goto err_out_apeunmap;
15669 }
15671 /*
15672 * Reset chip in case UNDI or EFI driver did not shut it down.
15673 * The DMA self test will enable WDMAC and we'll see (spurious)
15674 * pending DMA on the PCI bus at that point.
15675 */
15676 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15677 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15678 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15679 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15680 }
15682 err = tg3_test_dma(tp);
15683 if (err) {
15684 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15685 goto err_out_apeunmap;
15686 }
15688 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15689 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15690 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15691 for (i = 0; i < tp->irq_max; i++) {
15692 struct tg3_napi *tnapi = &tp->napi[i];
15694 tnapi->tp = tp;
15695 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15697 tnapi->int_mbox = intmbx;
15698 if (i <= 4)
15699 intmbx += 0x8;
15700 else
15701 intmbx += 0x4;
15703 tnapi->consmbox = rcvmbx;
15704 tnapi->prodmbox = sndmbx;
15706 if (i)
15707 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15708 else
15709 tnapi->coal_now = HOSTCC_MODE_NOW;
15711 if (!tg3_flag(tp, SUPPORT_MSIX))
15712 break;
15714 /*
15715 * If we support MSIX, we'll be using RSS. If we're using
15716 * RSS, the first vector only handles link interrupts and the
15717 * remaining vectors handle rx and tx interrupts. Reuse the
15718 * mailbox values for the next iteration. The values we setup
15719 * above are still useful for the single-vectored mode.
15720 */
15721 if (!i)
15722 continue;
15724 rcvmbx += 0x8;
15726 if (sndmbx & 0x4)
15727 sndmbx -= 0x4;
15728 else
15729 sndmbx += 0xc;
15730 }
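/* The receive-return mailboxes advance at a uniform 8-byte stride,
 * but the send mailboxes evidently do not, hence the alternating
 * -0x4/+0xc adjustment used to walk them above.
 */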
15732 tg3_init_coal(tp);
15734 pci_set_drvdata(pdev, dev);
15736 if (tg3_flag(tp, 5717_PLUS)) {
15737 /* Resume a low-power mode */
15738 tg3_frob_aux_power(tp, false);
15739 }
15741 err = register_netdev(dev);
15742 if (err) {
15743 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15744 goto err_out_apeunmap;
15745 }
15747 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15748 tp->board_part_number,
15749 tp->pci_chip_rev_id,
15750 tg3_bus_string(tp, str),
15751 dev->dev_addr);
15753 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15754 struct phy_device *phydev;
15755 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15756 netdev_info(dev,
15757 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15758 phydev->drv->name, dev_name(&phydev->dev));
15759 } else {
15760 char *ethtype;
15762 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15763 ethtype = "10/100Base-TX";
15764 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15765 ethtype = "1000Base-SX";
15766 else
15767 ethtype = "10/100/1000Base-T";
15769 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15770 "(WireSpeed[%d], EEE[%d])\n",
15771 tg3_phy_string(tp), ethtype,
15772 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15773 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15774 }
15776 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15777 (dev->features & NETIF_F_RXCSUM) != 0,
15778 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15779 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15780 tg3_flag(tp, ENABLE_ASF) != 0,
15781 tg3_flag(tp, TSO_CAPABLE) != 0);
15782 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15783 tp->dma_rwctrl,
15784 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15785 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15787 pci_save_state(pdev);
15789 return 0;
15791 err_out_apeunmap:
15792 if (tp->aperegs) {
15793 iounmap(tp->aperegs);
15794 tp->aperegs = NULL;
15795 }
15797 err_out_iounmap:
15798 if (tp->regs) {
15799 iounmap(tp->regs);
15800 tp->regs = NULL;
15801 }
15803 err_out_free_dev:
15804 free_netdev(dev);
15806 err_out_power_down:
15807 pci_set_power_state(pdev, PCI_D3hot);
15809 err_out_free_res:
15810 pci_release_regions(pdev);
15812 err_out_disable_pdev:
15813 pci_disable_device(pdev);
15814 pci_set_drvdata(pdev, NULL);
15815 return err;
15816 }
15818 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15819 {
15820 struct net_device *dev = pci_get_drvdata(pdev);
15822 if (dev) {
15823 struct tg3 *tp = netdev_priv(dev);
15825 if (tp->fw)
15826 release_firmware(tp->fw);
15828 tg3_reset_task_cancel(tp);
15830 if (tg3_flag(tp, USE_PHYLIB)) {
15831 tg3_phy_fini(tp);
15832 tg3_mdio_fini(tp);
15833 }
15835 unregister_netdev(dev);
15836 if (tp->aperegs) {
15837 iounmap(tp->aperegs);
15838 tp->aperegs = NULL;
15839 }
15840 if (tp->regs) {
15841 iounmap(tp->regs);
15842 tp->regs = NULL;
15843 }
15844 free_netdev(dev);
15845 pci_release_regions(pdev);
15846 pci_disable_device(pdev);
15847 pci_set_drvdata(pdev, NULL);
15848 }
15849 }
15851 #ifdef CONFIG_PM_SLEEP
15852 static int tg3_suspend(struct device *device)
15853 {
15854 struct pci_dev *pdev = to_pci_dev(device);
15855 struct net_device *dev = pci_get_drvdata(pdev);
15856 struct tg3 *tp = netdev_priv(dev);
15857 int err;
15859 if (!netif_running(dev))
15860 return 0;
15862 tg3_reset_task_cancel(tp);
15863 tg3_phy_stop(tp);
15864 tg3_netif_stop(tp);
15866 del_timer_sync(&tp->timer);
15868 tg3_full_lock(tp, 1);
15869 tg3_disable_ints(tp);
15870 tg3_full_unlock(tp);
15872 netif_device_detach(dev);
15874 tg3_full_lock(tp, 0);
15875 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15876 tg3_flag_clear(tp, INIT_COMPLETE);
15877 tg3_full_unlock(tp);
15879 err = tg3_power_down_prepare(tp);
15880 if (err) {
15881 int err2;
15883 tg3_full_lock(tp, 0);
15885 tg3_flag_set(tp, INIT_COMPLETE);
15886 err2 = tg3_restart_hw(tp, 1);
15887 if (err2)
15888 goto out;
15890 tp->timer.expires = jiffies + tp->timer_offset;
15891 add_timer(&tp->timer);
15893 netif_device_attach(dev);
15894 tg3_netif_start(tp);
15896 out:
15897 tg3_full_unlock(tp);
15899 if (!err2)
15900 tg3_phy_start(tp);
15901 }
15903 return err;
15904 }
15906 static int tg3_resume(struct device *device)
15907 {
15908 struct pci_dev *pdev = to_pci_dev(device);
15909 struct net_device *dev = pci_get_drvdata(pdev);
15910 struct tg3 *tp = netdev_priv(dev);
15911 int err;
15913 if (!netif_running(dev))
15914 return 0;
15916 netif_device_attach(dev);
15918 tg3_full_lock(tp, 0);
15920 tg3_flag_set(tp, INIT_COMPLETE);
15921 err = tg3_restart_hw(tp, 1);
15922 if (err)
15923 goto out;
15925 tp->timer.expires = jiffies + tp->timer_offset;
15926 add_timer(&tp->timer);
15928 tg3_netif_start(tp);
15930 out:
15931 tg3_full_unlock(tp);
15933 if (!err)
15934 tg3_phy_start(tp);
15936 return err;
15937 }
15939 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15940 #define TG3_PM_OPS (&tg3_pm_ops)
15942 #else
15944 #define TG3_PM_OPS NULL
15946 #endif /* CONFIG_PM_SLEEP */
15948 /**
15949 * tg3_io_error_detected - called when PCI error is detected
15950 * @pdev: Pointer to PCI device
15951 * @state: The current pci connection state
15953 * This function is called after a PCI bus error affecting
15954 * this device has been detected.
15955 */
15956 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15957 pci_channel_state_t state)
15958 {
15959 struct net_device *netdev = pci_get_drvdata(pdev);
15960 struct tg3 *tp = netdev_priv(netdev);
15961 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15963 netdev_info(netdev, "PCI I/O error detected\n");
15965 rtnl_lock();
15967 if (!netif_running(netdev))
15968 goto done;
15970 tg3_phy_stop(tp);
15972 tg3_netif_stop(tp);
15974 del_timer_sync(&tp->timer);
15976 /* Want to make sure that the reset task doesn't run */
15977 tg3_reset_task_cancel(tp);
15978 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15980 netif_device_detach(netdev);
15982 /* Clean up software state, even if MMIO is blocked */
15983 tg3_full_lock(tp, 0);
15984 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15985 tg3_full_unlock(tp);
15987 done:
15988 if (state == pci_channel_io_perm_failure)
15989 err = PCI_ERS_RESULT_DISCONNECT;
15990 else
15991 pci_disable_device(pdev);
15993 rtnl_unlock();
15995 return err;
15996 }
15998 /**
15999 * tg3_io_slot_reset - called after the pci bus has been reset.
16000 * @pdev: Pointer to PCI device
16002 * Restart the card from scratch, as if from a cold-boot.
16003 * At this point, the card has experienced a hard reset,
16004 * followed by fixups by BIOS, and has its config space
16005 * set up identically to what it was at cold boot.
16006 */
16007 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16008 {
16009 struct net_device *netdev = pci_get_drvdata(pdev);
16010 struct tg3 *tp = netdev_priv(netdev);
16011 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16012 int err;
16014 rtnl_lock();
16016 if (pci_enable_device(pdev)) {
16017 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16018 goto done;
16019 }
16021 pci_set_master(pdev);
16022 pci_restore_state(pdev);
16023 pci_save_state(pdev);
16025 if (!netif_running(netdev)) {
16026 rc = PCI_ERS_RESULT_RECOVERED;
16027 goto done;
16028 }
16030 err = tg3_power_up(tp);
16031 if (err)
16032 goto done;
16034 rc = PCI_ERS_RESULT_RECOVERED;
16036 done:
16037 rtnl_unlock();
16039 return rc;
16040 }
16042 /**
16043 * tg3_io_resume - called when traffic can start flowing again.
16044 * @pdev: Pointer to PCI device
16046 * This callback is called when the error recovery driver tells
16047 * us that it's OK to resume normal operation.
16048 */
16049 static void tg3_io_resume(struct pci_dev *pdev)
16050 {
16051 struct net_device *netdev = pci_get_drvdata(pdev);
16052 struct tg3 *tp = netdev_priv(netdev);
16053 int err;
16055 rtnl_lock();
16057 if (!netif_running(netdev))
16058 goto done;
16060 tg3_full_lock(tp, 0);
16061 tg3_flag_set(tp, INIT_COMPLETE);
16062 err = tg3_restart_hw(tp, 1);
16063 tg3_full_unlock(tp);
16064 if (err) {
16065 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16066 goto done;
16067 }
16069 netif_device_attach(netdev);
16071 tp->timer.expires = jiffies + tp->timer_offset;
16072 add_timer(&tp->timer);
16074 tg3_netif_start(tp);
16076 tg3_phy_start(tp);
16078 done:
16079 rtnl_unlock();
16080 }
16082 static struct pci_error_handlers tg3_err_handler = {
16083 .error_detected = tg3_io_error_detected,
16084 .slot_reset = tg3_io_slot_reset,
16085 .resume = tg3_io_resume
16086 };
16088 static struct pci_driver tg3_driver = {
16089 .name = DRV_MODULE_NAME,
16090 .id_table = tg3_pci_tbl,
16091 .probe = tg3_init_one,
16092 .remove = __devexit_p(tg3_remove_one),
16093 .err_handler = &tg3_err_handler,
16094 .driver.pm = TG3_PM_OPS,
16095 };
16097 static int __init tg3_init(void)
16098 {
16099 return pci_register_driver(&tg3_driver);
16100 }
16102 static void __exit tg3_cleanup(void)
16103 {
16104 pci_unregister_driver(&tg3_driver);
16105 }
16107 module_init(tg3_init);
16108 module_exit(tg3_cleanup);