tg3: Refactor tg3_open()
drivers/net/ethernet/broadcom/tg3.c
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#if IS_ENABLED(CONFIG_HWMON)
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#endif

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
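/* Illustrative usage (not in the original source): the token-pasting
 * macros above let callers test or update feature bits by short name,
 * e.g.
 *
 *	if (tg3_flag(tp, TAGGED_STATUS))
 *		tg3_flag_clear(tp, TAGGED_STATUS);
 *
 * which expands to test_bit()/clear_bit() on TG3_FLAG_TAGGED_STATUS in
 * tp->tg3_flags.
 */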
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		124
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"March 21, 2012"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
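/* Illustrative note (not in the original source): because
 * TG3_TX_RING_SIZE is a power of two, NEXT_TX() wraps with a plain AND
 * mask, e.g. NEXT_TX(510) == 511 and NEXT_TX(511) == 0 -- no division
 * or modulo instruction needed, which is exactly the point of the
 * n-ring comment above.
 */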
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
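/* Illustrative note (not in the original source): with the default
 * tx_pending of TG3_DEF_TX_RING_PENDING (511), the stopped queue is
 * woken once roughly 511 / 4 = 127 descriptors are free again.
 */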
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test        (online) " },
	{ "link test         (online) " },
	{ "register test     (offline)" },
	{ "memory test       (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}
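/* Explanatory note (not in the original source): the second writel()
 * above re-issues the TX doorbell on chips with the TXD_MBOX_HWBUG
 * erratum, where a single mailbox write can apparently be lost; the
 * trailing readl() flushes posted writes on buses that reorder them.
 */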
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)		tp->write32(tp, reg, val)
#define tw32_f(reg, val)	_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)		tp->read32(tp, reg)
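/* Explanatory note (not in the original source): these accessor macros
 * expand references to a variable named 'tp', so they can only be used
 * inside functions with a 'struct tg3 *tp' in scope.  The _f variants
 * additionally flush the posted write by reading the register back.
 */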
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
		/* fall through: GPIO uses the same per-function bit */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* fall through: GPIO uses the same per-function bit */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
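/* Explanatory note (not in the original source): on success (return 0),
 * tg3_ape_event_lock() exits its loop via the break with
 * TG3_APE_LOCK_MEM still held; the caller posts its event and then
 * releases the lock itself, as tg3_ape_send_event() below does.
 */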
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
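/* Explanatory note (not in the original source): the return value is
 * boolean-style -- nonzero means the APE never cleared the pending bit
 * within timeout_us, zero means the event was serviced in time.
 */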
int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
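/* Explanatory note (not in the original source): when 1SHOT_MSI is set,
 * the interrupt mailbox is written twice above; presumably one write
 * pops the one-shot MSI state and the second actually re-enables the
 * vector on the affected chips.  The original code carries no comment
 * here, so treat this as a reading of the code rather than chip lore.
 */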
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000
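/* Explanatory note (not in the original source): each busy-poll
 * iteration below delays 10 usec, so PHY_BUSY_LOOPS bounds an MDIO
 * transaction at roughly 5000 * 10 usec = 50 msec before
 * tg3_readphy()/tg3_writephy() give up with -EBUSY.
 */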
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
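/* Explanatory note (not in the original source): the two helpers above
 * use the standard Clause 22 indirect MMD access sequence (MII
 * registers 13/14): select the device address, latch the register
 * address, switch the control register to data/no-post-increment mode,
 * then move the data through MII_TG3_MMD_ADDRESS.
 */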
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);
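/* Explanatory note (not in the original source): the DISABLE macro ends
 * in a semicolon while the ENABLE macro does not, so a caller-supplied
 * ';' after DISABLE produces an empty statement.  That is harmless in
 * plain statement context but would break an unbraced if/else.
 */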
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
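/* Explanatory note (not in the original source): the ack wait is polled
 * in 8 usec steps, so "delay_cnt = (delay_cnt >> 3) + 1" simply
 * converts the remaining microseconds into udelay(8) iterations,
 * rounding up.
 */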
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
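/* Explanatory note (not in the original source): this mirrors the usual
 * IEEE 802.3 pause resolution for 1000BASE-X -- symmetric pause when
 * both ends advertise PAUSE; otherwise, with matching asym-pause bits,
 * the end that also advertised PAUSE honors received pause frames
 * (FLOW_CTRL_RX) while its link partner sends them (FLOW_CTRL_TX).
 */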
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
2025 static void tg3_phy_stop(struct tg3 *tp)
2027 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2028 return;
2030 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2033 static void tg3_phy_fini(struct tg3 *tp)
2035 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2036 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2037 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2041 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2043 int err;
2044 u32 val;
2046 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2047 return 0;
2049 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2050 /* Cannot do read-modify-write on 5401 */
2051 err = tg3_phy_auxctl_write(tp,
2052 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2053 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2054 0x4c20);
2055 goto done;
2058 err = tg3_phy_auxctl_read(tp,
2059 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2060 if (err)
2061 return err;
2063 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2064 err = tg3_phy_auxctl_write(tp,
2065 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2067 done:
2068 return err;
2071 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2073 u32 phytest;
2075 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2076 u32 phy;
2078 tg3_writephy(tp, MII_TG3_FET_TEST,
2079 phytest | MII_TG3_FET_SHADOW_EN);
2080 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2081 if (enable)
2082 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2083 else
2084 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2085 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2087 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2091 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2093 u32 reg;
2095 if (!tg3_flag(tp, 5705_PLUS) ||
2096 (tg3_flag(tp, 5717_PLUS) &&
2097 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2098 return;
2100 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2101 tg3_phy_fet_toggle_apd(tp, enable);
2102 return;
2105 reg = MII_TG3_MISC_SHDW_WREN |
2106 MII_TG3_MISC_SHDW_SCR5_SEL |
2107 MII_TG3_MISC_SHDW_SCR5_LPED |
2108 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2109 MII_TG3_MISC_SHDW_SCR5_SDTL |
2110 MII_TG3_MISC_SHDW_SCR5_C125OE;
2111 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2112 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2114 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2117 reg = MII_TG3_MISC_SHDW_WREN |
2118 MII_TG3_MISC_SHDW_APD_SEL |
2119 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2120 if (enable)
2121 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2123 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2126 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2128 u32 phy;
2130 if (!tg3_flag(tp, 5705_PLUS) ||
2131 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2132 return;
2134 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2135 u32 ephy;
2137 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2138 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2140 tg3_writephy(tp, MII_TG3_FET_TEST,
2141 ephy | MII_TG3_FET_SHADOW_EN);
2142 if (!tg3_readphy(tp, reg, &phy)) {
2143 if (enable)
2144 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2145 else
2146 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2147 tg3_writephy(tp, reg, phy);
2149 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2151 } else {
2152 int ret;
2154 ret = tg3_phy_auxctl_read(tp,
2155 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2156 if (!ret) {
2157 if (enable)
2158 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2159 else
2160 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2161 tg3_phy_auxctl_write(tp,
2162 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2167 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2169 int ret;
2170 u32 val;
2172 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2173 return;
2175 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2176 if (!ret)
2177 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2178 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2181 static void tg3_phy_apply_otp(struct tg3 *tp)
2183 u32 otp, phy;
2185 if (!tp->phy_otp)
2186 return;
2188 otp = tp->phy_otp;
2190 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2191 return;
2193 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2194 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2195 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2197 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2198 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2199 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2201 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2202 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2203 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2205 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2206 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2208 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2209 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2211 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2212 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2213 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2215 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2218 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2220 u32 val;
2222 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2223 return;
2225 tp->setlpicnt = 0;
2227 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2228 current_link_up == 1 &&
2229 tp->link_config.active_duplex == DUPLEX_FULL &&
2230 (tp->link_config.active_speed == SPEED_100 ||
2231 tp->link_config.active_speed == SPEED_1000)) {
2232 u32 eeectl;
2234 if (tp->link_config.active_speed == SPEED_1000)
2235 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2236 else
2237 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2239 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2241 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2242 TG3_CL45_D7_EEERES_STAT, &val);
2244 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2245 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2246 tp->setlpicnt = 2;
2249 if (!tp->setlpicnt) {
2250 if (current_link_up == 1 &&
2251 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2252 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2253 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2256 val = tr32(TG3_CPMU_EEE_MODE);
2257 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2261 static void tg3_phy_eee_enable(struct tg3 *tp)
2263 u32 val;
2265 if (tp->link_config.active_speed == SPEED_1000 &&
2266 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2267 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2268 tg3_flag(tp, 57765_CLASS)) &&
2269 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2270 val = MII_TG3_DSP_TAP26_ALNOKO |
2271 MII_TG3_DSP_TAP26_RMRXSTO;
2272 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2273 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2276 val = tr32(TG3_CPMU_EEE_MODE);
2277 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2280 static int tg3_wait_macro_done(struct tg3 *tp)
2282 int limit = 100;
2284 while (limit--) {
2285 u32 tmp32;
2287 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2288 if ((tmp32 & 0x1000) == 0)
2289 break;
2292 if (limit < 0)
2293 return -EBUSY;
2295 return 0;
2298 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2300 static const u32 test_pat[4][6] = {
2301 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2302 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2303 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2304 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2306 int chan;
2308 for (chan = 0; chan < 4; chan++) {
2309 int i;
2311 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2312 (chan * 0x2000) | 0x0200);
2313 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2315 for (i = 0; i < 6; i++)
2316 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2317 test_pat[chan][i]);
2319 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2320 if (tg3_wait_macro_done(tp)) {
2321 *resetp = 1;
2322 return -EBUSY;
2325 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2326 (chan * 0x2000) | 0x0200);
2327 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2328 if (tg3_wait_macro_done(tp)) {
2329 *resetp = 1;
2330 return -EBUSY;
2333 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2334 if (tg3_wait_macro_done(tp)) {
2335 *resetp = 1;
2336 return -EBUSY;
2339 for (i = 0; i < 6; i += 2) {
2340 u32 low, high;
2342 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2343 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2344 tg3_wait_macro_done(tp)) {
2345 *resetp = 1;
2346 return -EBUSY;
2348 low &= 0x7fff;
2349 high &= 0x000f;
2350 if (low != test_pat[chan][i] ||
2351 high != test_pat[chan][i+1]) {
2352 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2353 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2354 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2356 return -EBUSY;
2361 return 0;
2364 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2366 int chan;
2368 for (chan = 0; chan < 4; chan++) {
2369 int i;
2371 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2372 (chan * 0x2000) | 0x0200);
2373 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2374 for (i = 0; i < 6; i++)
2375 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2376 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2377 if (tg3_wait_macro_done(tp))
2378 return -EBUSY;
2381 return 0;
2384 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2386 u32 reg32, phy9_orig;
2387 int retries, do_phy_reset, err;
2389 retries = 10;
2390 do_phy_reset = 1;
2391 do {
2392 if (do_phy_reset) {
2393 err = tg3_bmcr_reset(tp);
2394 if (err)
2395 return err;
2396 do_phy_reset = 0;
2399 /* Disable transmitter and interrupt. */
2400 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2401 continue;
2403 reg32 |= 0x3000;
2404 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2406 /* Set full-duplex, 1000 Mbps. */
2407 tg3_writephy(tp, MII_BMCR,
2408 BMCR_FULLDPLX | BMCR_SPEED1000);
2410 /* Set to master mode. */
2411 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2412 continue;
2414 tg3_writephy(tp, MII_CTRL1000,
2415 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2417 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2418 if (err)
2419 return err;
2421 /* Block the PHY control access. */
2422 tg3_phydsp_write(tp, 0x8005, 0x0800);
2424 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2425 if (!err)
2426 break;
2427 } while (--retries);
2429 err = tg3_phy_reset_chanpat(tp);
2430 if (err)
2431 return err;
2433 tg3_phydsp_write(tp, 0x8005, 0x0000);
2435 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2436 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2438 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2440 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2442 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2443 reg32 &= ~0x3000;
2444 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2445 } else if (!err)
2446 err = -EBUSY;
2448 return err;
2451 /* This will reset the tigon3 PHY.  Callers decide when a reset
2452 * is actually needed (e.g. on loss of a valid link). */
2454 static int tg3_phy_reset(struct tg3 *tp)
2456 u32 val, cpmuctrl;
2457 int err;
2459 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2460 val = tr32(GRC_MISC_CFG);
2461 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2462 udelay(40);
2464 err = tg3_readphy(tp, MII_BMSR, &val);
2465 err |= tg3_readphy(tp, MII_BMSR, &val);
2466 if (err != 0)
2467 return -EBUSY;
2469 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2470 netif_carrier_off(tp->dev);
2471 tg3_link_report(tp);
2474 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2475 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2476 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2477 err = tg3_phy_reset_5703_4_5(tp);
2478 if (err)
2479 return err;
2480 goto out;
2483 cpmuctrl = 0;
2484 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2485 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2486 cpmuctrl = tr32(TG3_CPMU_CTRL);
2487 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2488 tw32(TG3_CPMU_CTRL,
2489 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2492 err = tg3_bmcr_reset(tp);
2493 if (err)
2494 return err;
2496 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2497 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2498 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2500 tw32(TG3_CPMU_CTRL, cpmuctrl);
2503 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2504 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2505 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2506 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2507 CPMU_LSPD_1000MB_MACCLK_12_5) {
2508 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2509 udelay(40);
2510 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2514 if (tg3_flag(tp, 5717_PLUS) &&
2515 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2516 return 0;
2518 tg3_phy_apply_otp(tp);
2520 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2521 tg3_phy_toggle_apd(tp, true);
2522 else
2523 tg3_phy_toggle_apd(tp, false);
2525 out:
2526 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2527 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2528 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2529 tg3_phydsp_write(tp, 0x000a, 0x0323);
2530 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2533 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2534 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2535 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2538 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2539 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2540 tg3_phydsp_write(tp, 0x000a, 0x310b);
2541 tg3_phydsp_write(tp, 0x201f, 0x9506);
2542 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2543 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2545 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2546 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2547 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2548 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2549 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2550 tg3_writephy(tp, MII_TG3_TEST1,
2551 MII_TG3_TEST1_TRIM_EN | 0x4);
2552 } else
2553 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2555 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2559 /* Set Extended packet length bit (bit 14) on all chips that
2560 * support jumbo frames. */
2561 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2562 /* Cannot do read-modify-write on 5401 */
2563 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2564 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2565 /* Set bit 14 with read-modify-write to preserve other bits */
2566 err = tg3_phy_auxctl_read(tp,
2567 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2568 if (!err)
2569 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2570 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2573 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2574 * jumbo frame transmission. */
2576 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2577 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2578 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2579 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2582 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2583 /* adjust output voltage */
2584 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2587 tg3_phy_toggle_automdix(tp, 1);
2588 tg3_phy_set_wirespeed(tp);
2589 return 0;
2592 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2593 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2594 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2595 TG3_GPIO_MSG_NEED_VAUX)
2596 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2597 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2598 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2599 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2600 (TG3_GPIO_MSG_DRVR_PRES << 12))
2602 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2603 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2604 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2605 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2606 (TG3_GPIO_MSG_NEED_VAUX << 12))
2608 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2610 u32 status, shift;
2612 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2613 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2614 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2615 else
2616 status = tr32(TG3_CPMU_DRV_STATUS);
2618 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2619 status &= ~(TG3_GPIO_MSG_MASK << shift);
2620 status |= (newstat << shift);
2622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2623 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2624 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2625 else
2626 tw32(TG3_CPMU_DRV_STATUS, status);
2628 return status >> TG3_APE_GPIO_MSG_SHIFT;
2631 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2633 if (!tg3_flag(tp, IS_NIC))
2634 return 0;
2636 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2637 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2638 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2639 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2640 return -EIO;
2642 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2644 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2645 TG3_GRC_LCLCTL_PWRSW_DELAY);
2647 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2648 } else {
2649 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2650 TG3_GRC_LCLCTL_PWRSW_DELAY);
2653 return 0;
2656 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2658 u32 grc_local_ctrl;
2660 if (!tg3_flag(tp, IS_NIC) ||
2661 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2662 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2663 return;
2665 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2667 tw32_wait_f(GRC_LOCAL_CTRL,
2668 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2669 TG3_GRC_LCLCTL_PWRSW_DELAY);
2671 tw32_wait_f(GRC_LOCAL_CTRL,
2672 grc_local_ctrl,
2673 TG3_GRC_LCLCTL_PWRSW_DELAY);
2675 tw32_wait_f(GRC_LOCAL_CTRL,
2676 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2677 TG3_GRC_LCLCTL_PWRSW_DELAY);
2680 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2682 if (!tg3_flag(tp, IS_NIC))
2683 return;
2685 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2686 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2687 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2688 (GRC_LCLCTRL_GPIO_OE0 |
2689 GRC_LCLCTRL_GPIO_OE1 |
2690 GRC_LCLCTRL_GPIO_OE2 |
2691 GRC_LCLCTRL_GPIO_OUTPUT0 |
2692 GRC_LCLCTRL_GPIO_OUTPUT1),
2693 TG3_GRC_LCLCTL_PWRSW_DELAY);
2694 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2695 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2696 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2697 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2698 GRC_LCLCTRL_GPIO_OE1 |
2699 GRC_LCLCTRL_GPIO_OE2 |
2700 GRC_LCLCTRL_GPIO_OUTPUT0 |
2701 GRC_LCLCTRL_GPIO_OUTPUT1 |
2702 tp->grc_local_ctrl;
2703 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2704 TG3_GRC_LCLCTL_PWRSW_DELAY);
2706 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2707 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2708 TG3_GRC_LCLCTL_PWRSW_DELAY);
2710 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2711 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2712 TG3_GRC_LCLCTL_PWRSW_DELAY);
2713 } else {
2714 u32 no_gpio2;
2715 u32 grc_local_ctrl = 0;
2717 /* Workaround to prevent overdrawing Amps. */
2718 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2719 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2720 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2721 grc_local_ctrl,
2722 TG3_GRC_LCLCTL_PWRSW_DELAY);
2725 /* On 5753 and variants, GPIO2 cannot be used. */
2726 no_gpio2 = tp->nic_sram_data_cfg &
2727 NIC_SRAM_DATA_CFG_NO_GPIO2;
2729 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2730 GRC_LCLCTRL_GPIO_OE1 |
2731 GRC_LCLCTRL_GPIO_OE2 |
2732 GRC_LCLCTRL_GPIO_OUTPUT1 |
2733 GRC_LCLCTRL_GPIO_OUTPUT2;
2734 if (no_gpio2) {
2735 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2736 GRC_LCLCTRL_GPIO_OUTPUT2);
2738 tw32_wait_f(GRC_LOCAL_CTRL,
2739 tp->grc_local_ctrl | grc_local_ctrl,
2740 TG3_GRC_LCLCTL_PWRSW_DELAY);
2742 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2744 tw32_wait_f(GRC_LOCAL_CTRL,
2745 tp->grc_local_ctrl | grc_local_ctrl,
2746 TG3_GRC_LCLCTL_PWRSW_DELAY);
2748 if (!no_gpio2) {
2749 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2750 tw32_wait_f(GRC_LOCAL_CTRL,
2751 tp->grc_local_ctrl | grc_local_ctrl,
2752 TG3_GRC_LCLCTL_PWRSW_DELAY);
2757 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2759 u32 msg = 0;
2761 /* Serialize power state transitions */
2762 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2763 return;
2765 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2766 msg = TG3_GPIO_MSG_NEED_VAUX;
2768 msg = tg3_set_function_status(tp, msg);
2770 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2771 goto done;
2773 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2774 tg3_pwrsrc_switch_to_vaux(tp);
2775 else
2776 tg3_pwrsrc_die_with_vmain(tp);
2778 done:
2779 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2782 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2784 bool need_vaux = false;
2786 /* The GPIOs do something completely different on 57765. */
2787 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2788 return;
2790 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2791 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2792 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2793 tg3_frob_aux_power_5717(tp, include_wol ?
2794 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2795 return;
2798 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2799 struct net_device *dev_peer;
2801 dev_peer = pci_get_drvdata(tp->pdev_peer);
2803 /* remove_one() may have been run on the peer. */
2804 if (dev_peer) {
2805 struct tg3 *tp_peer = netdev_priv(dev_peer);
2807 if (tg3_flag(tp_peer, INIT_COMPLETE))
2808 return;
2810 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2811 tg3_flag(tp_peer, ENABLE_ASF))
2812 need_vaux = true;
2816 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2817 tg3_flag(tp, ENABLE_ASF))
2818 need_vaux = true;
2820 if (need_vaux)
2821 tg3_pwrsrc_switch_to_vaux(tp);
2822 else
2823 tg3_pwrsrc_die_with_vmain(tp);
2826 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2828 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2829 return 1;
2830 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2831 if (speed != SPEED_10)
2832 return 1;
2833 } else if (speed == SPEED_10)
2834 return 1;
2836 return 0;
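/*
* Editorial summary of the rule above:
*
*	led_ctrl == LED_CTRL_MODE_PHY_2  -> always 1
*	BCM5411 PHY                      -> 1 unless running at 10 Mbps
*	any other PHY                    -> 1 only when running at 10 Mbps
*
* i.e. the 5411 inverts the polarity rule the other PHYs follow.
*/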
2839 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2841 u32 val;
2843 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2844 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2845 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2846 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2848 sg_dig_ctrl |=
2849 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2850 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2851 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2853 return;
2856 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2857 tg3_bmcr_reset(tp);
2858 val = tr32(GRC_MISC_CFG);
2859 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2860 udelay(40);
2861 return;
2862 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2863 u32 phytest;
2864 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2865 u32 phy;
2867 tg3_writephy(tp, MII_ADVERTISE, 0);
2868 tg3_writephy(tp, MII_BMCR,
2869 BMCR_ANENABLE | BMCR_ANRESTART);
2871 tg3_writephy(tp, MII_TG3_FET_TEST,
2872 phytest | MII_TG3_FET_SHADOW_EN);
2873 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2874 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2875 tg3_writephy(tp,
2876 MII_TG3_FET_SHDW_AUXMODE4,
2877 phy);
2879 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2881 return;
2882 } else if (do_low_power) {
2883 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2884 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2886 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2887 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2888 MII_TG3_AUXCTL_PCTL_VREG_11V;
2889 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2892 /* The PHY should not be powered down on some chips because
2893 * of bugs. */
2895 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2896 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2897 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2898 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2899 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2900 !tp->pci_fn))
2901 return;
2903 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2904 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2905 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2906 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2907 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2908 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2911 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2914 /* tp->lock is held. */
2915 static int tg3_nvram_lock(struct tg3 *tp)
2917 if (tg3_flag(tp, NVRAM)) {
2918 int i;
2920 if (tp->nvram_lock_cnt == 0) {
2921 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2922 for (i = 0; i < 8000; i++) {
2923 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2924 break;
2925 udelay(20);
2927 if (i == 8000) {
2928 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2929 return -ENODEV;
2932 tp->nvram_lock_cnt++;
2934 return 0;
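/*
* Editorial note: the arbitration loop above polls SWARB_GNT1 up to
* 8000 times with a 20 us delay, so a lock attempt gives up with
* -ENODEV after roughly 160 ms.  The lock is also recursive within
* the driver: nvram_lock_cnt counts nested holders, and only the
* outermost tg3_nvram_unlock() below releases the hardware grant.
*/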
2937 /* tp->lock is held. */
2938 static void tg3_nvram_unlock(struct tg3 *tp)
2940 if (tg3_flag(tp, NVRAM)) {
2941 if (tp->nvram_lock_cnt > 0)
2942 tp->nvram_lock_cnt--;
2943 if (tp->nvram_lock_cnt == 0)
2944 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2948 /* tp->lock is held. */
2949 static void tg3_enable_nvram_access(struct tg3 *tp)
2951 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2952 u32 nvaccess = tr32(NVRAM_ACCESS);
2954 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2958 /* tp->lock is held. */
2959 static void tg3_disable_nvram_access(struct tg3 *tp)
2961 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2962 u32 nvaccess = tr32(NVRAM_ACCESS);
2964 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2968 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2969 u32 offset, u32 *val)
2971 u32 tmp;
2972 int i;
2974 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2975 return -EINVAL;
2977 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2978 EEPROM_ADDR_DEVID_MASK |
2979 EEPROM_ADDR_READ);
2980 tw32(GRC_EEPROM_ADDR,
2981 tmp |
2982 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2983 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2984 EEPROM_ADDR_ADDR_MASK) |
2985 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2987 for (i = 0; i < 1000; i++) {
2988 tmp = tr32(GRC_EEPROM_ADDR);
2990 if (tmp & EEPROM_ADDR_COMPLETE)
2991 break;
2992 msleep(1);
2994 if (!(tmp & EEPROM_ADDR_COMPLETE))
2995 return -EBUSY;
2997 tmp = tr32(GRC_EEPROM_DATA);
3000 /* The data will always be opposite the native endian
3001 * format.  Perform a blind byteswap to compensate. */
3003 *val = swab32(tmp);
3005 return 0;
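/*
* Editorial aside: per the comment above, GRC_EEPROM_DATA always
* returns the four bytes in the order opposite to the host's native
* endianness, so one unconditional swab32() is correct on both LE
* and BE machines -- e.g. a raw readout of 0x78563412 becomes
* 0x12345678 after the swap.
*/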
3008 #define NVRAM_CMD_TIMEOUT 10000
3010 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3012 int i;
3014 tw32(NVRAM_CMD, nvram_cmd);
3015 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3016 udelay(10);
3017 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3018 udelay(10);
3019 break;
3023 if (i == NVRAM_CMD_TIMEOUT)
3024 return -EBUSY;
3026 return 0;
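/*
* Editorial note: with NVRAM_CMD_TIMEOUT == 10000 and a 10 us poll
* interval, tg3_nvram_exec_cmd() waits at most ~100 ms for
* NVRAM_CMD_DONE before giving up with -EBUSY, which bounds how long
* any single NVRAM word operation can stall its caller.
*/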
3029 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3031 if (tg3_flag(tp, NVRAM) &&
3032 tg3_flag(tp, NVRAM_BUFFERED) &&
3033 tg3_flag(tp, FLASH) &&
3034 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3035 (tp->nvram_jedecnum == JEDEC_ATMEL))
3037 addr = ((addr / tp->nvram_pagesize) <<
3038 ATMEL_AT45DB0X1B_PAGE_POS) +
3039 (addr % tp->nvram_pagesize);
3041 return addr;
3044 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3046 if (tg3_flag(tp, NVRAM) &&
3047 tg3_flag(tp, NVRAM_BUFFERED) &&
3048 tg3_flag(tp, FLASH) &&
3049 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3050 (tp->nvram_jedecnum == JEDEC_ATMEL))
3052 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3053 tp->nvram_pagesize) +
3054 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3056 return addr;
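/*
* Worked example (editorial): Atmel AT45DB0x1B parts address flash as
* <page number><byte-in-page> instead of linearly.  Assuming the
* usual 264-byte page and a 9-bit in-page offset field
* (i.e. ATMEL_AT45DB0X1B_PAGE_POS == 9), linear address 1000 maps to
*
*	page = 1000 / 264 = 3,  off = 1000 % 264 = 208
*	phys = (3 << 9) + 208 = 1744
*
* and tg3_nvram_logical_addr() inverts it:
*
*	(1744 >> 9) * 264 + (1744 & 0x1ff) = 792 + 208 = 1000
*/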
3059 /* NOTE: Data read in from NVRAM is byteswapped according to
3060 * the byteswapping settings for all other register accesses.
3061 * tg3 devices are BE devices, so on a BE machine, the data
3062 * returned will be exactly as it is seen in NVRAM.  On a LE
3063 * machine, the 32-bit value will be byteswapped. */
3065 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3067 int ret;
3069 if (!tg3_flag(tp, NVRAM))
3070 return tg3_nvram_read_using_eeprom(tp, offset, val);
3072 offset = tg3_nvram_phys_addr(tp, offset);
3074 if (offset > NVRAM_ADDR_MSK)
3075 return -EINVAL;
3077 ret = tg3_nvram_lock(tp);
3078 if (ret)
3079 return ret;
3081 tg3_enable_nvram_access(tp);
3083 tw32(NVRAM_ADDR, offset);
3084 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3085 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3087 if (ret == 0)
3088 *val = tr32(NVRAM_RDDATA);
3090 tg3_disable_nvram_access(tp);
3092 tg3_nvram_unlock(tp);
3094 return ret;
3097 /* Ensures NVRAM data is in bytestream format. */
3098 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3100 u32 v;
3101 int res = tg3_nvram_read(tp, offset, &v);
3102 if (!res)
3103 *val = cpu_to_be32(v);
3104 return res;
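/*
* Usage sketch (editorial; the offset is hypothetical): callers that
* want NVRAM contents as a bytestream -- MAC addresses, checksummed
* regions -- go through this wrapper rather than raw tg3_nvram_read():
*
*	__be32 word;
*	if (!tg3_nvram_read_be32(tp, 0x7c, &word))
*		memcpy(buf, &word, 4);	/* bytes exactly as stored */
*/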
3107 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3108 u32 offset, u32 len, u8 *buf)
3110 int i, j, rc = 0;
3111 u32 val;
3113 for (i = 0; i < len; i += 4) {
3114 u32 addr;
3115 __be32 data;
3117 addr = offset + i;
3119 memcpy(&data, buf + i, 4);
3122 /* The SEEPROM interface expects the data to always be opposite
3123 * the native endian format.  We accomplish this by reversing
3124 * all the operations that would have been performed on the
3125 * data from a call to tg3_nvram_read_be32(). */
3127 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3129 val = tr32(GRC_EEPROM_ADDR);
3130 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3132 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3133 EEPROM_ADDR_READ);
3134 tw32(GRC_EEPROM_ADDR, val |
3135 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3136 (addr & EEPROM_ADDR_ADDR_MASK) |
3137 EEPROM_ADDR_START |
3138 EEPROM_ADDR_WRITE);
3140 for (j = 0; j < 1000; j++) {
3141 val = tr32(GRC_EEPROM_ADDR);
3143 if (val & EEPROM_ADDR_COMPLETE)
3144 break;
3145 msleep(1);
3147 if (!(val & EEPROM_ADDR_COMPLETE)) {
3148 rc = -EBUSY;
3149 break;
3153 return rc;
3156 /* offset and length are dword aligned */
3157 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3158 u8 *buf)
3160 int ret = 0;
3161 u32 pagesize = tp->nvram_pagesize;
3162 u32 pagemask = pagesize - 1;
3163 u32 nvram_cmd;
3164 u8 *tmp;
3166 tmp = kmalloc(pagesize, GFP_KERNEL);
3167 if (tmp == NULL)
3168 return -ENOMEM;
3170 while (len) {
3171 int j;
3172 u32 phy_addr, page_off, size;
3174 phy_addr = offset & ~pagemask;
3176 for (j = 0; j < pagesize; j += 4) {
3177 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3178 (__be32 *) (tmp + j));
3179 if (ret)
3180 break;
3182 if (ret)
3183 break;
3185 page_off = offset & pagemask;
3186 size = pagesize;
3187 if (len < size)
3188 size = len;
3190 len -= size;
3192 memcpy(tmp + page_off, buf, size);
3194 offset = offset + (pagesize - page_off);
3196 tg3_enable_nvram_access(tp);
3199 /* Before we can erase the flash page, we need
3200 * to issue a special "write enable" command. */
3202 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3204 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3205 break;
3207 /* Erase the target page */
3208 tw32(NVRAM_ADDR, phy_addr);
3210 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3211 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3213 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3214 break;
3216 /* Issue another write enable to start the write. */
3217 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3219 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3220 break;
3222 for (j = 0; j < pagesize; j += 4) {
3223 __be32 data;
3225 data = *((__be32 *) (tmp + j));
3227 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3229 tw32(NVRAM_ADDR, phy_addr + j);
3231 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3232 NVRAM_CMD_WR;
3234 if (j == 0)
3235 nvram_cmd |= NVRAM_CMD_FIRST;
3236 else if (j == (pagesize - 4))
3237 nvram_cmd |= NVRAM_CMD_LAST;
3239 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3240 if (ret)
3241 break;
3243 if (ret)
3244 break;
3247 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3248 tg3_nvram_exec_cmd(tp, nvram_cmd);
3250 kfree(tmp);
3252 return ret;
3255 /* offset and length are dword aligned */
3256 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3257 u8 *buf)
3259 int i, ret = 0;
3261 for (i = 0; i < len; i += 4, offset += 4) {
3262 u32 page_off, phy_addr, nvram_cmd;
3263 __be32 data;
3265 memcpy(&data, buf + i, 4);
3266 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3268 page_off = offset % tp->nvram_pagesize;
3270 phy_addr = tg3_nvram_phys_addr(tp, offset);
3272 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3274 if (page_off == 0 || i == 0)
3275 nvram_cmd |= NVRAM_CMD_FIRST;
3276 if (page_off == (tp->nvram_pagesize - 4))
3277 nvram_cmd |= NVRAM_CMD_LAST;
3279 if (i == (len - 4))
3280 nvram_cmd |= NVRAM_CMD_LAST;
3282 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3283 !tg3_flag(tp, FLASH) ||
3284 !tg3_flag(tp, 57765_PLUS))
3285 tw32(NVRAM_ADDR, phy_addr);
3287 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3288 !tg3_flag(tp, 5755_PLUS) &&
3289 (tp->nvram_jedecnum == JEDEC_ST) &&
3290 (nvram_cmd & NVRAM_CMD_FIRST)) {
3291 u32 cmd;
3293 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3294 ret = tg3_nvram_exec_cmd(tp, cmd);
3295 if (ret)
3296 break;
3298 if (!tg3_flag(tp, FLASH)) {
3299 /* We always do complete word writes to eeprom. */
3300 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3303 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3304 if (ret)
3305 break;
3307 return ret;
3310 /* offset and length are dword aligned */
3311 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3313 int ret;
3315 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3316 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3317 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3318 udelay(40);
3321 if (!tg3_flag(tp, NVRAM)) {
3322 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3323 } else {
3324 u32 grc_mode;
3326 ret = tg3_nvram_lock(tp);
3327 if (ret)
3328 return ret;
3330 tg3_enable_nvram_access(tp);
3331 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3332 tw32(NVRAM_WRITE1, 0x406);
3334 grc_mode = tr32(GRC_MODE);
3335 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3337 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3338 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3339 buf);
3340 } else {
3341 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3342 buf);
3345 grc_mode = tr32(GRC_MODE);
3346 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3348 tg3_disable_nvram_access(tp);
3349 tg3_nvram_unlock(tp);
3352 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3353 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3354 udelay(40);
3357 return ret;
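/*
* Editorial summary: tg3_nvram_write_block() picks one of three write
* paths -- legacy SEEPROM (word-at-a-time through the GRC_EEPROM_*
* registers), buffered flash or non-flash NVRAM (plain word writes in
* tg3_nvram_write_block_buffered()), and unbuffered flash, where
* tg3_nvram_write_block_unbuffered() must read, erase, and rewrite an
* entire page for every page it touches.  It also drops the GPIO
* write protection around the operation when EEPROM_WRITE_PROT is set.
*/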
3360 #define RX_CPU_SCRATCH_BASE 0x30000
3361 #define RX_CPU_SCRATCH_SIZE 0x04000
3362 #define TX_CPU_SCRATCH_BASE 0x34000
3363 #define TX_CPU_SCRATCH_SIZE 0x04000
3365 /* tp->lock is held. */
3366 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3368 int i;
3370 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3372 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3373 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3375 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3376 return 0;
3378 if (offset == RX_CPU_BASE) {
3379 for (i = 0; i < 10000; i++) {
3380 tw32(offset + CPU_STATE, 0xffffffff);
3381 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3382 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3383 break;
3386 tw32(offset + CPU_STATE, 0xffffffff);
3387 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3388 udelay(10);
3389 } else {
3390 for (i = 0; i < 10000; i++) {
3391 tw32(offset + CPU_STATE, 0xffffffff);
3392 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3393 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3394 break;
3398 if (i >= 10000) {
3399 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3400 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3401 return -ENODEV;
3404 /* Clear firmware's nvram arbitration. */
3405 if (tg3_flag(tp, NVRAM))
3406 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3407 return 0;
3410 struct fw_info {
3411 unsigned int fw_base;
3412 unsigned int fw_len;
3413 const __be32 *fw_data;
3416 /* tp->lock is held. */
3417 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3418 u32 cpu_scratch_base, int cpu_scratch_size,
3419 struct fw_info *info)
3421 int err, lock_err, i;
3422 void (*write_op)(struct tg3 *, u32, u32);
3424 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3425 netdev_err(tp->dev,
3426 "%s: Trying to load TX cpu firmware which is 5705\n",
3427 __func__);
3428 return -EINVAL;
3431 if (tg3_flag(tp, 5705_PLUS))
3432 write_op = tg3_write_mem;
3433 else
3434 write_op = tg3_write_indirect_reg32;
3436 /* It is possible that bootcode is still loading at this point.
3437 * Get the nvram lock first before halting the cpu. */
3439 lock_err = tg3_nvram_lock(tp);
3440 err = tg3_halt_cpu(tp, cpu_base);
3441 if (!lock_err)
3442 tg3_nvram_unlock(tp);
3443 if (err)
3444 goto out;
3446 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3447 write_op(tp, cpu_scratch_base + i, 0);
3448 tw32(cpu_base + CPU_STATE, 0xffffffff);
3449 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3450 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3451 write_op(tp, (cpu_scratch_base +
3452 (info->fw_base & 0xffff) +
3453 (i * sizeof(u32))),
3454 be32_to_cpu(info->fw_data[i]));
3456 err = 0;
3458 out:
3459 return err;
3462 /* tp->lock is held. */
3463 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3465 struct fw_info info;
3466 const __be32 *fw_data;
3467 int err, i;
3469 fw_data = (void *)tp->fw->data;
3471 /* Firmware blob starts with version numbers, followed by
3472 * start address and length.  We set the complete length here:
3473 * length = end_address_of_bss - start_address_of_text.
3474 * The remainder is the blob to be loaded contiguously
3475 * from the start address. */
3477 info.fw_base = be32_to_cpu(fw_data[1]);
3478 info.fw_len = tp->fw->size - 12;
3479 info.fw_data = &fw_data[3];
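/*
* Editorial note, per the comment above: the 12 bytes skipped here
* are the three big-endian header words -- fw_data[0] the firmware
* version, fw_data[1] the load (start) address, fw_data[2] the stated
* length -- so fw_len covers everything after the header and fw_data
* points at the first payload word.
*/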
3481 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3482 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3483 &info);
3484 if (err)
3485 return err;
3487 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3488 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3489 &info);
3490 if (err)
3491 return err;
3493 /* Now startup only the RX cpu. */
3494 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3495 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3497 for (i = 0; i < 5; i++) {
3498 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3499 break;
3500 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3501 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3502 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3503 udelay(1000);
3505 if (i >= 5) {
3506 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3507 "should be %08x\n", __func__,
3508 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3509 return -ENODEV;
3511 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3512 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3514 return 0;
3517 /* tp->lock is held. */
3518 static int tg3_load_tso_firmware(struct tg3 *tp)
3520 struct fw_info info;
3521 const __be32 *fw_data;
3522 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3523 int err, i;
3525 if (tg3_flag(tp, HW_TSO_1) ||
3526 tg3_flag(tp, HW_TSO_2) ||
3527 tg3_flag(tp, HW_TSO_3))
3528 return 0;
3530 fw_data = (void *)tp->fw->data;
3532 /* Firmware blob starts with version numbers, followed by
3533 * start address and length.  We set the complete length here:
3534 * length = end_address_of_bss - start_address_of_text.
3535 * The remainder is the blob to be loaded contiguously
3536 * from the start address. */
3538 info.fw_base = be32_to_cpu(fw_data[1]);
3539 cpu_scratch_size = tp->fw_len;
3540 info.fw_len = tp->fw->size - 12;
3541 info.fw_data = &fw_data[3];
3543 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3544 cpu_base = RX_CPU_BASE;
3545 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3546 } else {
3547 cpu_base = TX_CPU_BASE;
3548 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3549 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3552 err = tg3_load_firmware_cpu(tp, cpu_base,
3553 cpu_scratch_base, cpu_scratch_size,
3554 &info);
3555 if (err)
3556 return err;
3558 /* Now startup the cpu. */
3559 tw32(cpu_base + CPU_STATE, 0xffffffff);
3560 tw32_f(cpu_base + CPU_PC, info.fw_base);
3562 for (i = 0; i < 5; i++) {
3563 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3564 break;
3565 tw32(cpu_base + CPU_STATE, 0xffffffff);
3566 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3567 tw32_f(cpu_base + CPU_PC, info.fw_base);
3568 udelay(1000);
3570 if (i >= 5) {
3571 netdev_err(tp->dev,
3572 "%s fails to set CPU PC, is %08x should be %08x\n",
3573 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3574 return -ENODEV;
3576 tw32(cpu_base + CPU_STATE, 0xffffffff);
3577 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3578 return 0;
3582 /* tp->lock is held. */
3583 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3585 u32 addr_high, addr_low;
3586 int i;
3588 addr_high = ((tp->dev->dev_addr[0] << 8) |
3589 tp->dev->dev_addr[1]);
3590 addr_low = ((tp->dev->dev_addr[2] << 24) |
3591 (tp->dev->dev_addr[3] << 16) |
3592 (tp->dev->dev_addr[4] << 8) |
3593 (tp->dev->dev_addr[5] << 0));
3594 for (i = 0; i < 4; i++) {
3595 if (i == 1 && skip_mac_1)
3596 continue;
3597 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3598 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3601 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3602 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3603 for (i = 0; i < 12; i++) {
3604 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3605 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3609 addr_high = (tp->dev->dev_addr[0] +
3610 tp->dev->dev_addr[1] +
3611 tp->dev->dev_addr[2] +
3612 tp->dev->dev_addr[3] +
3613 tp->dev->dev_addr[4] +
3614 tp->dev->dev_addr[5]) &
3615 TX_BACKOFF_SEED_MASK;
3616 tw32(MAC_TX_BACKOFF_SEED, addr_high);
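/*
* Worked example (editorial): for dev_addr 00:10:18:aa:bb:cc the pair
* written above is
*
*	addr_high = 0x00000010	(bytes 0-1)
*	addr_low  = 0x18aabbcc	(bytes 2-5)
*
* mirrored into the four MAC_ADDR_* slots (slot 1 can be skipped via
* skip_mac_1; 5703/5704 also fill twelve MAC_EXTADDR_* slots), while
* the backoff seed is just the byte sum,
* (0x00+0x10+0x18+0xaa+0xbb+0xcc) & TX_BACKOFF_SEED_MASK.
*/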
3619 static void tg3_enable_register_access(struct tg3 *tp)
3622 /* Make sure register accesses (indirect or otherwise) will function
3623 * correctly. */
3625 pci_write_config_dword(tp->pdev,
3626 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3629 static int tg3_power_up(struct tg3 *tp)
3631 int err;
3633 tg3_enable_register_access(tp);
3635 err = pci_set_power_state(tp->pdev, PCI_D0);
3636 if (!err) {
3637 /* Switch out of Vaux if it is a NIC */
3638 tg3_pwrsrc_switch_to_vmain(tp);
3639 } else {
3640 netdev_err(tp->dev, "Transition to D0 failed\n");
3643 return err;
3646 static int tg3_setup_phy(struct tg3 *, int);
3648 static int tg3_power_down_prepare(struct tg3 *tp)
3650 u32 misc_host_ctrl;
3651 bool device_should_wake, do_low_power;
3653 tg3_enable_register_access(tp);
3655 /* Restore the CLKREQ setting. */
3656 if (tg3_flag(tp, CLKREQ_BUG)) {
3657 u16 lnkctl;
3659 pci_read_config_word(tp->pdev,
3660 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3661 &lnkctl);
3662 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3663 pci_write_config_word(tp->pdev,
3664 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3665 lnkctl);
3668 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3669 tw32(TG3PCI_MISC_HOST_CTRL,
3670 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3672 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3673 tg3_flag(tp, WOL_ENABLE);
3675 if (tg3_flag(tp, USE_PHYLIB)) {
3676 do_low_power = false;
3677 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3678 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3679 struct phy_device *phydev;
3680 u32 phyid, advertising;
3682 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3684 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3686 tp->link_config.speed = phydev->speed;
3687 tp->link_config.duplex = phydev->duplex;
3688 tp->link_config.autoneg = phydev->autoneg;
3689 tp->link_config.advertising = phydev->advertising;
3691 advertising = ADVERTISED_TP |
3692 ADVERTISED_Pause |
3693 ADVERTISED_Autoneg |
3694 ADVERTISED_10baseT_Half;
3696 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3697 if (tg3_flag(tp, WOL_SPEED_100MB))
3698 advertising |=
3699 ADVERTISED_100baseT_Half |
3700 ADVERTISED_100baseT_Full |
3701 ADVERTISED_10baseT_Full;
3702 else
3703 advertising |= ADVERTISED_10baseT_Full;
3706 phydev->advertising = advertising;
3708 phy_start_aneg(phydev);
3710 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3711 if (phyid != PHY_ID_BCMAC131) {
3712 phyid &= PHY_BCM_OUI_MASK;
3713 if (phyid == PHY_BCM_OUI_1 ||
3714 phyid == PHY_BCM_OUI_2 ||
3715 phyid == PHY_BCM_OUI_3)
3716 do_low_power = true;
3719 } else {
3720 do_low_power = true;
3722 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3723 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3725 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3726 tg3_setup_phy(tp, 0);
3729 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3730 u32 val;
3732 val = tr32(GRC_VCPU_EXT_CTRL);
3733 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3734 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3735 int i;
3736 u32 val;
3738 for (i = 0; i < 200; i++) {
3739 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3740 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3741 break;
3742 msleep(1);
3745 if (tg3_flag(tp, WOL_CAP))
3746 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3747 WOL_DRV_STATE_SHUTDOWN |
3748 WOL_DRV_WOL |
3749 WOL_SET_MAGIC_PKT);
3751 if (device_should_wake) {
3752 u32 mac_mode;
3754 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3755 if (do_low_power &&
3756 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3757 tg3_phy_auxctl_write(tp,
3758 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3759 MII_TG3_AUXCTL_PCTL_WOL_EN |
3760 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3761 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3762 udelay(40);
3765 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3766 mac_mode = MAC_MODE_PORT_MODE_GMII;
3767 else
3768 mac_mode = MAC_MODE_PORT_MODE_MII;
3770 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3771 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3772 ASIC_REV_5700) {
3773 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3774 SPEED_100 : SPEED_10;
3775 if (tg3_5700_link_polarity(tp, speed))
3776 mac_mode |= MAC_MODE_LINK_POLARITY;
3777 else
3778 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3780 } else {
3781 mac_mode = MAC_MODE_PORT_MODE_TBI;
3784 if (!tg3_flag(tp, 5750_PLUS))
3785 tw32(MAC_LED_CTRL, tp->led_ctrl);
3787 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3788 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3789 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3790 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3792 if (tg3_flag(tp, ENABLE_APE))
3793 mac_mode |= MAC_MODE_APE_TX_EN |
3794 MAC_MODE_APE_RX_EN |
3795 MAC_MODE_TDE_ENABLE;
3797 tw32_f(MAC_MODE, mac_mode);
3798 udelay(100);
3800 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3801 udelay(10);
3804 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3805 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3806 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3807 u32 base_val;
3809 base_val = tp->pci_clock_ctrl;
3810 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3811 CLOCK_CTRL_TXCLK_DISABLE);
3813 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3814 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3815 } else if (tg3_flag(tp, 5780_CLASS) ||
3816 tg3_flag(tp, CPMU_PRESENT) ||
3817 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3818 /* do nothing */
3819 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3820 u32 newbits1, newbits2;
3822 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3823 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3824 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3825 CLOCK_CTRL_TXCLK_DISABLE |
3826 CLOCK_CTRL_ALTCLK);
3827 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3828 } else if (tg3_flag(tp, 5705_PLUS)) {
3829 newbits1 = CLOCK_CTRL_625_CORE;
3830 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3831 } else {
3832 newbits1 = CLOCK_CTRL_ALTCLK;
3833 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3836 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3837 40);
3839 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3840 40);
3842 if (!tg3_flag(tp, 5705_PLUS)) {
3843 u32 newbits3;
3845 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3846 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3847 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3848 CLOCK_CTRL_TXCLK_DISABLE |
3849 CLOCK_CTRL_44MHZ_CORE);
3850 } else {
3851 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3854 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3855 tp->pci_clock_ctrl | newbits3, 40);
3859 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3860 tg3_power_down_phy(tp, do_low_power);
3862 tg3_frob_aux_power(tp, true);
3864 /* Workaround for unstable PLL clock */
3865 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3866 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3867 u32 val = tr32(0x7d00);
3869 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3870 tw32(0x7d00, val);
3871 if (!tg3_flag(tp, ENABLE_ASF)) {
3872 int err;
3874 err = tg3_nvram_lock(tp);
3875 tg3_halt_cpu(tp, RX_CPU_BASE);
3876 if (!err)
3877 tg3_nvram_unlock(tp);
3881 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3883 return 0;
3886 static void tg3_power_down(struct tg3 *tp)
3888 tg3_power_down_prepare(tp);
3890 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3891 pci_set_power_state(tp->pdev, PCI_D3hot);
3894 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3896 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3897 case MII_TG3_AUX_STAT_10HALF:
3898 *speed = SPEED_10;
3899 *duplex = DUPLEX_HALF;
3900 break;
3902 case MII_TG3_AUX_STAT_10FULL:
3903 *speed = SPEED_10;
3904 *duplex = DUPLEX_FULL;
3905 break;
3907 case MII_TG3_AUX_STAT_100HALF:
3908 *speed = SPEED_100;
3909 *duplex = DUPLEX_HALF;
3910 break;
3912 case MII_TG3_AUX_STAT_100FULL:
3913 *speed = SPEED_100;
3914 *duplex = DUPLEX_FULL;
3915 break;
3917 case MII_TG3_AUX_STAT_1000HALF:
3918 *speed = SPEED_1000;
3919 *duplex = DUPLEX_HALF;
3920 break;
3922 case MII_TG3_AUX_STAT_1000FULL:
3923 *speed = SPEED_1000;
3924 *duplex = DUPLEX_FULL;
3925 break;
3927 default:
3928 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3929 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3930 SPEED_10;
3931 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3932 DUPLEX_HALF;
3933 break;
3935 *speed = SPEED_UNKNOWN;
3936 *duplex = DUPLEX_UNKNOWN;
3937 break;
3941 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3943 int err = 0;
3944 u32 val, new_adv;
3946 new_adv = ADVERTISE_CSMA;
3947 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3948 new_adv |= mii_advertise_flowctrl(flowctrl);
3950 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3951 if (err)
3952 goto done;
3954 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3955 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3957 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3958 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3959 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3961 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3962 if (err)
3963 goto done;
3966 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3967 goto done;
3969 tw32(TG3_CPMU_EEE_MODE,
3970 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3972 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3973 if (!err) {
3974 u32 err2;
3976 val = 0;
3977 /* Advertise 100-BaseTX EEE ability */
3978 if (advertise & ADVERTISED_100baseT_Full)
3979 val |= MDIO_AN_EEE_ADV_100TX;
3980 /* Advertise 1000-BaseT EEE ability */
3981 if (advertise & ADVERTISED_1000baseT_Full)
3982 val |= MDIO_AN_EEE_ADV_1000T;
3983 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3984 if (err)
3985 val = 0;
3987 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3988 case ASIC_REV_5717:
3989 case ASIC_REV_57765:
3990 case ASIC_REV_57766:
3991 case ASIC_REV_5719:
3992 /* If we advertised any EEE abilities above... */
3993 if (val)
3994 val = MII_TG3_DSP_TAP26_ALNOKO |
3995 MII_TG3_DSP_TAP26_RMRXSTO |
3996 MII_TG3_DSP_TAP26_OPCSINPT;
3997 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3998 /* Fall through */
3999 case ASIC_REV_5720:
4000 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4001 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4002 MII_TG3_DSP_CH34TP2_HIBW01);
4005 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
4006 if (!err)
4007 err = err2;
4010 done:
4011 return err;
4014 static void tg3_phy_copper_begin(struct tg3 *tp)
4016 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4017 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4018 u32 adv, fc;
4020 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4021 adv = ADVERTISED_10baseT_Half |
4022 ADVERTISED_10baseT_Full;
4023 if (tg3_flag(tp, WOL_SPEED_100MB))
4024 adv |= ADVERTISED_100baseT_Half |
4025 ADVERTISED_100baseT_Full;
4027 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4028 } else {
4029 adv = tp->link_config.advertising;
4030 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4031 adv &= ~(ADVERTISED_1000baseT_Half |
4032 ADVERTISED_1000baseT_Full);
4034 fc = tp->link_config.flowctrl;
4037 tg3_phy_autoneg_cfg(tp, adv, fc);
4039 tg3_writephy(tp, MII_BMCR,
4040 BMCR_ANENABLE | BMCR_ANRESTART);
4041 } else {
4042 int i;
4043 u32 bmcr, orig_bmcr;
4045 tp->link_config.active_speed = tp->link_config.speed;
4046 tp->link_config.active_duplex = tp->link_config.duplex;
4048 bmcr = 0;
4049 switch (tp->link_config.speed) {
4050 default:
4051 case SPEED_10:
4052 break;
4054 case SPEED_100:
4055 bmcr |= BMCR_SPEED100;
4056 break;
4058 case SPEED_1000:
4059 bmcr |= BMCR_SPEED1000;
4060 break;
4063 if (tp->link_config.duplex == DUPLEX_FULL)
4064 bmcr |= BMCR_FULLDPLX;
4066 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4067 (bmcr != orig_bmcr)) {
4068 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4069 for (i = 0; i < 1500; i++) {
4070 u32 tmp;
4072 udelay(10);
4073 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4074 tg3_readphy(tp, MII_BMSR, &tmp))
4075 continue;
4076 if (!(tmp & BMSR_LSTATUS)) {
4077 udelay(40);
4078 break;
4081 tg3_writephy(tp, MII_BMCR, bmcr);
4082 udelay(40);
4087 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4089 int err;
4091 /* Turn off tap power management. */
4092 /* Set Extended packet length bit */
4093 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4095 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4096 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4097 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4098 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4099 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4101 udelay(40);
4103 return err;
4106 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4108 u32 advmsk, tgtadv, advertising;
4110 advertising = tp->link_config.advertising;
4111 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4113 advmsk = ADVERTISE_ALL;
4114 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4115 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4116 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4119 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4120 return false;
4122 if ((*lcladv & advmsk) != tgtadv)
4123 return false;
4125 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4126 u32 tg3_ctrl;
4128 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4130 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4131 return false;
4133 if (tgtadv &&
4134 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4135 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4136 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4137 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4138 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4139 } else {
4140 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4143 if (tg3_ctrl != tgtadv)
4144 return false;
4147 return true;
4150 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4152 u32 lpeth = 0;
4154 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4155 u32 val;
4157 if (tg3_readphy(tp, MII_STAT1000, &val))
4158 return false;
4160 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4163 if (tg3_readphy(tp, MII_LPA, rmtadv))
4164 return false;
4166 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4167 tp->link_config.rmt_adv = lpeth;
4169 return true;
4172 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4174 int current_link_up;
4175 u32 bmsr, val;
4176 u32 lcl_adv, rmt_adv;
4177 u16 current_speed;
4178 u8 current_duplex;
4179 int i, err;
4181 tw32(MAC_EVENT, 0);
4183 tw32_f(MAC_STATUS,
4184 (MAC_STATUS_SYNC_CHANGED |
4185 MAC_STATUS_CFG_CHANGED |
4186 MAC_STATUS_MI_COMPLETION |
4187 MAC_STATUS_LNKSTATE_CHANGED));
4188 udelay(40);
4190 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4191 tw32_f(MAC_MI_MODE,
4192 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4193 udelay(80);
4196 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4198 /* Some third-party PHYs need to be reset on link going
4199 * down. */
4201 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4202 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4203 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4204 netif_carrier_ok(tp->dev)) {
4205 tg3_readphy(tp, MII_BMSR, &bmsr);
4206 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4207 !(bmsr & BMSR_LSTATUS))
4208 force_reset = 1;
4210 if (force_reset)
4211 tg3_phy_reset(tp);
4213 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4214 tg3_readphy(tp, MII_BMSR, &bmsr);
4215 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4216 !tg3_flag(tp, INIT_COMPLETE))
4217 bmsr = 0;
4219 if (!(bmsr & BMSR_LSTATUS)) {
4220 err = tg3_init_5401phy_dsp(tp);
4221 if (err)
4222 return err;
4224 tg3_readphy(tp, MII_BMSR, &bmsr);
4225 for (i = 0; i < 1000; i++) {
4226 udelay(10);
4227 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4228 (bmsr & BMSR_LSTATUS)) {
4229 udelay(40);
4230 break;
4234 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4235 TG3_PHY_REV_BCM5401_B0 &&
4236 !(bmsr & BMSR_LSTATUS) &&
4237 tp->link_config.active_speed == SPEED_1000) {
4238 err = tg3_phy_reset(tp);
4239 if (!err)
4240 err = tg3_init_5401phy_dsp(tp);
4241 if (err)
4242 return err;
4245 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4246 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4247 /* 5701 {A0,B0} CRC bug workaround */
4248 tg3_writephy(tp, 0x15, 0x0a75);
4249 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4250 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4251 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4254 /* Clear pending interrupts... */
4255 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4256 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4258 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4259 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4260 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4261 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4263 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4265 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4266 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4267 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4268 else
4269 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4272 current_link_up = 0;
4273 current_speed = SPEED_UNKNOWN;
4274 current_duplex = DUPLEX_UNKNOWN;
4275 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4276 tp->link_config.rmt_adv = 0;
4278 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4279 err = tg3_phy_auxctl_read(tp,
4280 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4281 &val);
4282 if (!err && !(val & (1 << 10))) {
4283 tg3_phy_auxctl_write(tp,
4284 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4285 val | (1 << 10));
4286 goto relink;
4290 bmsr = 0;
4291 for (i = 0; i < 100; i++) {
4292 tg3_readphy(tp, MII_BMSR, &bmsr);
4293 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4294 (bmsr & BMSR_LSTATUS))
4295 break;
4296 udelay(40);
4299 if (bmsr & BMSR_LSTATUS) {
4300 u32 aux_stat, bmcr;
4302 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4303 for (i = 0; i < 2000; i++) {
4304 udelay(10);
4305 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4306 aux_stat)
4307 break;
4310 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4311 &current_speed,
4312 &current_duplex);
4314 bmcr = 0;
4315 for (i = 0; i < 200; i++) {
4316 tg3_readphy(tp, MII_BMCR, &bmcr);
4317 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4318 continue;
4319 if (bmcr && bmcr != 0x7fff)
4320 break;
4321 udelay(10);
4324 lcl_adv = 0;
4325 rmt_adv = 0;
4327 tp->link_config.active_speed = current_speed;
4328 tp->link_config.active_duplex = current_duplex;
4330 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4331 if ((bmcr & BMCR_ANENABLE) &&
4332 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4333 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4334 current_link_up = 1;
4335 } else {
4336 if (!(bmcr & BMCR_ANENABLE) &&
4337 tp->link_config.speed == current_speed &&
4338 tp->link_config.duplex == current_duplex &&
4339 tp->link_config.flowctrl ==
4340 tp->link_config.active_flowctrl) {
4341 current_link_up = 1;
4345 if (current_link_up == 1 &&
4346 tp->link_config.active_duplex == DUPLEX_FULL) {
4347 u32 reg, bit;
4349 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4350 reg = MII_TG3_FET_GEN_STAT;
4351 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4352 } else {
4353 reg = MII_TG3_EXT_STAT;
4354 bit = MII_TG3_EXT_STAT_MDIX;
4357 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4358 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4360 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4364 relink:
4365 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4366 tg3_phy_copper_begin(tp);
4368 tg3_readphy(tp, MII_BMSR, &bmsr);
4369 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4370 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4371 current_link_up = 1;
4374 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4375 if (current_link_up == 1) {
4376 if (tp->link_config.active_speed == SPEED_100 ||
4377 tp->link_config.active_speed == SPEED_10)
4378 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4379 else
4380 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4381 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4382 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4383 else
4384 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4386 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4387 if (tp->link_config.active_duplex == DUPLEX_HALF)
4388 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4391 if (current_link_up == 1 &&
4392 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4393 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4394 else
4395 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4398 /* ??? Without this setting Netgear GA302T PHY does not
4399 * ??? send/receive packets...
4401 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4402 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4403 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4404 tw32_f(MAC_MI_MODE, tp->mi_mode);
4405 udelay(80);
4408 tw32_f(MAC_MODE, tp->mac_mode);
4409 udelay(40);
4411 tg3_phy_eee_adjust(tp, current_link_up);
4413 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4414 /* Polled via timer. */
4415 tw32_f(MAC_EVENT, 0);
4416 } else {
4417 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4419 udelay(40);
4421 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4422 current_link_up == 1 &&
4423 tp->link_config.active_speed == SPEED_1000 &&
4424 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4425 udelay(120);
4426 tw32_f(MAC_STATUS,
4427 (MAC_STATUS_SYNC_CHANGED |
4428 MAC_STATUS_CFG_CHANGED));
4429 udelay(40);
4430 tg3_write_mem(tp,
4431 NIC_SRAM_FIRMWARE_MBOX,
4432 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4435 /* Prevent send BD corruption. */
4436 if (tg3_flag(tp, CLKREQ_BUG)) {
4437 u16 oldlnkctl, newlnkctl;
4439 pci_read_config_word(tp->pdev,
4440 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4441 &oldlnkctl);
4442 if (tp->link_config.active_speed == SPEED_100 ||
4443 tp->link_config.active_speed == SPEED_10)
4444 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4445 else
4446 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4447 if (newlnkctl != oldlnkctl)
4448 pci_write_config_word(tp->pdev,
4449 pci_pcie_cap(tp->pdev) +
4450 PCI_EXP_LNKCTL, newlnkctl);
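/* A minimal sketch, assuming a kernel that provides the pcie_capability
 * helpers (v3.7+); the open-coded config-space accesses above predate
 * them. The same CLKREQ toggle could then be written as:
 *
 *	if (tp->link_config.active_speed == SPEED_100 ||
 *	    tp->link_config.active_speed == SPEED_10)
 *		pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
 *					   PCI_EXP_LNKCTL_CLKREQ_EN);
 *	else
 *		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
 *					 PCI_EXP_LNKCTL_CLKREQ_EN);
 */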
4453 if (current_link_up != netif_carrier_ok(tp->dev)) {
4454 if (current_link_up)
4455 netif_carrier_on(tp->dev);
4456 else
4457 netif_carrier_off(tp->dev);
4458 tg3_link_report(tp);
4461 return 0;
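/* The paired MII_BMSR reads scattered through the function above cope
 * with BMSR_LSTATUS being a latching-low bit: the first read returns
 * (and clears) any remembered link-down event, the second reflects the
 * current state. A hypothetical helper capturing the idiom, for
 * illustration only (not part of the driver):
 */
static inline bool tg3_phy_link_is_up_example(struct tg3 *tp)
{
	u32 bmsr;

	tg3_readphy(tp, MII_BMSR, &bmsr);	/* flush the latched value */
	if (tg3_readphy(tp, MII_BMSR, &bmsr))	/* second read is live */
		return false;			/* MDIO access failed */

	return (bmsr & BMSR_LSTATUS) != 0;
}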
4464 struct tg3_fiber_aneginfo {
4465 int state;
4466 #define ANEG_STATE_UNKNOWN 0
4467 #define ANEG_STATE_AN_ENABLE 1
4468 #define ANEG_STATE_RESTART_INIT 2
4469 #define ANEG_STATE_RESTART 3
4470 #define ANEG_STATE_DISABLE_LINK_OK 4
4471 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4472 #define ANEG_STATE_ABILITY_DETECT 6
4473 #define ANEG_STATE_ACK_DETECT_INIT 7
4474 #define ANEG_STATE_ACK_DETECT 8
4475 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4476 #define ANEG_STATE_COMPLETE_ACK 10
4477 #define ANEG_STATE_IDLE_DETECT_INIT 11
4478 #define ANEG_STATE_IDLE_DETECT 12
4479 #define ANEG_STATE_LINK_OK 13
4480 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4481 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4483 u32 flags;
4484 #define MR_AN_ENABLE 0x00000001
4485 #define MR_RESTART_AN 0x00000002
4486 #define MR_AN_COMPLETE 0x00000004
4487 #define MR_PAGE_RX 0x00000008
4488 #define MR_NP_LOADED 0x00000010
4489 #define MR_TOGGLE_TX 0x00000020
4490 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4491 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4492 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4493 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4494 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4495 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4496 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4497 #define MR_TOGGLE_RX 0x00002000
4498 #define MR_NP_RX 0x00004000
4500 #define MR_LINK_OK 0x80000000
4502 unsigned long link_time, cur_time;
4504 u32 ability_match_cfg;
4505 int ability_match_count;
4507 char ability_match, idle_match, ack_match;
4509 u32 txconfig, rxconfig;
4510 #define ANEG_CFG_NP 0x00000080
4511 #define ANEG_CFG_ACK 0x00000040
4512 #define ANEG_CFG_RF2 0x00000020
4513 #define ANEG_CFG_RF1 0x00000010
4514 #define ANEG_CFG_PS2 0x00000001
4515 #define ANEG_CFG_PS1 0x00008000
4516 #define ANEG_CFG_HD 0x00004000
4517 #define ANEG_CFG_FD 0x00002000
4518 #define ANEG_CFG_INVAL 0x00001f06
4521 #define ANEG_OK 0
4522 #define ANEG_DONE 1
4523 #define ANEG_TIMER_ENAB 2
4524 #define ANEG_FAILED -1
4526 #define ANEG_STATE_SETTLE_TIME 10000
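/* Illustrative decode (an assumption about the hardware layout, not
 * driver code): the ANEG_CFG_* values mirror how the MAC exposes the
 * received 1000BASE-X configuration word in MAC_RX_AUTO_NEG, which is
 * why PS1/PS2 do not line up with the ADVERTISE_1000X* MII bits. The
 * pause bits would be recovered like so, matching what
 * tg3_setup_fiber_by_hand() later does with txflags:
 */
static inline u16 aneg_cfg_to_1000x_pause_example(u32 cfg)
{
	u16 adv = 0;

	if (cfg & ANEG_CFG_PS1)
		adv |= ADVERTISE_1000XPAUSE;
	if (cfg & ANEG_CFG_PS2)
		adv |= ADVERTISE_1000XPSE_ASYM;

	return adv;
}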
4528 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4529 struct tg3_fiber_aneginfo *ap)
4531 u16 flowctrl;
4532 unsigned long delta;
4533 u32 rx_cfg_reg;
4534 int ret;
4536 if (ap->state == ANEG_STATE_UNKNOWN) {
4537 ap->rxconfig = 0;
4538 ap->link_time = 0;
4539 ap->cur_time = 0;
4540 ap->ability_match_cfg = 0;
4541 ap->ability_match_count = 0;
4542 ap->ability_match = 0;
4543 ap->idle_match = 0;
4544 ap->ack_match = 0;
4546 ap->cur_time++;
4548 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4549 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4551 if (rx_cfg_reg != ap->ability_match_cfg) {
4552 ap->ability_match_cfg = rx_cfg_reg;
4553 ap->ability_match = 0;
4554 ap->ability_match_count = 0;
4555 } else {
4556 if (++ap->ability_match_count > 1) {
4557 ap->ability_match = 1;
4558 ap->ability_match_cfg = rx_cfg_reg;
4561 if (rx_cfg_reg & ANEG_CFG_ACK)
4562 ap->ack_match = 1;
4563 else
4564 ap->ack_match = 0;
4566 ap->idle_match = 0;
4567 } else {
4568 ap->idle_match = 1;
4569 ap->ability_match_cfg = 0;
4570 ap->ability_match_count = 0;
4571 ap->ability_match = 0;
4572 ap->ack_match = 0;
4574 rx_cfg_reg = 0;
4577 ap->rxconfig = rx_cfg_reg;
4578 ret = ANEG_OK;
4580 switch (ap->state) {
4581 case ANEG_STATE_UNKNOWN:
4582 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4583 ap->state = ANEG_STATE_AN_ENABLE;
4585 /* fallthru */
4586 case ANEG_STATE_AN_ENABLE:
4587 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4588 if (ap->flags & MR_AN_ENABLE) {
4589 ap->link_time = 0;
4590 ap->cur_time = 0;
4591 ap->ability_match_cfg = 0;
4592 ap->ability_match_count = 0;
4593 ap->ability_match = 0;
4594 ap->idle_match = 0;
4595 ap->ack_match = 0;
4597 ap->state = ANEG_STATE_RESTART_INIT;
4598 } else {
4599 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4601 break;
4603 case ANEG_STATE_RESTART_INIT:
4604 ap->link_time = ap->cur_time;
4605 ap->flags &= ~(MR_NP_LOADED);
4606 ap->txconfig = 0;
4607 tw32(MAC_TX_AUTO_NEG, 0);
4608 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4609 tw32_f(MAC_MODE, tp->mac_mode);
4610 udelay(40);
4612 ret = ANEG_TIMER_ENAB;
4613 ap->state = ANEG_STATE_RESTART;
4615 /* fallthru */
4616 case ANEG_STATE_RESTART:
4617 delta = ap->cur_time - ap->link_time;
4618 if (delta > ANEG_STATE_SETTLE_TIME)
4619 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4620 else
4621 ret = ANEG_TIMER_ENAB;
4622 break;
4624 case ANEG_STATE_DISABLE_LINK_OK:
4625 ret = ANEG_DONE;
4626 break;
4628 case ANEG_STATE_ABILITY_DETECT_INIT:
4629 ap->flags &= ~(MR_TOGGLE_TX);
4630 ap->txconfig = ANEG_CFG_FD;
4631 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4632 if (flowctrl & ADVERTISE_1000XPAUSE)
4633 ap->txconfig |= ANEG_CFG_PS1;
4634 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4635 ap->txconfig |= ANEG_CFG_PS2;
4636 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4637 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4638 tw32_f(MAC_MODE, tp->mac_mode);
4639 udelay(40);
4641 ap->state = ANEG_STATE_ABILITY_DETECT;
4642 break;
4644 case ANEG_STATE_ABILITY_DETECT:
4645 if (ap->ability_match != 0 && ap->rxconfig != 0)
4646 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4647 break;
4649 case ANEG_STATE_ACK_DETECT_INIT:
4650 ap->txconfig |= ANEG_CFG_ACK;
4651 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4652 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4653 tw32_f(MAC_MODE, tp->mac_mode);
4654 udelay(40);
4656 ap->state = ANEG_STATE_ACK_DETECT;
4658 /* fallthru */
4659 case ANEG_STATE_ACK_DETECT:
4660 if (ap->ack_match != 0) {
4661 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4662 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4663 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4664 } else {
4665 ap->state = ANEG_STATE_AN_ENABLE;
4667 } else if (ap->ability_match != 0 &&
4668 ap->rxconfig == 0) {
4669 ap->state = ANEG_STATE_AN_ENABLE;
4671 break;
4673 case ANEG_STATE_COMPLETE_ACK_INIT:
4674 if (ap->rxconfig & ANEG_CFG_INVAL) {
4675 ret = ANEG_FAILED;
4676 break;
4678 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4679 MR_LP_ADV_HALF_DUPLEX |
4680 MR_LP_ADV_SYM_PAUSE |
4681 MR_LP_ADV_ASYM_PAUSE |
4682 MR_LP_ADV_REMOTE_FAULT1 |
4683 MR_LP_ADV_REMOTE_FAULT2 |
4684 MR_LP_ADV_NEXT_PAGE |
4685 MR_TOGGLE_RX |
4686 MR_NP_RX);
4687 if (ap->rxconfig & ANEG_CFG_FD)
4688 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4689 if (ap->rxconfig & ANEG_CFG_HD)
4690 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4691 if (ap->rxconfig & ANEG_CFG_PS1)
4692 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4693 if (ap->rxconfig & ANEG_CFG_PS2)
4694 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4695 if (ap->rxconfig & ANEG_CFG_RF1)
4696 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4697 if (ap->rxconfig & ANEG_CFG_RF2)
4698 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4699 if (ap->rxconfig & ANEG_CFG_NP)
4700 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4702 ap->link_time = ap->cur_time;
4704 ap->flags ^= (MR_TOGGLE_TX);
4705 if (ap->rxconfig & 0x0008)
4706 ap->flags |= MR_TOGGLE_RX;
4707 if (ap->rxconfig & ANEG_CFG_NP)
4708 ap->flags |= MR_NP_RX;
4709 ap->flags |= MR_PAGE_RX;
4711 ap->state = ANEG_STATE_COMPLETE_ACK;
4712 ret = ANEG_TIMER_ENAB;
4713 break;
4715 case ANEG_STATE_COMPLETE_ACK:
4716 if (ap->ability_match != 0 &&
4717 ap->rxconfig == 0) {
4718 ap->state = ANEG_STATE_AN_ENABLE;
4719 break;
4721 delta = ap->cur_time - ap->link_time;
4722 if (delta > ANEG_STATE_SETTLE_TIME) {
4723 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4724 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4725 } else {
4726 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4727 !(ap->flags & MR_NP_RX)) {
4728 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4729 } else {
4730 ret = ANEG_FAILED;
4734 break;
4736 case ANEG_STATE_IDLE_DETECT_INIT:
4737 ap->link_time = ap->cur_time;
4738 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4739 tw32_f(MAC_MODE, tp->mac_mode);
4740 udelay(40);
4742 ap->state = ANEG_STATE_IDLE_DETECT;
4743 ret = ANEG_TIMER_ENAB;
4744 break;
4746 case ANEG_STATE_IDLE_DETECT:
4747 if (ap->ability_match != 0 &&
4748 ap->rxconfig == 0) {
4749 ap->state = ANEG_STATE_AN_ENABLE;
4750 break;
4752 delta = ap->cur_time - ap->link_time;
4753 if (delta > ANEG_STATE_SETTLE_TIME) {
4754 /* XXX another gem from the Broadcom driver :( */
4755 ap->state = ANEG_STATE_LINK_OK;
4757 break;
4759 case ANEG_STATE_LINK_OK:
4760 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4761 ret = ANEG_DONE;
4762 break;
4764 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4765 /* ??? unimplemented */
4766 break;
4768 case ANEG_STATE_NEXT_PAGE_WAIT:
4769 /* ??? unimplemented */
4770 break;
4772 default:
4773 ret = ANEG_FAILED;
4774 break;
4777 return ret;
4780 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4782 int res = 0;
4783 struct tg3_fiber_aneginfo aninfo;
4784 int status = ANEG_FAILED;
4785 unsigned int tick;
4786 u32 tmp;
4788 tw32_f(MAC_TX_AUTO_NEG, 0);
4790 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4791 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4792 udelay(40);
4794 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4795 udelay(40);
4797 memset(&aninfo, 0, sizeof(aninfo));
4798 aninfo.flags |= MR_AN_ENABLE;
4799 aninfo.state = ANEG_STATE_UNKNOWN;
4800 aninfo.cur_time = 0;
4801 tick = 0;
4802 while (++tick < 195000) {
4803 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4804 if (status == ANEG_DONE || status == ANEG_FAILED)
4805 break;
4807 udelay(1);
4810 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4811 tw32_f(MAC_MODE, tp->mac_mode);
4812 udelay(40);
4814 *txflags = aninfo.txconfig;
4815 *rxflags = aninfo.flags;
4817 if (status == ANEG_DONE &&
4818 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4819 MR_LP_ADV_FULL_DUPLEX)))
4820 res = 1;
4822 return res;
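/* Rough timing budget for the loop above: every iteration pays at
 * least the udelay(1), so the 195,000-tick bound gives the hardware on
 * the order of 200 ms to complete autoneg, and ANEG_STATE_SETTLE_TIME
 * (10,000 ticks of aninfo.cur_time, incremented once per state-machine
 * call) is roughly a 10 ms settle window on the same clock.
 */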
4825 static void tg3_init_bcm8002(struct tg3 *tp)
4827 u32 mac_status = tr32(MAC_STATUS);
4828 int i;
4830 /* Reset when initializing for the first time, or when we have a link. */
4831 if (tg3_flag(tp, INIT_COMPLETE) &&
4832 !(mac_status & MAC_STATUS_PCS_SYNCED))
4833 return;
4835 /* Set PLL lock range. */
4836 tg3_writephy(tp, 0x16, 0x8007);
4838 /* SW reset */
4839 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4841 /* Wait for reset to complete. */
4842 /* XXX schedule_timeout() ... */
4843 for (i = 0; i < 500; i++)
4844 udelay(10);
4846 /* Config mode; select PMA/Ch 1 regs. */
4847 tg3_writephy(tp, 0x10, 0x8411);
4849 /* Enable auto-lock and comdet, select txclk for tx. */
4850 tg3_writephy(tp, 0x11, 0x0a10);
4852 tg3_writephy(tp, 0x18, 0x00a0);
4853 tg3_writephy(tp, 0x16, 0x41ff);
4855 /* Assert and deassert POR. */
4856 tg3_writephy(tp, 0x13, 0x0400);
4857 udelay(40);
4858 tg3_writephy(tp, 0x13, 0x0000);
4860 tg3_writephy(tp, 0x11, 0x0a50);
4861 udelay(40);
4862 tg3_writephy(tp, 0x11, 0x0a10);
4864 /* Wait for signal to stabilize */
4865 /* XXX schedule_timeout() ... */
4866 for (i = 0; i < 15000; i++)
4867 udelay(10);
4869 /* Deselect the channel register so we can read the PHYID
4870 * later.
4872 tg3_writephy(tp, 0x10, 0x8011);
4875 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4877 u16 flowctrl;
4878 u32 sg_dig_ctrl, sg_dig_status;
4879 u32 serdes_cfg, expected_sg_dig_ctrl;
4880 int workaround, port_a;
4881 int current_link_up;
4883 serdes_cfg = 0;
4884 expected_sg_dig_ctrl = 0;
4885 workaround = 0;
4886 port_a = 1;
4887 current_link_up = 0;
4889 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4890 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4891 workaround = 1;
4892 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4893 port_a = 0;
4895 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4896 /* preserve bits 20-23 for voltage regulator */
4897 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4900 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4902 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4903 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4904 if (workaround) {
4905 u32 val = serdes_cfg;
4907 if (port_a)
4908 val |= 0xc010000;
4909 else
4910 val |= 0x4010000;
4911 tw32_f(MAC_SERDES_CFG, val);
4914 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4916 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4917 tg3_setup_flow_control(tp, 0, 0);
4918 current_link_up = 1;
4920 goto out;
4923 /* Want auto-negotiation. */
4924 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4926 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4927 if (flowctrl & ADVERTISE_1000XPAUSE)
4928 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4929 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4930 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4932 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4933 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4934 tp->serdes_counter &&
4935 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4936 MAC_STATUS_RCVD_CFG)) ==
4937 MAC_STATUS_PCS_SYNCED)) {
4938 tp->serdes_counter--;
4939 current_link_up = 1;
4940 goto out;
4942 restart_autoneg:
4943 if (workaround)
4944 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4945 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4946 udelay(5);
4947 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4949 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4950 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4951 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4952 MAC_STATUS_SIGNAL_DET)) {
4953 sg_dig_status = tr32(SG_DIG_STATUS);
4954 mac_status = tr32(MAC_STATUS);
4956 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4957 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4958 u32 local_adv = 0, remote_adv = 0;
4960 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4961 local_adv |= ADVERTISE_1000XPAUSE;
4962 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4963 local_adv |= ADVERTISE_1000XPSE_ASYM;
4965 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4966 remote_adv |= LPA_1000XPAUSE;
4967 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4968 remote_adv |= LPA_1000XPAUSE_ASYM;
4970 tp->link_config.rmt_adv =
4971 mii_adv_to_ethtool_adv_x(remote_adv);
4973 tg3_setup_flow_control(tp, local_adv, remote_adv);
4974 current_link_up = 1;
4975 tp->serdes_counter = 0;
4976 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4977 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4978 if (tp->serdes_counter)
4979 tp->serdes_counter--;
4980 else {
4981 if (workaround) {
4982 u32 val = serdes_cfg;
4984 if (port_a)
4985 val |= 0xc010000;
4986 else
4987 val |= 0x4010000;
4989 tw32_f(MAC_SERDES_CFG, val);
4992 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4993 udelay(40);
4995 /* Link parallel detection: the link is up only
4996 * if we have PCS_SYNC and are not receiving
4997 * config code words. */
4998 mac_status = tr32(MAC_STATUS);
4999 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5000 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5001 tg3_setup_flow_control(tp, 0, 0);
5002 current_link_up = 1;
5003 tp->phy_flags |=
5004 TG3_PHYFLG_PARALLEL_DETECT;
5005 tp->serdes_counter =
5006 SERDES_PARALLEL_DET_TIMEOUT;
5007 } else
5008 goto restart_autoneg;
5011 } else {
5012 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5013 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5016 out:
5017 return current_link_up;
5020 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5022 int current_link_up = 0;
5024 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5025 goto out;
5027 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5028 u32 txflags, rxflags;
5029 int i;
5031 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5032 u32 local_adv = 0, remote_adv = 0;
5034 if (txflags & ANEG_CFG_PS1)
5035 local_adv |= ADVERTISE_1000XPAUSE;
5036 if (txflags & ANEG_CFG_PS2)
5037 local_adv |= ADVERTISE_1000XPSE_ASYM;
5039 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5040 remote_adv |= LPA_1000XPAUSE;
5041 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5042 remote_adv |= LPA_1000XPAUSE_ASYM;
5044 tp->link_config.rmt_adv =
5045 mii_adv_to_ethtool_adv_x(remote_adv);
5047 tg3_setup_flow_control(tp, local_adv, remote_adv);
5049 current_link_up = 1;
5051 for (i = 0; i < 30; i++) {
5052 udelay(20);
5053 tw32_f(MAC_STATUS,
5054 (MAC_STATUS_SYNC_CHANGED |
5055 MAC_STATUS_CFG_CHANGED));
5056 udelay(40);
5057 if ((tr32(MAC_STATUS) &
5058 (MAC_STATUS_SYNC_CHANGED |
5059 MAC_STATUS_CFG_CHANGED)) == 0)
5060 break;
5063 mac_status = tr32(MAC_STATUS);
5064 if (current_link_up == 0 &&
5065 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5066 !(mac_status & MAC_STATUS_RCVD_CFG))
5067 current_link_up = 1;
5068 } else {
5069 tg3_setup_flow_control(tp, 0, 0);
5071 /* Forcing 1000FD link up. */
5072 current_link_up = 1;
5074 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5075 udelay(40);
5077 tw32_f(MAC_MODE, tp->mac_mode);
5078 udelay(40);
5081 out:
5082 return current_link_up;
5085 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5087 u32 orig_pause_cfg;
5088 u16 orig_active_speed;
5089 u8 orig_active_duplex;
5090 u32 mac_status;
5091 int current_link_up;
5092 int i;
5094 orig_pause_cfg = tp->link_config.active_flowctrl;
5095 orig_active_speed = tp->link_config.active_speed;
5096 orig_active_duplex = tp->link_config.active_duplex;
5098 if (!tg3_flag(tp, HW_AUTONEG) &&
5099 netif_carrier_ok(tp->dev) &&
5100 tg3_flag(tp, INIT_COMPLETE)) {
5101 mac_status = tr32(MAC_STATUS);
5102 mac_status &= (MAC_STATUS_PCS_SYNCED |
5103 MAC_STATUS_SIGNAL_DET |
5104 MAC_STATUS_CFG_CHANGED |
5105 MAC_STATUS_RCVD_CFG);
5106 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5107 MAC_STATUS_SIGNAL_DET)) {
5108 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5109 MAC_STATUS_CFG_CHANGED));
5110 return 0;
5114 tw32_f(MAC_TX_AUTO_NEG, 0);
5116 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5117 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5118 tw32_f(MAC_MODE, tp->mac_mode);
5119 udelay(40);
5121 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5122 tg3_init_bcm8002(tp);
5124 /* Enable link change event even when serdes polling. */
5125 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5126 udelay(40);
5128 current_link_up = 0;
5129 tp->link_config.rmt_adv = 0;
5130 mac_status = tr32(MAC_STATUS);
5132 if (tg3_flag(tp, HW_AUTONEG))
5133 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5134 else
5135 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5137 tp->napi[0].hw_status->status =
5138 (SD_STATUS_UPDATED |
5139 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5141 for (i = 0; i < 100; i++) {
5142 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5143 MAC_STATUS_CFG_CHANGED));
5144 udelay(5);
5145 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5146 MAC_STATUS_CFG_CHANGED |
5147 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5148 break;
5151 mac_status = tr32(MAC_STATUS);
5152 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5153 current_link_up = 0;
5154 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5155 tp->serdes_counter == 0) {
5156 tw32_f(MAC_MODE, (tp->mac_mode |
5157 MAC_MODE_SEND_CONFIGS));
5158 udelay(1);
5159 tw32_f(MAC_MODE, tp->mac_mode);
5163 if (current_link_up == 1) {
5164 tp->link_config.active_speed = SPEED_1000;
5165 tp->link_config.active_duplex = DUPLEX_FULL;
5166 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5167 LED_CTRL_LNKLED_OVERRIDE |
5168 LED_CTRL_1000MBPS_ON));
5169 } else {
5170 tp->link_config.active_speed = SPEED_UNKNOWN;
5171 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5172 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5173 LED_CTRL_LNKLED_OVERRIDE |
5174 LED_CTRL_TRAFFIC_OVERRIDE));
5177 if (current_link_up != netif_carrier_ok(tp->dev)) {
5178 if (current_link_up)
5179 netif_carrier_on(tp->dev);
5180 else
5181 netif_carrier_off(tp->dev);
5182 tg3_link_report(tp);
5183 } else {
5184 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5185 if (orig_pause_cfg != now_pause_cfg ||
5186 orig_active_speed != tp->link_config.active_speed ||
5187 orig_active_duplex != tp->link_config.active_duplex)
5188 tg3_link_report(tp);
5191 return 0;
5194 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5196 int current_link_up, err = 0;
5197 u32 bmsr, bmcr;
5198 u16 current_speed;
5199 u8 current_duplex;
5200 u32 local_adv, remote_adv;
5202 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5203 tw32_f(MAC_MODE, tp->mac_mode);
5204 udelay(40);
5206 tw32(MAC_EVENT, 0);
5208 tw32_f(MAC_STATUS,
5209 (MAC_STATUS_SYNC_CHANGED |
5210 MAC_STATUS_CFG_CHANGED |
5211 MAC_STATUS_MI_COMPLETION |
5212 MAC_STATUS_LNKSTATE_CHANGED));
5213 udelay(40);
5215 if (force_reset)
5216 tg3_phy_reset(tp);
5218 current_link_up = 0;
5219 current_speed = SPEED_UNKNOWN;
5220 current_duplex = DUPLEX_UNKNOWN;
5221 tp->link_config.rmt_adv = 0;
5223 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5224 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5226 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5227 bmsr |= BMSR_LSTATUS;
5228 else
5229 bmsr &= ~BMSR_LSTATUS;
5232 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5234 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5235 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5236 /* do nothing, just check for link up at the end */
5237 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5238 u32 adv, newadv;
5240 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5241 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5242 ADVERTISE_1000XPAUSE |
5243 ADVERTISE_1000XPSE_ASYM |
5244 ADVERTISE_SLCT);
5246 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5247 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5249 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5250 tg3_writephy(tp, MII_ADVERTISE, newadv);
5251 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5252 tg3_writephy(tp, MII_BMCR, bmcr);
5254 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5255 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5256 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5258 return err;
5260 } else {
5261 u32 new_bmcr;
5263 bmcr &= ~BMCR_SPEED1000;
5264 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5266 if (tp->link_config.duplex == DUPLEX_FULL)
5267 new_bmcr |= BMCR_FULLDPLX;
5269 if (new_bmcr != bmcr) {
5270 /* BMCR_SPEED1000 is a reserved bit that needs
5271 * to be set on write.
5273 new_bmcr |= BMCR_SPEED1000;
5275 /* Force a linkdown */
5276 if (netif_carrier_ok(tp->dev)) {
5277 u32 adv;
5279 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5280 adv &= ~(ADVERTISE_1000XFULL |
5281 ADVERTISE_1000XHALF |
5282 ADVERTISE_SLCT);
5283 tg3_writephy(tp, MII_ADVERTISE, adv);
5284 tg3_writephy(tp, MII_BMCR, bmcr |
5285 BMCR_ANRESTART |
5286 BMCR_ANENABLE);
5287 udelay(10);
5288 netif_carrier_off(tp->dev);
5290 tg3_writephy(tp, MII_BMCR, new_bmcr);
5291 bmcr = new_bmcr;
5292 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5293 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5294 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5295 ASIC_REV_5714) {
5296 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5297 bmsr |= BMSR_LSTATUS;
5298 else
5299 bmsr &= ~BMSR_LSTATUS;
5301 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5305 if (bmsr & BMSR_LSTATUS) {
5306 current_speed = SPEED_1000;
5307 current_link_up = 1;
5308 if (bmcr & BMCR_FULLDPLX)
5309 current_duplex = DUPLEX_FULL;
5310 else
5311 current_duplex = DUPLEX_HALF;
5313 local_adv = 0;
5314 remote_adv = 0;
5316 if (bmcr & BMCR_ANENABLE) {
5317 u32 common;
5319 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5320 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5321 common = local_adv & remote_adv;
5322 if (common & (ADVERTISE_1000XHALF |
5323 ADVERTISE_1000XFULL)) {
5324 if (common & ADVERTISE_1000XFULL)
5325 current_duplex = DUPLEX_FULL;
5326 else
5327 current_duplex = DUPLEX_HALF;
5329 tp->link_config.rmt_adv =
5330 mii_adv_to_ethtool_adv_x(remote_adv);
5331 } else if (!tg3_flag(tp, 5780_CLASS)) {
5332 /* Link is up via parallel detect */
5333 } else {
5334 current_link_up = 0;
5339 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5340 tg3_setup_flow_control(tp, local_adv, remote_adv);
5342 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5343 if (tp->link_config.active_duplex == DUPLEX_HALF)
5344 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5346 tw32_f(MAC_MODE, tp->mac_mode);
5347 udelay(40);
5349 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5351 tp->link_config.active_speed = current_speed;
5352 tp->link_config.active_duplex = current_duplex;
5354 if (current_link_up != netif_carrier_ok(tp->dev)) {
5355 if (current_link_up)
5356 netif_carrier_on(tp->dev);
5357 else {
5358 netif_carrier_off(tp->dev);
5359 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5361 tg3_link_report(tp);
5363 return err;
5366 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5368 if (tp->serdes_counter) {
5369 /* Give autoneg time to complete. */
5370 tp->serdes_counter--;
5371 return;
5374 if (!netif_carrier_ok(tp->dev) &&
5375 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5376 u32 bmcr;
5378 tg3_readphy(tp, MII_BMCR, &bmcr);
5379 if (bmcr & BMCR_ANENABLE) {
5380 u32 phy1, phy2;
5382 /* Select shadow register 0x1f */
5383 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5384 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5386 /* Select expansion interrupt status register */
5387 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5388 MII_TG3_DSP_EXP1_INT_STAT);
5389 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5390 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5392 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5393 /* We have signal detect and are not receiving
5394 * config code words, so the link is up by
5395 * parallel detection. */
5398 bmcr &= ~BMCR_ANENABLE;
5399 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5400 tg3_writephy(tp, MII_BMCR, bmcr);
5401 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5404 } else if (netif_carrier_ok(tp->dev) &&
5405 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5406 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5407 u32 phy2;
5409 /* Select expansion interrupt status register */
5410 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5411 MII_TG3_DSP_EXP1_INT_STAT);
5412 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5413 if (phy2 & 0x20) {
5414 u32 bmcr;
5416 /* Config code words received, turn on autoneg. */
5417 tg3_readphy(tp, MII_BMCR, &bmcr);
5418 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5420 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5426 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5428 u32 val;
5429 int err;
5431 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5432 err = tg3_setup_fiber_phy(tp, force_reset);
5433 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5434 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5435 else
5436 err = tg3_setup_copper_phy(tp, force_reset);
5438 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5439 u32 scale;
5441 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5442 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5443 scale = 65;
5444 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5445 scale = 6;
5446 else
5447 scale = 12;
5449 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5450 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5451 tw32(GRC_MISC_CFG, val);
5454 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5455 (6 << TX_LENGTHS_IPG_SHIFT);
5456 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5457 val |= tr32(MAC_TX_LENGTHS) &
5458 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5459 TX_LENGTHS_CNT_DWN_VAL_MSK);
5461 if (tp->link_config.active_speed == SPEED_1000 &&
5462 tp->link_config.active_duplex == DUPLEX_HALF)
5463 tw32(MAC_TX_LENGTHS, val |
5464 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5465 else
5466 tw32(MAC_TX_LENGTHS, val |
5467 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5469 if (!tg3_flag(tp, 5705_PLUS)) {
5470 if (netif_carrier_ok(tp->dev)) {
5471 tw32(HOSTCC_STAT_COAL_TICKS,
5472 tp->coal.stats_block_coalesce_usecs);
5473 } else {
5474 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5478 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5479 val = tr32(PCIE_PWR_MGMT_THRESH);
5480 if (!netif_carrier_ok(tp->dev))
5481 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5482 tp->pwrmgmt_thresh;
5483 else
5484 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5485 tw32(PCIE_PWR_MGMT_THRESH, val);
5488 return err;
5491 static inline int tg3_irq_sync(struct tg3 *tp)
5493 return tp->irq_sync;
5496 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5498 int i;
5500 dst = (u32 *)((u8 *)dst + off);
5501 for (i = 0; i < len; i += sizeof(u32))
5502 *dst++ = tr32(off + i);
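/* Layout note for the helper above: dst is bumped by the same register
 * offset that is being read, so the dump buffer ends up as a sparse
 * image of register space with regs[off / sizeof(u32)] == tr32(off).
 * That is what lets tg3_dump_state() below print "i * 4" directly as
 * the register address.
 */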
5505 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5507 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5508 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5509 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5510 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5511 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5512 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5513 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5514 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5515 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5516 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5517 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5518 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5519 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5520 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5521 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5522 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5523 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5524 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5525 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5527 if (tg3_flag(tp, SUPPORT_MSIX))
5528 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5530 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5531 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5532 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5533 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5534 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5535 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5536 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5537 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5539 if (!tg3_flag(tp, 5705_PLUS)) {
5540 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5541 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5542 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5545 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5546 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5547 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5548 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5549 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5551 if (tg3_flag(tp, NVRAM))
5552 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5555 static void tg3_dump_state(struct tg3 *tp)
5557 int i;
5558 u32 *regs;
5560 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5561 if (!regs) {
5562 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5563 return;
5566 if (tg3_flag(tp, PCI_EXPRESS)) {
5567 /* Read up to but not including private PCI registers */
5568 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5569 regs[i / sizeof(u32)] = tr32(i);
5570 } else
5571 tg3_dump_legacy_regs(tp, regs);
5573 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5574 if (!regs[i + 0] && !regs[i + 1] &&
5575 !regs[i + 2] && !regs[i + 3])
5576 continue;
5578 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5579 i * 4,
5580 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5583 kfree(regs);
5585 for (i = 0; i < tp->irq_cnt; i++) {
5586 struct tg3_napi *tnapi = &tp->napi[i];
5588 /* SW status block */
5589 netdev_err(tp->dev,
5590 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5592 tnapi->hw_status->status,
5593 tnapi->hw_status->status_tag,
5594 tnapi->hw_status->rx_jumbo_consumer,
5595 tnapi->hw_status->rx_consumer,
5596 tnapi->hw_status->rx_mini_consumer,
5597 tnapi->hw_status->idx[0].rx_producer,
5598 tnapi->hw_status->idx[0].tx_consumer);
5600 netdev_err(tp->dev,
5601 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5603 tnapi->last_tag, tnapi->last_irq_tag,
5604 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5605 tnapi->rx_rcb_ptr,
5606 tnapi->prodring.rx_std_prod_idx,
5607 tnapi->prodring.rx_std_cons_idx,
5608 tnapi->prodring.rx_jmb_prod_idx,
5609 tnapi->prodring.rx_jmb_cons_idx);
5613 /* This is called whenever we suspect that the system chipset is re-
5614 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5615 * is bogus tx completions. We try to recover by setting the
5616 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5617 * in the workqueue.
5619 static void tg3_tx_recover(struct tg3 *tp)
5621 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5622 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5624 netdev_warn(tp->dev,
5625 "The system may be re-ordering memory-mapped I/O "
5626 "cycles to the network device, attempting to recover. "
5627 "Please report the problem to the driver maintainer "
5628 "and include system chipset information.\n");
5630 spin_lock(&tp->lock);
5631 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5632 spin_unlock(&tp->lock);
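/* For context: the callers in tg3_tx() below invoke this when the
 * hardware-reported tx consumer index lands on a slot with no skb, or
 * when walking a packet's fragments collides with a still-occupied
 * slot - symptoms consistent with mailbox writes reaching the chip
 * out of order.
 */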
5635 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5637 /* Tell compiler to fetch tx indices from memory. */
5638 barrier();
5639 return tnapi->tx_pending -
5640 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
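/* Worked example of the masked subtraction above (assuming the usual
 * power-of-two ring, e.g. 512 entries): tx_prod = 5 after wrapping and
 * tx_cons = 510 give (5 - 510) & 511 = 7 descriptors still in flight,
 * so tx_pending - 7 slots are reported free even though the producer
 * index is numerically below the consumer.
 */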
5643 /* Tigon3 never reports partial packet sends. So we do not
5644 * need special logic to handle SKBs that have not had all
5645 * of their frags sent yet, like SunGEM does.
5647 static void tg3_tx(struct tg3_napi *tnapi)
5649 struct tg3 *tp = tnapi->tp;
5650 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5651 u32 sw_idx = tnapi->tx_cons;
5652 struct netdev_queue *txq;
5653 int index = tnapi - tp->napi;
5654 unsigned int pkts_compl = 0, bytes_compl = 0;
5656 if (tg3_flag(tp, ENABLE_TSS))
5657 index--;
5659 txq = netdev_get_tx_queue(tp->dev, index);
5661 while (sw_idx != hw_idx) {
5662 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5663 struct sk_buff *skb = ri->skb;
5664 int i, tx_bug = 0;
5666 if (unlikely(skb == NULL)) {
5667 tg3_tx_recover(tp);
5668 return;
5671 pci_unmap_single(tp->pdev,
5672 dma_unmap_addr(ri, mapping),
5673 skb_headlen(skb),
5674 PCI_DMA_TODEVICE);
5676 ri->skb = NULL;
5678 while (ri->fragmented) {
5679 ri->fragmented = false;
5680 sw_idx = NEXT_TX(sw_idx);
5681 ri = &tnapi->tx_buffers[sw_idx];
5684 sw_idx = NEXT_TX(sw_idx);
5686 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5687 ri = &tnapi->tx_buffers[sw_idx];
5688 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5689 tx_bug = 1;
5691 pci_unmap_page(tp->pdev,
5692 dma_unmap_addr(ri, mapping),
5693 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5694 PCI_DMA_TODEVICE);
5696 while (ri->fragmented) {
5697 ri->fragmented = false;
5698 sw_idx = NEXT_TX(sw_idx);
5699 ri = &tnapi->tx_buffers[sw_idx];
5702 sw_idx = NEXT_TX(sw_idx);
5705 pkts_compl++;
5706 bytes_compl += skb->len;
5708 dev_kfree_skb(skb);
5710 if (unlikely(tx_bug)) {
5711 tg3_tx_recover(tp);
5712 return;
5716 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5718 tnapi->tx_cons = sw_idx;
5720 /* Need to make the tx_cons update visible to tg3_start_xmit()
5721 * before checking for netif_queue_stopped(). Without the
5722 * memory barrier, there is a small possibility that tg3_start_xmit()
5723 * will miss it and cause the queue to be stopped forever.
5725 smp_mb();
5727 if (unlikely(netif_tx_queue_stopped(txq) &&
5728 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5729 __netif_tx_lock(txq, smp_processor_id());
5730 if (netif_tx_queue_stopped(txq) &&
5731 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5732 netif_tx_wake_queue(txq);
5733 __netif_tx_unlock(txq);
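/* A minimal sketch of the stop/wake protocol the smp_mb() above takes
 * part in (illustrative, not a verbatim copy of tg3_start_xmit()):
 *
 *	completion side (here)		transmit side
 *	----------------------		-------------
 *	tnapi->tx_cons = sw_idx;	netif_tx_stop_queue(txq);
 *	smp_mb();			smp_mb();
 *	if (queue stopped &&		if (tg3_tx_avail() >
 *	    avail > wakeup thresh)	    wakeup thresh)
 *		wake queue;			wake queue;
 *
 * Each side publishes its update before re-reading the other side's
 * state, so at least one of them observes the final values and the
 * queue cannot stay stopped while free descriptors exist.
 */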
5737 static void tg3_frag_free(bool is_frag, void *data)
5739 if (is_frag)
5740 put_page(virt_to_head_page(data));
5741 else
5742 kfree(data);
5745 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5747 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5748 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5750 if (!ri->data)
5751 return;
5753 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5754 map_sz, PCI_DMA_FROMDEVICE);
5755 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5756 ri->data = NULL;
5760 /* Returns size of skb allocated or < 0 on error.
5762 * We only need to fill in the address because the other members
5763 * of the RX descriptor are invariant, see tg3_init_rings.
5765 * Note the purposeful asymmetry of cpu vs. chip accesses. For
5766 * posting buffers we only dirty the first cache line of the RX
5767 * descriptor (containing the address). Whereas for the RX status
5768 * buffers the cpu only reads the last cacheline of the RX descriptor
5769 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5771 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5772 u32 opaque_key, u32 dest_idx_unmasked,
5773 unsigned int *frag_size)
5775 struct tg3_rx_buffer_desc *desc;
5776 struct ring_info *map;
5777 u8 *data;
5778 dma_addr_t mapping;
5779 int skb_size, data_size, dest_idx;
5781 switch (opaque_key) {
5782 case RXD_OPAQUE_RING_STD:
5783 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5784 desc = &tpr->rx_std[dest_idx];
5785 map = &tpr->rx_std_buffers[dest_idx];
5786 data_size = tp->rx_pkt_map_sz;
5787 break;
5789 case RXD_OPAQUE_RING_JUMBO:
5790 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5791 desc = &tpr->rx_jmb[dest_idx].std;
5792 map = &tpr->rx_jmb_buffers[dest_idx];
5793 data_size = TG3_RX_JMB_MAP_SZ;
5794 break;
5796 default:
5797 return -EINVAL;
5800 /* Do not overwrite any of the map or rp information
5801 * until we are sure we can commit to a new buffer.
5803 * Callers depend upon this behavior and assume that
5804 * we leave everything unchanged if we fail.
5806 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5807 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5808 if (skb_size <= PAGE_SIZE) {
5809 data = netdev_alloc_frag(skb_size);
5810 *frag_size = skb_size;
5811 } else {
5812 data = kmalloc(skb_size, GFP_ATOMIC);
5813 *frag_size = 0;
5815 if (!data)
5816 return -ENOMEM;
5818 mapping = pci_map_single(tp->pdev,
5819 data + TG3_RX_OFFSET(tp),
5820 data_size,
5821 PCI_DMA_FROMDEVICE);
5822 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5823 tg3_frag_free(skb_size <= PAGE_SIZE, data);
5824 return -EIO;
5827 map->data = data;
5828 dma_unmap_addr_set(map, mapping, mapping);
5830 desc->addr_hi = ((u64)mapping >> 32);
5831 desc->addr_lo = ((u64)mapping & 0xffffffff);
5833 return data_size;
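/* Allocation strategy note: buffers whose padded size fits in a page
 * come from the page-fragment allocator and report that size through
 * *frag_size; larger ones fall back to kmalloc() and report
 * *frag_size == 0. tg3_rx() leans on this convention when it passes
 * frag_size to build_skb(), and tg3_frag_free() picks put_page() vs
 * kfree() from the same test.
 */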
5836 /* We only need to move over in the address because the other
5837 * members of the RX descriptor are invariant. See notes above
5838 * tg3_alloc_rx_data for full details.
5840 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5841 struct tg3_rx_prodring_set *dpr,
5842 u32 opaque_key, int src_idx,
5843 u32 dest_idx_unmasked)
5845 struct tg3 *tp = tnapi->tp;
5846 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5847 struct ring_info *src_map, *dest_map;
5848 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5849 int dest_idx;
5851 switch (opaque_key) {
5852 case RXD_OPAQUE_RING_STD:
5853 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5854 dest_desc = &dpr->rx_std[dest_idx];
5855 dest_map = &dpr->rx_std_buffers[dest_idx];
5856 src_desc = &spr->rx_std[src_idx];
5857 src_map = &spr->rx_std_buffers[src_idx];
5858 break;
5860 case RXD_OPAQUE_RING_JUMBO:
5861 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5862 dest_desc = &dpr->rx_jmb[dest_idx].std;
5863 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5864 src_desc = &spr->rx_jmb[src_idx].std;
5865 src_map = &spr->rx_jmb_buffers[src_idx];
5866 break;
5868 default:
5869 return;
5872 dest_map->data = src_map->data;
5873 dma_unmap_addr_set(dest_map, mapping,
5874 dma_unmap_addr(src_map, mapping));
5875 dest_desc->addr_hi = src_desc->addr_hi;
5876 dest_desc->addr_lo = src_desc->addr_lo;
5878 /* Ensure that the update to the skb happens after the physical
5879 * addresses have been transferred to the new BD location.
5881 smp_wmb();
5883 src_map->data = NULL;
5886 /* The RX ring scheme is composed of multiple rings which post fresh
5887 * buffers to the chip, and one special ring the chip uses to report
5888 * status back to the host.
5890 * The special ring reports the status of received packets to the
5891 * host. The chip does not write into the original descriptor the
5892 * RX buffer was obtained from. The chip simply takes the original
5893 * descriptor as provided by the host, updates the status and length
5894 * field, then writes this into the next status ring entry.
5896 * Each ring the host uses to post buffers to the chip is described
5897 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
5898 * it is first placed into the on-chip ram. When the packet's length
5899 * is known, it walks down the TG3_BDINFO entries to select the ring.
5900 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5901 * which is within the range of the new packet's length is chosen.
5903 * The "separate ring for rx status" scheme may sound queer, but it makes
5904 * sense from a cache coherency perspective. If only the host writes
5905 * to the buffer post rings, and only the chip writes to the rx status
5906 * rings, then cache lines never move beyond shared-modified state.
5907 * If both the host and chip were to write into the same ring, cache line
5908 * eviction could occur since both entities want it in an exclusive state.
5910 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5912 struct tg3 *tp = tnapi->tp;
5913 u32 work_mask, rx_std_posted = 0;
5914 u32 std_prod_idx, jmb_prod_idx;
5915 u32 sw_idx = tnapi->rx_rcb_ptr;
5916 u16 hw_idx;
5917 int received;
5918 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5920 hw_idx = *(tnapi->rx_rcb_prod_idx);
5921 /*
5922 * We need to order the read of hw_idx and the read of
5923 * the opaque cookie.
5924 */
5925 rmb();
5926 work_mask = 0;
5927 received = 0;
5928 std_prod_idx = tpr->rx_std_prod_idx;
5929 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5930 while (sw_idx != hw_idx && budget > 0) {
5931 struct ring_info *ri;
5932 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5933 unsigned int len;
5934 struct sk_buff *skb;
5935 dma_addr_t dma_addr;
5936 u32 opaque_key, desc_idx, *post_ptr;
5937 u8 *data;
5939 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5940 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5941 if (opaque_key == RXD_OPAQUE_RING_STD) {
5942 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5943 dma_addr = dma_unmap_addr(ri, mapping);
5944 data = ri->data;
5945 post_ptr = &std_prod_idx;
5946 rx_std_posted++;
5947 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5948 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5949 dma_addr = dma_unmap_addr(ri, mapping);
5950 data = ri->data;
5951 post_ptr = &jmb_prod_idx;
5952 } else
5953 goto next_pkt_nopost;
5955 work_mask |= opaque_key;
5957 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5958 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5959 drop_it:
5960 tg3_recycle_rx(tnapi, tpr, opaque_key,
5961 desc_idx, *post_ptr);
5962 drop_it_no_recycle:
5963 /* Other drop statistics are kept by the card. */
5964 tp->rx_dropped++;
5965 goto next_pkt;
5968 prefetch(data + TG3_RX_OFFSET(tp));
5969 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5970 ETH_FCS_LEN;
5972 if (len > TG3_RX_COPY_THRESH(tp)) {
5973 int skb_size;
5974 unsigned int frag_size;
5976 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5977 *post_ptr, &frag_size);
5978 if (skb_size < 0)
5979 goto drop_it;
5981 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5982 PCI_DMA_FROMDEVICE);
5984 skb = build_skb(data, frag_size);
5985 if (!skb) {
5986 tg3_frag_free(frag_size != 0, data);
5987 goto drop_it_no_recycle;
5989 skb_reserve(skb, TG3_RX_OFFSET(tp));
5990 /* Ensure that the update to the data happens
5991 * after the usage of the old DMA mapping.
5993 smp_wmb();
5995 ri->data = NULL;
5997 } else {
5998 tg3_recycle_rx(tnapi, tpr, opaque_key,
5999 desc_idx, *post_ptr);
6001 skb = netdev_alloc_skb(tp->dev,
6002 len + TG3_RAW_IP_ALIGN);
6003 if (skb == NULL)
6004 goto drop_it_no_recycle;
6006 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6007 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6008 memcpy(skb->data,
6009 data + TG3_RX_OFFSET(tp),
6010 len);
6011 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6014 skb_put(skb, len);
6015 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6016 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6017 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6018 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6019 skb->ip_summed = CHECKSUM_UNNECESSARY;
6020 else
6021 skb_checksum_none_assert(skb);
6023 skb->protocol = eth_type_trans(skb, tp->dev);
6025 if (len > (tp->dev->mtu + ETH_HLEN) &&
6026 skb->protocol != htons(ETH_P_8021Q)) {
6027 dev_kfree_skb(skb);
6028 goto drop_it_no_recycle;
6031 if (desc->type_flags & RXD_FLAG_VLAN &&
6032 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6033 __vlan_hwaccel_put_tag(skb,
6034 desc->err_vlan & RXD_VLAN_MASK);
6036 napi_gro_receive(&tnapi->napi, skb);
6038 received++;
6039 budget--;
6041 next_pkt:
6042 (*post_ptr)++;
6044 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6045 tpr->rx_std_prod_idx = std_prod_idx &
6046 tp->rx_std_ring_mask;
6047 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6048 tpr->rx_std_prod_idx);
6049 work_mask &= ~RXD_OPAQUE_RING_STD;
6050 rx_std_posted = 0;
6052 next_pkt_nopost:
6053 sw_idx++;
6054 sw_idx &= tp->rx_ret_ring_mask;
6056 /* Refresh hw_idx to see if there is new work */
6057 if (sw_idx == hw_idx) {
6058 hw_idx = *(tnapi->rx_rcb_prod_idx);
6059 rmb();
6063 /* ACK the status ring. */
6064 tnapi->rx_rcb_ptr = sw_idx;
6065 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6067 /* Refill RX ring(s). */
6068 if (!tg3_flag(tp, ENABLE_RSS)) {
6069 /* Sync BD data before updating mailbox */
6070 wmb();
6072 if (work_mask & RXD_OPAQUE_RING_STD) {
6073 tpr->rx_std_prod_idx = std_prod_idx &
6074 tp->rx_std_ring_mask;
6075 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6076 tpr->rx_std_prod_idx);
6078 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6079 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6080 tp->rx_jmb_ring_mask;
6081 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6082 tpr->rx_jmb_prod_idx);
6084 mmiowb();
6085 } else if (work_mask) {
6086 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6087 * updated before the producer indices can be updated.
6089 smp_wmb();
6091 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6092 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6094 if (tnapi != &tp->napi[1]) {
6095 tp->rx_refill = true;
6096 napi_schedule(&tp->napi[1].napi);
6100 return received;
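/* Distilled shape of the loop above, for reference: the chip writes
 * only the status (return) ring, the host writes only the producer
 * rings, and the opaque cookie carries (ring, index) back so the
 * original buffer can be located without the chip ever touching the
 * posting descriptor:
 *
 *	while (sw_idx != hw_idx) {
 *		desc = &tnapi->rx_rcb[sw_idx];
 *		ring = desc->opaque & RXD_OPAQUE_RING_MASK;
 *		idx  = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 *		...pass buffer (ring, idx) up or recycle it...
 *		sw_idx = (sw_idx + 1) & tp->rx_ret_ring_mask;
 *	}
 */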
6103 static void tg3_poll_link(struct tg3 *tp)
6105 /* handle link change and other phy events */
6106 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6107 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6109 if (sblk->status & SD_STATUS_LINK_CHG) {
6110 sblk->status = SD_STATUS_UPDATED |
6111 (sblk->status & ~SD_STATUS_LINK_CHG);
6112 spin_lock(&tp->lock);
6113 if (tg3_flag(tp, USE_PHYLIB)) {
6114 tw32_f(MAC_STATUS,
6115 (MAC_STATUS_SYNC_CHANGED |
6116 MAC_STATUS_CFG_CHANGED |
6117 MAC_STATUS_MI_COMPLETION |
6118 MAC_STATUS_LNKSTATE_CHANGED));
6119 udelay(40);
6120 } else
6121 tg3_setup_phy(tp, 0);
6122 spin_unlock(&tp->lock);
6127 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6128 struct tg3_rx_prodring_set *dpr,
6129 struct tg3_rx_prodring_set *spr)
6131 u32 si, di, cpycnt, src_prod_idx;
6132 int i, err = 0;
6134 while (1) {
6135 src_prod_idx = spr->rx_std_prod_idx;
6137 /* Make sure updates to the rx_std_buffers[] entries and the
6138 * standard producer index are seen in the correct order.
6140 smp_rmb();
6142 if (spr->rx_std_cons_idx == src_prod_idx)
6143 break;
6145 if (spr->rx_std_cons_idx < src_prod_idx)
6146 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6147 else
6148 cpycnt = tp->rx_std_ring_mask + 1 -
6149 spr->rx_std_cons_idx;
6151 cpycnt = min(cpycnt,
6152 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6154 si = spr->rx_std_cons_idx;
6155 di = dpr->rx_std_prod_idx;
6157 for (i = di; i < di + cpycnt; i++) {
6158 if (dpr->rx_std_buffers[i].data) {
6159 cpycnt = i - di;
6160 err = -ENOSPC;
6161 break;
6165 if (!cpycnt)
6166 break;
6168 /* Ensure that updates to the rx_std_buffers ring and the
6169 * shadowed hardware producer ring from tg3_recycle_skb() are
6170 * ordered correctly WRT the skb check above.
6172 smp_rmb();
6174 memcpy(&dpr->rx_std_buffers[di],
6175 &spr->rx_std_buffers[si],
6176 cpycnt * sizeof(struct ring_info));
6178 for (i = 0; i < cpycnt; i++, di++, si++) {
6179 struct tg3_rx_buffer_desc *sbd, *dbd;
6180 sbd = &spr->rx_std[si];
6181 dbd = &dpr->rx_std[di];
6182 dbd->addr_hi = sbd->addr_hi;
6183 dbd->addr_lo = sbd->addr_lo;
6186 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6187 tp->rx_std_ring_mask;
6188 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6189 tp->rx_std_ring_mask;
6192 while (1) {
6193 src_prod_idx = spr->rx_jmb_prod_idx;
6195 /* Make sure updates to the rx_jmb_buffers[] entries and
6196 * the jumbo producer index are seen in the correct order.
6198 smp_rmb();
6200 if (spr->rx_jmb_cons_idx == src_prod_idx)
6201 break;
6203 if (spr->rx_jmb_cons_idx < src_prod_idx)
6204 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6205 else
6206 cpycnt = tp->rx_jmb_ring_mask + 1 -
6207 spr->rx_jmb_cons_idx;
6209 cpycnt = min(cpycnt,
6210 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6212 si = spr->rx_jmb_cons_idx;
6213 di = dpr->rx_jmb_prod_idx;
6215 for (i = di; i < di + cpycnt; i++) {
6216 if (dpr->rx_jmb_buffers[i].data) {
6217 cpycnt = i - di;
6218 err = -ENOSPC;
6219 break;
6223 if (!cpycnt)
6224 break;
6226 /* Ensure that updates to the rx_jmb_buffers ring and the
6227 * shadowed hardware producer ring from tg3_recycle_skb() are
6228 * ordered correctly WRT the skb check above.
6230 smp_rmb();
6232 memcpy(&dpr->rx_jmb_buffers[di],
6233 &spr->rx_jmb_buffers[si],
6234 cpycnt * sizeof(struct ring_info));
6236 for (i = 0; i < cpycnt; i++, di++, si++) {
6237 struct tg3_rx_buffer_desc *sbd, *dbd;
6238 sbd = &spr->rx_jmb[si].std;
6239 dbd = &dpr->rx_jmb[di].std;
6240 dbd->addr_hi = sbd->addr_hi;
6241 dbd->addr_lo = sbd->addr_lo;
6244 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6245 tp->rx_jmb_ring_mask;
6246 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6247 tp->rx_jmb_ring_mask;
6250 return err;
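/* Copy-count sketch for the transfer above: with a 512-entry ring
 * (mask 511), cons_idx = 500 and prod_idx = 20 means the source has
 * wrapped, so the first pass copies the 12 entries up to the end of
 * the ring and the next iteration of while (1) picks up the remaining
 * 20 from index 0; the min() additionally clamps cpycnt to the
 * contiguous run left in the destination ring so each memcpy() stays
 * linear.
 */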
6253 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6255 struct tg3 *tp = tnapi->tp;
6257 /* run TX completion thread */
6258 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6259 tg3_tx(tnapi);
6260 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6261 return work_done;
6264 if (!tnapi->rx_rcb_prod_idx)
6265 return work_done;
6267 /* run RX thread, within the bounds set by NAPI.
6268 * All RX "locking" is done by ensuring outside
6269 * code synchronizes with tg3->napi.poll()
6271 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6272 work_done += tg3_rx(tnapi, budget - work_done);
6274 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6275 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6276 int i, err = 0;
6277 u32 std_prod_idx = dpr->rx_std_prod_idx;
6278 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6280 tp->rx_refill = false;
6281 for (i = 1; i <= tp->rxq_cnt; i++)
6282 err |= tg3_rx_prodring_xfer(tp, dpr,
6283 &tp->napi[i].prodring);
6285 wmb();
6287 if (std_prod_idx != dpr->rx_std_prod_idx)
6288 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6289 dpr->rx_std_prod_idx);
6291 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6292 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6293 dpr->rx_jmb_prod_idx);
6295 mmiowb();
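/* If any prodring transfer above failed with -ENOSPC, prod the host
 * coalescing engine (tp->coal_now) to raise another interrupt so the
 * transfer is retried on a later poll.
 */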
6297 if (err)
6298 tw32_f(HOSTCC_MODE, tp->coal_now);
6301 return work_done;
6304 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6306 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6307 schedule_work(&tp->reset_task);
6310 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6312 cancel_work_sync(&tp->reset_task);
6313 tg3_flag_clear(tp, RESET_TASK_PENDING);
6314 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6317 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6319 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6320 struct tg3 *tp = tnapi->tp;
6321 int work_done = 0;
6322 struct tg3_hw_status *sblk = tnapi->hw_status;
6324 while (1) {
6325 work_done = tg3_poll_work(tnapi, work_done, budget);
6327 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6328 goto tx_recovery;
6330 if (unlikely(work_done >= budget))
6331 break;
6333 /* tp->last_tag is used in tg3_int_reenable() below
6334 * to tell the hw how much work has been processed,
6335 * so we must read it before checking for more work.
6337 tnapi->last_tag = sblk->status_tag;
6338 tnapi->last_irq_tag = tnapi->last_tag;
6339 rmb();
6341 /* check for RX/TX work to do */
6342 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6343 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6345 /* This test here is not race-free, but will reduce
6346 * the number of interrupts by looping again.
6348 if (tnapi == &tp->napi[1] && tp->rx_refill)
6349 continue;
6351 napi_complete(napi);
6352 /* Reenable interrupts. */
6353 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6355 /* This test here is synchronized by napi_schedule()
6356 * and napi_complete() to close the race condition.
6358 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6359 tw32(HOSTCC_MODE, tp->coalesce_mode |
6360 HOSTCC_MODE_ENABLE |
6361 tnapi->coal_now);
6363 mmiowb();
6364 break;
6368 return work_done;
6370 tx_recovery:
6371 /* work_done is guaranteed to be less than budget. */
6372 napi_complete(napi);
6373 tg3_reset_task_schedule(tp);
6374 return work_done;
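/* Writing last_tag << 24 to the interrupt mailbox (above) returns the
 * most recently processed status tag to the hardware, which re-arms
 * the interrupt and fires again only for newer status updates.
 */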
6377 static void tg3_process_error(struct tg3 *tp)
6379 u32 val;
6380 bool real_error = false;
6382 if (tg3_flag(tp, ERROR_PROCESSED))
6383 return;
6385 /* Check Flow Attention register */
6386 val = tr32(HOSTCC_FLOW_ATTN);
6387 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6388 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6389 real_error = true;
6392 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6393 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6394 real_error = true;
6397 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6398 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6399 real_error = true;
6402 if (!real_error)
6403 return;
6405 tg3_dump_state(tp);
6407 tg3_flag_set(tp, ERROR_PROCESSED);
6408 tg3_reset_task_schedule(tp);
6411 static int tg3_poll(struct napi_struct *napi, int budget)
6413 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6414 struct tg3 *tp = tnapi->tp;
6415 int work_done = 0;
6416 struct tg3_hw_status *sblk = tnapi->hw_status;
6418 while (1) {
6419 if (sblk->status & SD_STATUS_ERROR)
6420 tg3_process_error(tp);
6422 tg3_poll_link(tp);
6424 work_done = tg3_poll_work(tnapi, work_done, budget);
6426 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6427 goto tx_recovery;
6429 if (unlikely(work_done >= budget))
6430 break;
6432 if (tg3_flag(tp, TAGGED_STATUS)) {
6433 /* tp->last_tag is used in tg3_int_reenable() below
6434 * to tell the hw how much work has been processed,
6435 * so we must read it before checking for more work.
6437 tnapi->last_tag = sblk->status_tag;
6438 tnapi->last_irq_tag = tnapi->last_tag;
6439 rmb();
6440 } else
6441 sblk->status &= ~SD_STATUS_UPDATED;
6443 if (likely(!tg3_has_work(tnapi))) {
6444 napi_complete(napi);
6445 tg3_int_reenable(tnapi);
6446 break;
6450 return work_done;
6452 tx_recovery:
6453 /* work_done is guaranteed to be less than budget. */
6454 napi_complete(napi);
6455 tg3_reset_task_schedule(tp);
6456 return work_done;
6459 static void tg3_napi_disable(struct tg3 *tp)
6461 int i;
6463 for (i = tp->irq_cnt - 1; i >= 0; i--)
6464 napi_disable(&tp->napi[i].napi);
6467 static void tg3_napi_enable(struct tg3 *tp)
6469 int i;
6471 for (i = 0; i < tp->irq_cnt; i++)
6472 napi_enable(&tp->napi[i].napi);
6475 static void tg3_napi_init(struct tg3 *tp)
6477 int i;
6479 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6480 for (i = 1; i < tp->irq_cnt; i++)
6481 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6484 static void tg3_napi_fini(struct tg3 *tp)
6486 int i;
6488 for (i = 0; i < tp->irq_cnt; i++)
6489 netif_napi_del(&tp->napi[i].napi);
6492 static inline void tg3_netif_stop(struct tg3 *tp)
6494 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6495 tg3_napi_disable(tp);
6496 netif_tx_disable(tp->dev);
6499 static inline void tg3_netif_start(struct tg3 *tp)
6501 /* NOTE: unconditional netif_tx_wake_all_queues is only
6502 * appropriate so long as all callers are assured to
6503 * have free tx slots (such as after tg3_init_hw)
6505 netif_tx_wake_all_queues(tp->dev);
6507 tg3_napi_enable(tp);
6508 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6509 tg3_enable_ints(tp);
6512 static void tg3_irq_quiesce(struct tg3 *tp)
6514 int i;
6516 BUG_ON(tp->irq_sync);
6518 tp->irq_sync = 1;
6519 smp_mb();
6521 for (i = 0; i < tp->irq_cnt; i++)
6522 synchronize_irq(tp->napi[i].irq_vec);
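/* The smp_mb() above makes the irq_sync store visible before the
 * synchronize_irq() calls; the interrupt handlers check it via
 * tg3_irq_sync() and bail out while a quiesce is in progress.
 */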
6525 /* Fully shut down all tg3 driver activity elsewhere in the system.
6526 * If irq_sync is non-zero, the IRQ handlers must be quiesced as
6527 * well. This is normally only necessary when shutting down
6528 * the device.
6530 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6532 spin_lock_bh(&tp->lock);
6533 if (irq_sync)
6534 tg3_irq_quiesce(tp);
6537 static inline void tg3_full_unlock(struct tg3 *tp)
6539 spin_unlock_bh(&tp->lock);
6542 /* One-shot MSI handler - Chip automatically disables interrupt
6543 * after sending MSI so driver doesn't have to do it.
6545 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6547 struct tg3_napi *tnapi = dev_id;
6548 struct tg3 *tp = tnapi->tp;
6550 prefetch(tnapi->hw_status);
6551 if (tnapi->rx_rcb)
6552 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6554 if (likely(!tg3_irq_sync(tp)))
6555 napi_schedule(&tnapi->napi);
6557 return IRQ_HANDLED;
6560 /* MSI ISR - No need to check for interrupt sharing and no need to
6561 * flush status block and interrupt mailbox. PCI ordering rules
6562 * guarantee that MSI will arrive after the status block.
6564 static irqreturn_t tg3_msi(int irq, void *dev_id)
6566 struct tg3_napi *tnapi = dev_id;
6567 struct tg3 *tp = tnapi->tp;
6569 prefetch(tnapi->hw_status);
6570 if (tnapi->rx_rcb)
6571 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6573 * Writing any value to intr-mbox-0 clears PCI INTA# and
6574 * chip-internal interrupt pending events.
6575 * Writing non-zero to intr-mbox-0 additionally tells the
6576 * NIC to stop sending us irqs, engaging "in-intr-handler"
6577 * event coalescing.
6579 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6580 if (likely(!tg3_irq_sync(tp)))
6581 napi_schedule(&tnapi->napi);
6583 return IRQ_RETVAL(1);
6586 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6588 struct tg3_napi *tnapi = dev_id;
6589 struct tg3 *tp = tnapi->tp;
6590 struct tg3_hw_status *sblk = tnapi->hw_status;
6591 unsigned int handled = 1;
6593 /* In INTx mode, it is possible for the interrupt to arrive at
6594 * the CPU before the status block posted prior to the interrupt
6595 * has arrived. Reading the PCI State register will confirm whether
6596 * the interrupt is ours and will flush the status block.
6598 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6599 if (tg3_flag(tp, CHIP_RESETTING) ||
6600 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6601 handled = 0;
6602 goto out;
6607 * Writing any value to intr-mbox-0 clears PCI INTA# and
6608 * chip-internal interrupt pending events.
6609 * Writing non-zero to intr-mbox-0 additionally tells the
6610 * NIC to stop sending us irqs, engaging "in-intr-handler"
6611 * event coalescing.
6613 * Flush the mailbox to de-assert the IRQ immediately to prevent
6614 * spurious interrupts. The flush impacts performance but
6615 * excessive spurious interrupts can be worse in some cases.
6617 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6618 if (tg3_irq_sync(tp))
6619 goto out;
6620 sblk->status &= ~SD_STATUS_UPDATED;
6621 if (likely(tg3_has_work(tnapi))) {
6622 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6623 napi_schedule(&tnapi->napi);
6624 } else {
6625 /* No work, shared interrupt perhaps? re-enable
6626 * interrupts, and flush that PCI write
6628 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6629 0x00000000);
6631 out:
6632 return IRQ_RETVAL(handled);
6635 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6637 struct tg3_napi *tnapi = dev_id;
6638 struct tg3 *tp = tnapi->tp;
6639 struct tg3_hw_status *sblk = tnapi->hw_status;
6640 unsigned int handled = 1;
6642 /* In INTx mode, it is possible for the interrupt to arrive at
6643 * the CPU before the status block posted prior to the interrupt
6644 * has arrived. Reading the PCI State register will confirm whether
6645 * the interrupt is ours and will flush the status block.
6647 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6648 if (tg3_flag(tp, CHIP_RESETTING) ||
6649 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6650 handled = 0;
6651 goto out;
6656 * writing any value to intr-mbox-0 clears PCI INTA# and
6657 * chip-internal interrupt pending events.
6658 * writing non-zero to intr-mbox-0 additionally tells the
6659 * NIC to stop sending us irqs, engaging "in-intr-handler"
6660 * event coalescing.
6662 * Flush the mailbox to de-assert the IRQ immediately to prevent
6663 * spurious interrupts. The flush impacts performance but
6664 * excessive spurious interrupts can be worse in some cases.
6666 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6669 * In a shared interrupt configuration, sometimes other devices'
6670 * interrupts will scream. We record the current status tag here
6671 * so that the above check can report that the screaming interrupts
6672 * are unhandled. Eventually they will be silenced.
6674 tnapi->last_irq_tag = sblk->status_tag;
6676 if (tg3_irq_sync(tp))
6677 goto out;
6679 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6681 napi_schedule(&tnapi->napi);
6683 out:
6684 return IRQ_RETVAL(handled);
6687 /* ISR for interrupt test */
6688 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6690 struct tg3_napi *tnapi = dev_id;
6691 struct tg3 *tp = tnapi->tp;
6692 struct tg3_hw_status *sblk = tnapi->hw_status;
6694 if ((sblk->status & SD_STATUS_UPDATED) ||
6695 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6696 tg3_disable_ints(tp);
6697 return IRQ_RETVAL(1);
6699 return IRQ_RETVAL(0);
6702 #ifdef CONFIG_NET_POLL_CONTROLLER
6703 static void tg3_poll_controller(struct net_device *dev)
6705 int i;
6706 struct tg3 *tp = netdev_priv(dev);
6708 for (i = 0; i < tp->irq_cnt; i++)
6709 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6711 #endif
6713 static void tg3_tx_timeout(struct net_device *dev)
6715 struct tg3 *tp = netdev_priv(dev);
6717 if (netif_msg_tx_err(tp)) {
6718 netdev_err(dev, "transmit timed out, resetting\n");
6719 tg3_dump_state(tp);
6722 tg3_reset_task_schedule(tp);
6725 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6726 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6728 u32 base = (u32) mapping & 0xffffffff;
6730 return (base > 0xffffdcc0) && (base + len + 8 < base);
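/* Worked example: base = 0xfffff000, len = 0x2000.  In 32-bit
 * arithmetic base + len + 8 wraps to 0x1008 < base, so the buffer
 * would straddle a 4GB boundary and must take the workaround path.
 */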
6733 /* Test for DMA addresses > 40-bit */
6734 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6735 int len)
6737 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6738 if (tg3_flag(tp, 40BIT_DMA_BUG))
6739 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6740 return 0;
6741 #else
6742 return 0;
6743 #endif
6746 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6747 dma_addr_t mapping, u32 len, u32 flags,
6748 u32 mss, u32 vlan)
6750 txbd->addr_hi = ((u64) mapping >> 32);
6751 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6752 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6753 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6756 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6757 dma_addr_t map, u32 len, u32 flags,
6758 u32 mss, u32 vlan)
6760 struct tg3 *tp = tnapi->tp;
6761 bool hwbug = false;
6763 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6764 hwbug = true;
6766 if (tg3_4g_overflow_test(map, len))
6767 hwbug = true;
6769 if (tg3_40bit_overflow_test(tp, map, len))
6770 hwbug = true;
6772 if (tp->dma_limit) {
6773 u32 prvidx = *entry;
6774 u32 tmp_flag = flags & ~TXD_FLAG_END;
6775 while (len > tp->dma_limit && *budget) {
6776 u32 frag_len = tp->dma_limit;
6777 len -= tp->dma_limit;
6779 /* Avoid the 8-byte DMA problem */
6780 if (len <= 8) {
6781 len += tp->dma_limit / 2;
6782 frag_len = tp->dma_limit / 2;
6785 tnapi->tx_buffers[*entry].fragmented = true;
6787 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6788 frag_len, tmp_flag, mss, vlan);
6789 *budget -= 1;
6790 prvidx = *entry;
6791 *entry = NEXT_TX(*entry);
6793 map += frag_len;
6796 if (len) {
6797 if (*budget) {
6798 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6799 len, flags, mss, vlan);
6800 *budget -= 1;
6801 *entry = NEXT_TX(*entry);
6802 } else {
6803 hwbug = true;
6804 tnapi->tx_buffers[prvidx].fragmented = false;
6807 } else {
6808 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6809 len, flags, mss, vlan);
6810 *entry = NEXT_TX(*entry);
6813 return hwbug;
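/* Split example with dma_limit = 4096 and len = 4100: emitting a full
 * 4096-byte chunk would leave a 4-byte tail (the short-DMA bug), so
 * the loop emits 2048 bytes instead and leaves 2052 for the final BD.
 */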
6816 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6818 int i;
6819 struct sk_buff *skb;
6820 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6822 skb = txb->skb;
6823 txb->skb = NULL;
6825 pci_unmap_single(tnapi->tp->pdev,
6826 dma_unmap_addr(txb, mapping),
6827 skb_headlen(skb),
6828 PCI_DMA_TODEVICE);
6830 while (txb->fragmented) {
6831 txb->fragmented = false;
6832 entry = NEXT_TX(entry);
6833 txb = &tnapi->tx_buffers[entry];
6836 for (i = 0; i <= last; i++) {
6837 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6839 entry = NEXT_TX(entry);
6840 txb = &tnapi->tx_buffers[entry];
6842 pci_unmap_page(tnapi->tp->pdev,
6843 dma_unmap_addr(txb, mapping),
6844 skb_frag_size(frag), PCI_DMA_TODEVICE);
6846 while (txb->fragmented) {
6847 txb->fragmented = false;
6848 entry = NEXT_TX(entry);
6849 txb = &tnapi->tx_buffers[entry];
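/* Entries flagged 'fragmented' were split out of a single DMA mapping
 * by tg3_tx_frag_set(), so only the head of each run is unmapped; the
 * while loops above merely clear the flags while skipping past them.
 */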
6854 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6855 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6856 struct sk_buff **pskb,
6857 u32 *entry, u32 *budget,
6858 u32 base_flags, u32 mss, u32 vlan)
6860 struct tg3 *tp = tnapi->tp;
6861 struct sk_buff *new_skb, *skb = *pskb;
6862 dma_addr_t new_addr = 0;
6863 int ret = 0;
6865 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6866 new_skb = skb_copy(skb, GFP_ATOMIC);
6867 else {
6868 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6870 new_skb = skb_copy_expand(skb,
6871 skb_headroom(skb) + more_headroom,
6872 skb_tailroom(skb), GFP_ATOMIC);
6875 if (!new_skb) {
6876 ret = -1;
6877 } else {
6878 /* New SKB is guaranteed to be linear. */
6879 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6880 PCI_DMA_TODEVICE);
6881 /* Make sure the mapping succeeded */
6882 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6883 dev_kfree_skb(new_skb);
6884 ret = -1;
6885 } else {
6886 u32 save_entry = *entry;
6888 base_flags |= TXD_FLAG_END;
6890 tnapi->tx_buffers[*entry].skb = new_skb;
6891 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6892 mapping, new_addr);
6894 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6895 new_skb->len, base_flags,
6896 mss, vlan)) {
6897 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6898 dev_kfree_skb(new_skb);
6899 ret = -1;
6904 dev_kfree_skb(skb);
6905 *pskb = new_skb;
6906 return ret;
6909 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6911 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6912 * TSO header is greater than 80 bytes.
6914 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6916 struct sk_buff *segs, *nskb;
6917 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6919 /* Estimate the number of fragments in the worst case */
6920 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6921 netif_stop_queue(tp->dev);
6923 /* netif_tx_stop_queue() must be done before checking
6924 * the tx index in tg3_tx_avail() below, because in
6925 * tg3_tx(), we update tx index before checking for
6926 * netif_tx_queue_stopped().
6928 smp_mb();
6929 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6930 return NETDEV_TX_BUSY;
6932 netif_wake_queue(tp->dev);
6935 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6936 if (IS_ERR(segs))
6937 goto tg3_tso_bug_end;
6939 do {
6940 nskb = segs;
6941 segs = segs->next;
6942 nskb->next = NULL;
6943 tg3_start_xmit(nskb, tp->dev);
6944 } while (segs);
6946 tg3_tso_bug_end:
6947 dev_kfree_skb(skb);
6949 return NETDEV_TX_OK;
6952 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6953 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6955 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6957 struct tg3 *tp = netdev_priv(dev);
6958 u32 len, entry, base_flags, mss, vlan = 0;
6959 u32 budget;
6960 int i = -1, would_hit_hwbug;
6961 dma_addr_t mapping;
6962 struct tg3_napi *tnapi;
6963 struct netdev_queue *txq;
6964 unsigned int last;
6966 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6967 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6968 if (tg3_flag(tp, ENABLE_TSS))
6969 tnapi++;
6971 budget = tg3_tx_avail(tnapi);
6973 /* We are running in BH disabled context with netif_tx_lock
6974 * and TX reclaim runs via tp->napi.poll inside of a software
6975 * interrupt. Furthermore, IRQ processing runs lockless so we have
6976 * no IRQ context deadlocks to worry about either. Rejoice!
6978 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6979 if (!netif_tx_queue_stopped(txq)) {
6980 netif_tx_stop_queue(txq);
6982 /* This is a hard error, log it. */
6983 netdev_err(dev,
6984 "BUG! Tx Ring full when queue awake!\n");
6986 return NETDEV_TX_BUSY;
6989 entry = tnapi->tx_prod;
6990 base_flags = 0;
6991 if (skb->ip_summed == CHECKSUM_PARTIAL)
6992 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6994 mss = skb_shinfo(skb)->gso_size;
6995 if (mss) {
6996 struct iphdr *iph;
6997 u32 tcp_opt_len, hdr_len;
6999 if (skb_header_cloned(skb) &&
7000 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7001 goto drop;
7003 iph = ip_hdr(skb);
7004 tcp_opt_len = tcp_optlen(skb);
7006 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7008 if (!skb_is_gso_v6(skb)) {
7009 iph->check = 0;
7010 iph->tot_len = htons(mss + hdr_len);
7013 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7014 tg3_flag(tp, TSO_BUG))
7015 return tg3_tso_bug(tp, skb);
7017 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7018 TXD_FLAG_CPU_POST_DMA);
7020 if (tg3_flag(tp, HW_TSO_1) ||
7021 tg3_flag(tp, HW_TSO_2) ||
7022 tg3_flag(tp, HW_TSO_3)) {
7023 tcp_hdr(skb)->check = 0;
7024 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7025 } else
7026 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7027 iph->daddr, 0,
7028 IPPROTO_TCP,
7031 if (tg3_flag(tp, HW_TSO_3)) {
7032 mss |= (hdr_len & 0xc) << 12;
7033 if (hdr_len & 0x10)
7034 base_flags |= 0x00000010;
7035 base_flags |= (hdr_len & 0x3e0) << 5;
7036 } else if (tg3_flag(tp, HW_TSO_2))
7037 mss |= hdr_len << 9;
7038 else if (tg3_flag(tp, HW_TSO_1) ||
7039 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7040 if (tcp_opt_len || iph->ihl > 5) {
7041 int tsflags;
7043 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7044 mss |= (tsflags << 11);
7046 } else {
7047 if (tcp_opt_len || iph->ihl > 5) {
7048 int tsflags;
7050 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7051 base_flags |= tsflags << 12;
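/* Note: on HW_TSO_3 devices the header length computed above is
 * scattered across the descriptor fields: hdr_len bits 2-3 land in
 * mss (<< 12), bit 4 in base_flags bit 4, and bits 5-9 in
 * base_flags (<< 5).
 */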
7056 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7057 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7058 base_flags |= TXD_FLAG_JMB_PKT;
7060 if (vlan_tx_tag_present(skb)) {
7061 base_flags |= TXD_FLAG_VLAN;
7062 vlan = vlan_tx_tag_get(skb);
7065 len = skb_headlen(skb);
7067 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7068 if (pci_dma_mapping_error(tp->pdev, mapping))
7069 goto drop;
7072 tnapi->tx_buffers[entry].skb = skb;
7073 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7075 would_hit_hwbug = 0;
7077 if (tg3_flag(tp, 5701_DMA_BUG))
7078 would_hit_hwbug = 1;
7080 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7081 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7082 mss, vlan)) {
7083 would_hit_hwbug = 1;
7084 } else if (skb_shinfo(skb)->nr_frags > 0) {
7085 u32 tmp_mss = mss;
7087 if (!tg3_flag(tp, HW_TSO_1) &&
7088 !tg3_flag(tp, HW_TSO_2) &&
7089 !tg3_flag(tp, HW_TSO_3))
7090 tmp_mss = 0;
7092 /* Now loop through additional data
7093 * fragments, and queue them.
7095 last = skb_shinfo(skb)->nr_frags - 1;
7096 for (i = 0; i <= last; i++) {
7097 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7099 len = skb_frag_size(frag);
7100 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7101 len, DMA_TO_DEVICE);
7103 tnapi->tx_buffers[entry].skb = NULL;
7104 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7105 mapping);
7106 if (dma_mapping_error(&tp->pdev->dev, mapping))
7107 goto dma_error;
7109 if (!budget ||
7110 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7111 len, base_flags |
7112 ((i == last) ? TXD_FLAG_END : 0),
7113 tmp_mss, vlan)) {
7114 would_hit_hwbug = 1;
7115 break;
7120 if (would_hit_hwbug) {
7121 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7123 /* If the workaround fails due to memory/mapping
7124 * failure, silently drop this packet.
7126 entry = tnapi->tx_prod;
7127 budget = tg3_tx_avail(tnapi);
7128 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7129 base_flags, mss, vlan))
7130 goto drop_nofree;
7133 skb_tx_timestamp(skb);
7134 netdev_tx_sent_queue(txq, skb->len);
7136 /* Sync BD data before updating mailbox */
7137 wmb();
7139 /* Packets are ready, update Tx producer idx local and on card. */
7140 tw32_tx_mbox(tnapi->prodmbox, entry);
7142 tnapi->tx_prod = entry;
7143 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7144 netif_tx_stop_queue(txq);
7146 /* netif_tx_stop_queue() must be done before checking
7147 * the tx index in tg3_tx_avail() below, because in
7148 * tg3_tx(), we update tx index before checking for
7149 * netif_tx_queue_stopped().
7151 smp_mb();
7152 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7153 netif_tx_wake_queue(txq);
7156 mmiowb();
7157 return NETDEV_TX_OK;
7159 dma_error:
7160 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7161 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7162 drop:
7163 dev_kfree_skb(skb);
7164 drop_nofree:
7165 tp->tx_dropped++;
7166 return NETDEV_TX_OK;
7169 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7171 if (enable) {
7172 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7173 MAC_MODE_PORT_MODE_MASK);
7175 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7177 if (!tg3_flag(tp, 5705_PLUS))
7178 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7180 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7181 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7182 else
7183 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7184 } else {
7185 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7187 if (tg3_flag(tp, 5705_PLUS) ||
7188 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7189 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7190 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7193 tw32(MAC_MODE, tp->mac_mode);
7194 udelay(40);
7197 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7199 u32 val, bmcr, mac_mode, ptest = 0;
7201 tg3_phy_toggle_apd(tp, false);
7202 tg3_phy_toggle_automdix(tp, 0);
7204 if (extlpbk && tg3_phy_set_extloopbk(tp))
7205 return -EIO;
7207 bmcr = BMCR_FULLDPLX;
7208 switch (speed) {
7209 case SPEED_10:
7210 break;
7211 case SPEED_100:
7212 bmcr |= BMCR_SPEED100;
7213 break;
7214 case SPEED_1000:
7215 default:
7216 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7217 speed = SPEED_100;
7218 bmcr |= BMCR_SPEED100;
7219 } else {
7220 speed = SPEED_1000;
7221 bmcr |= BMCR_SPEED1000;
7225 if (extlpbk) {
7226 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7227 tg3_readphy(tp, MII_CTRL1000, &val);
7228 val |= CTL1000_AS_MASTER |
7229 CTL1000_ENABLE_MASTER;
7230 tg3_writephy(tp, MII_CTRL1000, val);
7231 } else {
7232 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7233 MII_TG3_FET_PTEST_TRIM_2;
7234 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7236 } else
7237 bmcr |= BMCR_LOOPBACK;
7239 tg3_writephy(tp, MII_BMCR, bmcr);
7241 /* The write needs to be flushed for the FETs */
7242 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7243 tg3_readphy(tp, MII_BMCR, &bmcr);
7245 udelay(40);
7247 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7248 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7249 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7250 MII_TG3_FET_PTEST_FRC_TX_LINK |
7251 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7253 /* The write needs to be flushed for the AC131 */
7254 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7257 /* Reset to prevent losing 1st rx packet intermittently */
7258 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7259 tg3_flag(tp, 5780_CLASS)) {
7260 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7261 udelay(10);
7262 tw32_f(MAC_RX_MODE, tp->rx_mode);
7265 mac_mode = tp->mac_mode &
7266 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7267 if (speed == SPEED_1000)
7268 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7269 else
7270 mac_mode |= MAC_MODE_PORT_MODE_MII;
7272 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7273 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7275 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7276 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7277 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7278 mac_mode |= MAC_MODE_LINK_POLARITY;
7280 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7281 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7284 tw32(MAC_MODE, mac_mode);
7285 udelay(40);
7287 return 0;
7290 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7292 struct tg3 *tp = netdev_priv(dev);
7294 if (features & NETIF_F_LOOPBACK) {
7295 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7296 return;
7298 spin_lock_bh(&tp->lock);
7299 tg3_mac_loopback(tp, true);
7300 netif_carrier_on(tp->dev);
7301 spin_unlock_bh(&tp->lock);
7302 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7303 } else {
7304 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7305 return;
7307 spin_lock_bh(&tp->lock);
7308 tg3_mac_loopback(tp, false);
7309 /* Force link status check */
7310 tg3_setup_phy(tp, 1);
7311 spin_unlock_bh(&tp->lock);
7312 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7316 static netdev_features_t tg3_fix_features(struct net_device *dev,
7317 netdev_features_t features)
7319 struct tg3 *tp = netdev_priv(dev);
7321 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7322 features &= ~NETIF_F_ALL_TSO;
7324 return features;
7327 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7329 netdev_features_t changed = dev->features ^ features;
7331 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7332 tg3_set_loopback(dev, features);
7334 return 0;
7337 static void tg3_rx_prodring_free(struct tg3 *tp,
7338 struct tg3_rx_prodring_set *tpr)
7340 int i;
7342 if (tpr != &tp->napi[0].prodring) {
7343 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7344 i = (i + 1) & tp->rx_std_ring_mask)
7345 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7346 tp->rx_pkt_map_sz);
7348 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7349 for (i = tpr->rx_jmb_cons_idx;
7350 i != tpr->rx_jmb_prod_idx;
7351 i = (i + 1) & tp->rx_jmb_ring_mask) {
7352 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7353 TG3_RX_JMB_MAP_SZ);
7357 return;
7360 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7361 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7362 tp->rx_pkt_map_sz);
7364 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7365 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7366 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7367 TG3_RX_JMB_MAP_SZ);
7371 /* Initialize rx rings for packet processing.
7373 * The chip has been shut down and the driver detached from
7374 * the networking stack, so no interrupts or new tx packets will
7375 * end up in the driver. tp->{tx,}lock are held and thus
7376 * we may not sleep.
7378 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7379 struct tg3_rx_prodring_set *tpr)
7381 u32 i, rx_pkt_dma_sz;
7383 tpr->rx_std_cons_idx = 0;
7384 tpr->rx_std_prod_idx = 0;
7385 tpr->rx_jmb_cons_idx = 0;
7386 tpr->rx_jmb_prod_idx = 0;
7388 if (tpr != &tp->napi[0].prodring) {
7389 memset(&tpr->rx_std_buffers[0], 0,
7390 TG3_RX_STD_BUFF_RING_SIZE(tp));
7391 if (tpr->rx_jmb_buffers)
7392 memset(&tpr->rx_jmb_buffers[0], 0,
7393 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7394 goto done;
7397 /* Zero out all descriptors. */
7398 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7400 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7401 if (tg3_flag(tp, 5780_CLASS) &&
7402 tp->dev->mtu > ETH_DATA_LEN)
7403 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7404 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7406 /* Initialize invariants of the rings; we only set this
7407 * stuff once. This works because the card does not
7408 * write into the rx buffer posting rings.
7410 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7411 struct tg3_rx_buffer_desc *rxd;
7413 rxd = &tpr->rx_std[i];
7414 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7415 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7416 rxd->opaque = (RXD_OPAQUE_RING_STD |
7417 (i << RXD_OPAQUE_INDEX_SHIFT));
7420 /* Now allocate fresh SKBs for each rx ring. */
7421 for (i = 0; i < tp->rx_pending; i++) {
7422 unsigned int frag_size;
7424 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7425 &frag_size) < 0) {
7426 netdev_warn(tp->dev,
7427 "Using a smaller RX standard ring. Only "
7428 "%d out of %d buffers were allocated "
7429 "successfully\n", i, tp->rx_pending);
7430 if (i == 0)
7431 goto initfail;
7432 tp->rx_pending = i;
7433 break;
7437 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7438 goto done;
7440 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7442 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7443 goto done;
7445 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7446 struct tg3_rx_buffer_desc *rxd;
7448 rxd = &tpr->rx_jmb[i].std;
7449 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7450 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7451 RXD_FLAG_JUMBO;
7452 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7453 (i << RXD_OPAQUE_INDEX_SHIFT));
7456 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7457 unsigned int frag_size;
7459 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7460 &frag_size) < 0) {
7461 netdev_warn(tp->dev,
7462 "Using a smaller RX jumbo ring. Only %d "
7463 "out of %d buffers were allocated "
7464 "successfully\n", i, tp->rx_jumbo_pending);
7465 if (i == 0)
7466 goto initfail;
7467 tp->rx_jumbo_pending = i;
7468 break;
7472 done:
7473 return 0;
7475 initfail:
7476 tg3_rx_prodring_free(tp, tpr);
7477 return -ENOMEM;
7480 static void tg3_rx_prodring_fini(struct tg3 *tp,
7481 struct tg3_rx_prodring_set *tpr)
7483 kfree(tpr->rx_std_buffers);
7484 tpr->rx_std_buffers = NULL;
7485 kfree(tpr->rx_jmb_buffers);
7486 tpr->rx_jmb_buffers = NULL;
7487 if (tpr->rx_std) {
7488 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7489 tpr->rx_std, tpr->rx_std_mapping);
7490 tpr->rx_std = NULL;
7492 if (tpr->rx_jmb) {
7493 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7494 tpr->rx_jmb, tpr->rx_jmb_mapping);
7495 tpr->rx_jmb = NULL;
7499 static int tg3_rx_prodring_init(struct tg3 *tp,
7500 struct tg3_rx_prodring_set *tpr)
7502 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7503 GFP_KERNEL);
7504 if (!tpr->rx_std_buffers)
7505 return -ENOMEM;
7507 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7508 TG3_RX_STD_RING_BYTES(tp),
7509 &tpr->rx_std_mapping,
7510 GFP_KERNEL);
7511 if (!tpr->rx_std)
7512 goto err_out;
7514 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7515 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7516 GFP_KERNEL);
7517 if (!tpr->rx_jmb_buffers)
7518 goto err_out;
7520 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7521 TG3_RX_JMB_RING_BYTES(tp),
7522 &tpr->rx_jmb_mapping,
7523 GFP_KERNEL);
7524 if (!tpr->rx_jmb)
7525 goto err_out;
7528 return 0;
7530 err_out:
7531 tg3_rx_prodring_fini(tp, tpr);
7532 return -ENOMEM;
7535 /* Free up pending packets in all rx/tx rings.
7537 * The chip has been shut down and the driver detached from
7538 * the networking stack, so no interrupts or new tx packets will
7539 * end up in the driver. tp->{tx,}lock is not held and we are not
7540 * in an interrupt context and thus may sleep.
7542 static void tg3_free_rings(struct tg3 *tp)
7544 int i, j;
7546 for (j = 0; j < tp->irq_cnt; j++) {
7547 struct tg3_napi *tnapi = &tp->napi[j];
7549 tg3_rx_prodring_free(tp, &tnapi->prodring);
7551 if (!tnapi->tx_buffers)
7552 continue;
7554 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7555 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7557 if (!skb)
7558 continue;
7560 tg3_tx_skb_unmap(tnapi, i,
7561 skb_shinfo(skb)->nr_frags - 1);
7563 dev_kfree_skb_any(skb);
7565 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7569 /* Initialize tx/rx rings for packet processing.
7571 * The chip has been shut down and the driver detached from
7572 * the networking stack, so no interrupts or new tx packets will
7573 * end up in the driver. tp->{tx,}lock are held and thus
7574 * we may not sleep.
7576 static int tg3_init_rings(struct tg3 *tp)
7578 int i;
7580 /* Free up all the SKBs. */
7581 tg3_free_rings(tp);
7583 for (i = 0; i < tp->irq_cnt; i++) {
7584 struct tg3_napi *tnapi = &tp->napi[i];
7586 tnapi->last_tag = 0;
7587 tnapi->last_irq_tag = 0;
7588 tnapi->hw_status->status = 0;
7589 tnapi->hw_status->status_tag = 0;
7590 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7592 tnapi->tx_prod = 0;
7593 tnapi->tx_cons = 0;
7594 if (tnapi->tx_ring)
7595 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7597 tnapi->rx_rcb_ptr = 0;
7598 if (tnapi->rx_rcb)
7599 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7601 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7602 tg3_free_rings(tp);
7603 return -ENOMEM;
7607 return 0;
7610 static void tg3_mem_tx_release(struct tg3 *tp)
7612 int i;
7614 for (i = 0; i < tp->irq_max; i++) {
7615 struct tg3_napi *tnapi = &tp->napi[i];
7617 if (tnapi->tx_ring) {
7618 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7619 tnapi->tx_ring, tnapi->tx_desc_mapping);
7620 tnapi->tx_ring = NULL;
7623 kfree(tnapi->tx_buffers);
7624 tnapi->tx_buffers = NULL;
7628 static int tg3_mem_tx_acquire(struct tg3 *tp)
7630 int i;
7631 struct tg3_napi *tnapi = &tp->napi[0];
7633 /* If multivector TSS is enabled, vector 0 does not handle
7634 * tx interrupts. Don't allocate any resources for it.
7636 if (tg3_flag(tp, ENABLE_TSS))
7637 tnapi++;
7639 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7640 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7641 TG3_TX_RING_SIZE, GFP_KERNEL);
7642 if (!tnapi->tx_buffers)
7643 goto err_out;
7645 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7646 TG3_TX_RING_BYTES,
7647 &tnapi->tx_desc_mapping,
7648 GFP_KERNEL);
7649 if (!tnapi->tx_ring)
7650 goto err_out;
7653 return 0;
7655 err_out:
7656 tg3_mem_tx_release(tp);
7657 return -ENOMEM;
7660 static void tg3_mem_rx_release(struct tg3 *tp)
7662 int i;
7664 for (i = 0; i < tp->irq_max; i++) {
7665 struct tg3_napi *tnapi = &tp->napi[i];
7667 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7669 if (!tnapi->rx_rcb)
7670 continue;
7672 dma_free_coherent(&tp->pdev->dev,
7673 TG3_RX_RCB_RING_BYTES(tp),
7674 tnapi->rx_rcb,
7675 tnapi->rx_rcb_mapping);
7676 tnapi->rx_rcb = NULL;
7680 static int tg3_mem_rx_acquire(struct tg3 *tp)
7682 unsigned int i, limit;
7684 limit = tp->rxq_cnt;
7686 /* If RSS is enabled, we need a (dummy) producer ring
7687 * set on vector zero. This is the true hw prodring.
7689 if (tg3_flag(tp, ENABLE_RSS))
7690 limit++;
7692 for (i = 0; i < limit; i++) {
7693 struct tg3_napi *tnapi = &tp->napi[i];
7695 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7696 goto err_out;
7698 /* If multivector RSS is enabled, vector 0
7699 * does not handle rx or tx interrupts.
7700 * Don't allocate any resources for it.
7702 if (!i && tg3_flag(tp, ENABLE_RSS))
7703 continue;
7705 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7706 TG3_RX_RCB_RING_BYTES(tp),
7707 &tnapi->rx_rcb_mapping,
7708 GFP_KERNEL);
7709 if (!tnapi->rx_rcb)
7710 goto err_out;
7712 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7715 return 0;
7717 err_out:
7718 tg3_mem_rx_release(tp);
7719 return -ENOMEM;
7723 * Must not be invoked with interrupt sources disabled and
7724 * the hardware shut down.
7726 static void tg3_free_consistent(struct tg3 *tp)
7728 int i;
7730 for (i = 0; i < tp->irq_cnt; i++) {
7731 struct tg3_napi *tnapi = &tp->napi[i];
7733 if (tnapi->hw_status) {
7734 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7735 tnapi->hw_status,
7736 tnapi->status_mapping);
7737 tnapi->hw_status = NULL;
7741 tg3_mem_rx_release(tp);
7742 tg3_mem_tx_release(tp);
7744 if (tp->hw_stats) {
7745 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7746 tp->hw_stats, tp->stats_mapping);
7747 tp->hw_stats = NULL;
7752 * Must not be invoked with interrupt sources disabled and
7753 * the hardware shut down. Can sleep.
7755 static int tg3_alloc_consistent(struct tg3 *tp)
7757 int i;
7759 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7760 sizeof(struct tg3_hw_stats),
7761 &tp->stats_mapping,
7762 GFP_KERNEL);
7763 if (!tp->hw_stats)
7764 goto err_out;
7766 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7768 for (i = 0; i < tp->irq_cnt; i++) {
7769 struct tg3_napi *tnapi = &tp->napi[i];
7770 struct tg3_hw_status *sblk;
7772 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7773 TG3_HW_STATUS_SIZE,
7774 &tnapi->status_mapping,
7775 GFP_KERNEL);
7776 if (!tnapi->hw_status)
7777 goto err_out;
7779 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7780 sblk = tnapi->hw_status;
7782 if (tg3_flag(tp, ENABLE_RSS)) {
7783 u16 *prodptr = NULL;
7786 * When RSS is enabled, the status block format changes
7787 * slightly. The "rx_jumbo_consumer", "reserved",
7788 * and "rx_mini_consumer" members get mapped to the
7789 * other three rx return ring producer indexes.
7791 switch (i) {
7792 case 1:
7793 prodptr = &sblk->idx[0].rx_producer;
7794 break;
7795 case 2:
7796 prodptr = &sblk->rx_jumbo_consumer;
7797 break;
7798 case 3:
7799 prodptr = &sblk->reserved;
7800 break;
7801 case 4:
7802 prodptr = &sblk->rx_mini_consumer;
7803 break;
7805 tnapi->rx_rcb_prod_idx = prodptr;
7806 } else {
7807 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7811 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
7812 goto err_out;
7814 return 0;
7816 err_out:
7817 tg3_free_consistent(tp);
7818 return -ENOMEM;
7821 #define MAX_WAIT_CNT 1000
7823 /* To stop a block, clear the enable bit and poll till it
7824 * clears. tp->lock is held.
7826 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7828 unsigned int i;
7829 u32 val;
7831 if (tg3_flag(tp, 5705_PLUS)) {
7832 switch (ofs) {
7833 case RCVLSC_MODE:
7834 case DMAC_MODE:
7835 case MBFREE_MODE:
7836 case BUFMGR_MODE:
7837 case MEMARB_MODE:
7838 /* We can't enable/disable these bits of the
7839 * 5705/5750, just say success.
7841 return 0;
7843 default:
7844 break;
7848 val = tr32(ofs);
7849 val &= ~enable_bit;
7850 tw32_f(ofs, val);
7852 for (i = 0; i < MAX_WAIT_CNT; i++) {
7853 udelay(100);
7854 val = tr32(ofs);
7855 if ((val & enable_bit) == 0)
7856 break;
7859 if (i == MAX_WAIT_CNT && !silent) {
7860 dev_err(&tp->pdev->dev,
7861 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7862 ofs, enable_bit);
7863 return -ENODEV;
7866 return 0;
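/* Worst case, the loop above polls for MAX_WAIT_CNT * 100us = 100ms
 * before declaring the block stuck.
 */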
7869 /* tp->lock is held. */
7870 static int tg3_abort_hw(struct tg3 *tp, int silent)
7872 int i, err;
7874 tg3_disable_ints(tp);
7876 tp->rx_mode &= ~RX_MODE_ENABLE;
7877 tw32_f(MAC_RX_MODE, tp->rx_mode);
7878 udelay(10);
7880 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7881 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7882 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7883 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7884 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7885 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7887 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7888 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7889 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7890 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7891 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7892 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7893 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7895 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7896 tw32_f(MAC_MODE, tp->mac_mode);
7897 udelay(40);
7899 tp->tx_mode &= ~TX_MODE_ENABLE;
7900 tw32_f(MAC_TX_MODE, tp->tx_mode);
7902 for (i = 0; i < MAX_WAIT_CNT; i++) {
7903 udelay(100);
7904 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7905 break;
7907 if (i >= MAX_WAIT_CNT) {
7908 dev_err(&tp->pdev->dev,
7909 "%s timed out, TX_MODE_ENABLE will not clear "
7910 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7911 err |= -ENODEV;
7914 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7915 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7916 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7918 tw32(FTQ_RESET, 0xffffffff);
7919 tw32(FTQ_RESET, 0x00000000);
7921 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7922 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7924 for (i = 0; i < tp->irq_cnt; i++) {
7925 struct tg3_napi *tnapi = &tp->napi[i];
7926 if (tnapi->hw_status)
7927 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7930 return err;
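/* The per-block errors above are OR-ed together; with 'silent' set,
 * callers such as tg3_halt() tolerate blocks that never acknowledge
 * the disable.
 */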
7933 /* Save PCI command register before chip reset */
7934 static void tg3_save_pci_state(struct tg3 *tp)
7936 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7939 /* Restore PCI state after chip reset */
7940 static void tg3_restore_pci_state(struct tg3 *tp)
7942 u32 val;
7944 /* Re-enable indirect register accesses. */
7945 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7946 tp->misc_host_ctrl);
7948 /* Set MAX PCI retry to zero. */
7949 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7950 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7951 tg3_flag(tp, PCIX_MODE))
7952 val |= PCISTATE_RETRY_SAME_DMA;
7953 /* Allow reads and writes to the APE register and memory space. */
7954 if (tg3_flag(tp, ENABLE_APE))
7955 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7956 PCISTATE_ALLOW_APE_SHMEM_WR |
7957 PCISTATE_ALLOW_APE_PSPACE_WR;
7958 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7960 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7962 if (!tg3_flag(tp, PCI_EXPRESS)) {
7963 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7964 tp->pci_cacheline_sz);
7965 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7966 tp->pci_lat_timer);
7969 /* Make sure PCI-X relaxed ordering bit is clear. */
7970 if (tg3_flag(tp, PCIX_MODE)) {
7971 u16 pcix_cmd;
7973 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7974 &pcix_cmd);
7975 pcix_cmd &= ~PCI_X_CMD_ERO;
7976 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7977 pcix_cmd);
7980 if (tg3_flag(tp, 5780_CLASS)) {
7982 /* Chip reset on 5780 will reset MSI enable bit,
7983 * so we need to restore it.
7985 if (tg3_flag(tp, USING_MSI)) {
7986 u16 ctrl;
7988 pci_read_config_word(tp->pdev,
7989 tp->msi_cap + PCI_MSI_FLAGS,
7990 &ctrl);
7991 pci_write_config_word(tp->pdev,
7992 tp->msi_cap + PCI_MSI_FLAGS,
7993 ctrl | PCI_MSI_FLAGS_ENABLE);
7994 val = tr32(MSGINT_MODE);
7995 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8000 /* tp->lock is held. */
8001 static int tg3_chip_reset(struct tg3 *tp)
8003 u32 val;
8004 void (*write_op)(struct tg3 *, u32, u32);
8005 int i, err;
8007 tg3_nvram_lock(tp);
8009 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8011 /* No matching tg3_nvram_unlock() after this because
8012 * chip reset below will undo the nvram lock.
8014 tp->nvram_lock_cnt = 0;
8016 /* GRC_MISC_CFG core clock reset will clear the memory
8017 * enable bit in PCI register 4 and the MSI enable bit
8018 * on some chips, so we save relevant registers here.
8020 tg3_save_pci_state(tp);
8022 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8023 tg3_flag(tp, 5755_PLUS))
8024 tw32(GRC_FASTBOOT_PC, 0);
8027 * We must avoid the readl() that normally takes place.
8028 * It locks machines, causes machine checks, and other
8029 * fun things. So, temporarily disable the 5701
8030 * hardware workaround, while we do the reset.
8032 write_op = tp->write32;
8033 if (write_op == tg3_write_flush_reg32)
8034 tp->write32 = tg3_write32;
8036 /* Prevent the irq handler from reading or writing PCI registers
8037 * during chip reset when the memory enable bit in the PCI command
8038 * register may be cleared. The chip does not generate interrupt
8039 * at this time, but the irq handler may still be called due to irq
8040 * sharing or irqpoll.
8042 tg3_flag_set(tp, CHIP_RESETTING);
8043 for (i = 0; i < tp->irq_cnt; i++) {
8044 struct tg3_napi *tnapi = &tp->napi[i];
8045 if (tnapi->hw_status) {
8046 tnapi->hw_status->status = 0;
8047 tnapi->hw_status->status_tag = 0;
8049 tnapi->last_tag = 0;
8050 tnapi->last_irq_tag = 0;
8052 smp_mb();
8054 for (i = 0; i < tp->irq_cnt; i++)
8055 synchronize_irq(tp->napi[i].irq_vec);
8057 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8058 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8059 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8062 /* do the reset */
8063 val = GRC_MISC_CFG_CORECLK_RESET;
8065 if (tg3_flag(tp, PCI_EXPRESS)) {
8066 /* Force PCIe 1.0a mode */
8067 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8068 !tg3_flag(tp, 57765_PLUS) &&
8069 tr32(TG3_PCIE_PHY_TSTCTL) ==
8070 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8071 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8073 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8074 tw32(GRC_MISC_CFG, (1 << 29));
8075 val |= (1 << 29);
8079 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8080 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8081 tw32(GRC_VCPU_EXT_CTRL,
8082 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8085 /* Manage gphy power for all CPMU-absent PCIe devices. */
8086 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8087 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8089 tw32(GRC_MISC_CFG, val);
8091 /* restore 5701 hardware bug workaround write method */
8092 tp->write32 = write_op;
8094 /* Unfortunately, we have to delay before the PCI read back.
8095 * Some 575X chips will not even respond to a PCI cfg access
8096 * when the reset command is given to the chip.
8098 * How do these hardware designers expect things to work
8099 * properly if the PCI write is posted for a long period
8100 * of time? It is always necessary to have some method by
8101 * which a register read back can occur to push the write
8102 * out which does the reset.
8104 * For most tg3 variants the trick below was working.
8105 * Ho hum...
8107 udelay(120);
8109 /* Flush PCI posted writes. The normal MMIO registers
8110 * are inaccessible at this time so this is the only
8111 * way to do this reliably (actually, this is no longer
8112 * the case, see above). I tried to use indirect
8113 * register read/write but this upset some 5701 variants.
8115 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8117 udelay(120);
8119 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
8120 u16 val16;
8122 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8123 int i;
8124 u32 cfg_val;
8126 /* Wait for link training to complete. */
8127 for (i = 0; i < 5000; i++)
8128 udelay(100);
8130 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8131 pci_write_config_dword(tp->pdev, 0xc4,
8132 cfg_val | (1 << 15));
8135 /* Clear the "no snoop" and "relaxed ordering" bits. */
8136 pci_read_config_word(tp->pdev,
8137 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
8138 &val16);
8139 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
8140 PCI_EXP_DEVCTL_NOSNOOP_EN);
8142 * Older PCIe devices only support the 128 byte
8143 * MPS setting. Enforce the restriction.
8145 if (!tg3_flag(tp, CPMU_PRESENT))
8146 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
8147 pci_write_config_word(tp->pdev,
8148 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
8149 val16);
8151 /* Clear error status */
8152 pci_write_config_word(tp->pdev,
8153 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
8154 PCI_EXP_DEVSTA_CED |
8155 PCI_EXP_DEVSTA_NFED |
8156 PCI_EXP_DEVSTA_FED |
8157 PCI_EXP_DEVSTA_URD);
8160 tg3_restore_pci_state(tp);
8162 tg3_flag_clear(tp, CHIP_RESETTING);
8163 tg3_flag_clear(tp, ERROR_PROCESSED);
8165 val = 0;
8166 if (tg3_flag(tp, 5780_CLASS))
8167 val = tr32(MEMARB_MODE);
8168 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8170 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8171 tg3_stop_fw(tp);
8172 tw32(0x5000, 0x400);
8175 tw32(GRC_MODE, tp->grc_mode);
8177 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8178 val = tr32(0xc4);
8180 tw32(0xc4, val | (1 << 15));
8183 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8184 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8185 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8186 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8187 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8188 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8191 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8192 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8193 val = tp->mac_mode;
8194 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8195 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8196 val = tp->mac_mode;
8197 } else
8198 val = 0;
8200 tw32_f(MAC_MODE, val);
8201 udelay(40);
8203 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8205 err = tg3_poll_fw(tp);
8206 if (err)
8207 return err;
8209 tg3_mdio_start(tp);
8211 if (tg3_flag(tp, PCI_EXPRESS) &&
8212 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8213 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8214 !tg3_flag(tp, 57765_PLUS)) {
8215 val = tr32(0x7c00);
8217 tw32(0x7c00, val | (1 << 25));
8220 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8221 val = tr32(TG3_CPMU_CLCK_ORIDE);
8222 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8225 /* Reprobe ASF enable state. */
8226 tg3_flag_clear(tp, ENABLE_ASF);
8227 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8228 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8229 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8230 u32 nic_cfg;
8232 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8233 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8234 tg3_flag_set(tp, ENABLE_ASF);
8235 tp->last_event_jiffies = jiffies;
8236 if (tg3_flag(tp, 5750_PLUS))
8237 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8241 return 0;
8244 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8245 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8247 /* tp->lock is held. */
8248 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8250 int err;
8252 tg3_stop_fw(tp);
8254 tg3_write_sig_pre_reset(tp, kind);
8256 tg3_abort_hw(tp, silent);
8257 err = tg3_chip_reset(tp);
8259 __tg3_set_mac_addr(tp, 0);
8261 tg3_write_sig_legacy(tp, kind);
8262 tg3_write_sig_post_reset(tp, kind);
8264 if (tp->hw_stats) {
8265 /* Save the stats across chip resets... */
8266 tg3_get_nstats(tp, &tp->net_stats_prev);
8267 tg3_get_estats(tp, &tp->estats_prev);
8269 /* And make sure the next sample is new data */
8270 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8273 if (err)
8274 return err;
8276 return 0;
8279 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8281 struct tg3 *tp = netdev_priv(dev);
8282 struct sockaddr *addr = p;
8283 int err = 0, skip_mac_1 = 0;
8285 if (!is_valid_ether_addr(addr->sa_data))
8286 return -EADDRNOTAVAIL;
8288 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8290 if (!netif_running(dev))
8291 return 0;
8293 if (tg3_flag(tp, ENABLE_ASF)) {
8294 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8296 addr0_high = tr32(MAC_ADDR_0_HIGH);
8297 addr0_low = tr32(MAC_ADDR_0_LOW);
8298 addr1_high = tr32(MAC_ADDR_1_HIGH);
8299 addr1_low = tr32(MAC_ADDR_1_LOW);
8301 /* Skip MAC addr 1 if ASF is using it. */
8302 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8303 !(addr1_high == 0 && addr1_low == 0))
8304 skip_mac_1 = 1;
8306 spin_lock_bh(&tp->lock);
8307 __tg3_set_mac_addr(tp, skip_mac_1);
8308 spin_unlock_bh(&tp->lock);
8310 return err;
8313 /* tp->lock is held. */
8314 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8315 dma_addr_t mapping, u32 maxlen_flags,
8316 u32 nic_addr)
8318 tg3_write_mem(tp,
8319 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8320 ((u64) mapping >> 32));
8321 tg3_write_mem(tp,
8322 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8323 ((u64) mapping & 0xffffffff));
8324 tg3_write_mem(tp,
8325 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8326 maxlen_flags);
8328 if (!tg3_flag(tp, 5705_PLUS))
8329 tg3_write_mem(tp,
8330 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8331 nic_addr);
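/* A BD info block in NIC SRAM is thus: the 64-bit host ring address
 * (high word first), a maxlen/flags word, and, on pre-5705 chips
 * only, a NIC-local ring address.
 */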
8335 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8337 int i = 0;
8339 if (!tg3_flag(tp, ENABLE_TSS)) {
8340 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8341 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8342 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8343 } else {
8344 tw32(HOSTCC_TXCOL_TICKS, 0);
8345 tw32(HOSTCC_TXMAX_FRAMES, 0);
8346 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8348 for (; i < tp->txq_cnt; i++) {
8349 u32 reg;
8351 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8352 tw32(reg, ec->tx_coalesce_usecs);
8353 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8354 tw32(reg, ec->tx_max_coalesced_frames);
8355 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8356 tw32(reg, ec->tx_max_coalesced_frames_irq);
8360 for (; i < tp->irq_max - 1; i++) {
8361 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8362 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8363 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
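/* Per-vector coalescing registers are strided 0x18 bytes apart; the
 * final loop zeroes the registers of vectors beyond txq_cnt.
 */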
8367 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8369 int i = 0;
8370 u32 limit = tp->rxq_cnt;
8372 if (!tg3_flag(tp, ENABLE_RSS)) {
8373 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8374 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8375 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8376 limit--;
8377 } else {
8378 tw32(HOSTCC_RXCOL_TICKS, 0);
8379 tw32(HOSTCC_RXMAX_FRAMES, 0);
8380 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8383 for (; i < limit; i++) {
8384 u32 reg;
8386 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8387 tw32(reg, ec->rx_coalesce_usecs);
8388 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8389 tw32(reg, ec->rx_max_coalesced_frames);
8390 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8391 tw32(reg, ec->rx_max_coalesced_frames_irq);
8394 for (; i < tp->irq_max - 1; i++) {
8395 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8396 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8397 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8401 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8403 tg3_coal_tx_init(tp, ec);
8404 tg3_coal_rx_init(tp, ec);
8406 if (!tg3_flag(tp, 5705_PLUS)) {
8407 u32 val = ec->stats_block_coalesce_usecs;
8409 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8410 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8412 if (!netif_carrier_ok(tp->dev))
8413 val = 0;
8415 tw32(HOSTCC_STAT_COAL_TICKS, val);
8419 /* tp->lock is held. */
8420 static void tg3_rings_reset(struct tg3 *tp)
8422 int i;
8423 u32 stblk, txrcb, rxrcb, limit;
8424 struct tg3_napi *tnapi = &tp->napi[0];
8426 /* Disable all transmit rings but the first. */
8427 if (!tg3_flag(tp, 5705_PLUS))
8428 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8429 else if (tg3_flag(tp, 5717_PLUS))
8430 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8431 else if (tg3_flag(tp, 57765_CLASS))
8432 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8433 else
8434 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8436 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8437 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8438 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8439 BDINFO_FLAGS_DISABLED);
8442 /* Disable all receive return rings but the first. */
8443 if (tg3_flag(tp, 5717_PLUS))
8444 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8445 else if (!tg3_flag(tp, 5705_PLUS))
8446 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8447 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8448 tg3_flag(tp, 57765_CLASS))
8449 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8450 else
8451 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8453 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8454 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8455 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8456 BDINFO_FLAGS_DISABLED);
8458 /* Disable interrupts */
8459 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8460 tp->napi[0].chk_msi_cnt = 0;
8461 tp->napi[0].last_rx_cons = 0;
8462 tp->napi[0].last_tx_cons = 0;
8464 /* Zero mailbox registers. */
8465 if (tg3_flag(tp, SUPPORT_MSIX)) {
8466 for (i = 1; i < tp->irq_max; i++) {
8467 tp->napi[i].tx_prod = 0;
8468 tp->napi[i].tx_cons = 0;
8469 if (tg3_flag(tp, ENABLE_TSS))
8470 tw32_mailbox(tp->napi[i].prodmbox, 0);
8471 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8472 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8473 tp->napi[i].chk_msi_cnt = 0;
8474 tp->napi[i].last_rx_cons = 0;
8475 tp->napi[i].last_tx_cons = 0;
8477 if (!tg3_flag(tp, ENABLE_TSS))
8478 tw32_mailbox(tp->napi[0].prodmbox, 0);
8479 } else {
8480 tp->napi[0].tx_prod = 0;
8481 tp->napi[0].tx_cons = 0;
8482 tw32_mailbox(tp->napi[0].prodmbox, 0);
8483 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8486 /* Make sure the NIC-based send BD rings are disabled. */
8487 if (!tg3_flag(tp, 5705_PLUS)) {
8488 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8489 for (i = 0; i < 16; i++)
8490 tw32_tx_mbox(mbox + i * 8, 0);
8493 txrcb = NIC_SRAM_SEND_RCB;
8494 rxrcb = NIC_SRAM_RCV_RET_RCB;
8496 /* Clear status block in ram. */
8497 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8499 /* Set status block DMA address */
8500 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8501 ((u64) tnapi->status_mapping >> 32));
8502 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8503 ((u64) tnapi->status_mapping & 0xffffffff));
8505 if (tnapi->tx_ring) {
8506 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8507 (TG3_TX_RING_SIZE <<
8508 BDINFO_FLAGS_MAXLEN_SHIFT),
8509 NIC_SRAM_TX_BUFFER_DESC);
8510 txrcb += TG3_BDINFO_SIZE;
8513 if (tnapi->rx_rcb) {
8514 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8515 (tp->rx_ret_ring_mask + 1) <<
8516 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8517 rxrcb += TG3_BDINFO_SIZE;
8520 stblk = HOSTCC_STATBLCK_RING1;
8522 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8523 u64 mapping = (u64)tnapi->status_mapping;
8524 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8525 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8527 /* Clear status block in ram. */
8528 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8530 if (tnapi->tx_ring) {
8531 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8532 (TG3_TX_RING_SIZE <<
8533 BDINFO_FLAGS_MAXLEN_SHIFT),
8534 NIC_SRAM_TX_BUFFER_DESC);
8535 txrcb += TG3_BDINFO_SIZE;
8538 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8539 ((tp->rx_ret_ring_mask + 1) <<
8540 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8542 stblk += 8;
8543 rxrcb += TG3_BDINFO_SIZE;
8547 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8549 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8551 if (!tg3_flag(tp, 5750_PLUS) ||
8552 tg3_flag(tp, 5780_CLASS) ||
8553 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8554 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8555 tg3_flag(tp, 57765_PLUS))
8556 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8557 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8558 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8559 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8560 else
8561 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8563 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8564 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8566 val = min(nic_rep_thresh, host_rep_thresh);
8567 tw32(RCVBDI_STD_THRESH, val);
8569 if (tg3_flag(tp, 57765_PLUS))
8570 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8572 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8573 return;
8575 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8577 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8579 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8580 tw32(RCVBDI_JUMBO_THRESH, val);
8582 if (tg3_flag(tp, 57765_PLUS))
8583 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
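/* Worked example (illustrative): with rx_pending == 200 the host
 * replenish threshold above is max(200 / 8, 1) == 25 buffers, and the
 * value programmed into RCVBDI_STD_THRESH is the smaller of that and
 * the NIC-side limit min(bdcache_maxcnt / 2, rx_std_max_post).
 */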
8586 static inline u32 calc_crc(unsigned char *buf, int len)
8588 u32 reg;
8589 u32 tmp;
8590 int j, k;
8592 reg = 0xffffffff;
8594 for (j = 0; j < len; j++) {
8595 reg ^= buf[j];
8597 for (k = 0; k < 8; k++) {
8598 tmp = reg & 0x01;
8600 reg >>= 1;
8602 if (tmp)
8603 reg ^= 0xedb88320;
8607 return ~reg;
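/* calc_crc() is a bit-serial CRC-32 over the reflected Ethernet
 * polynomial 0xedb88320, seeded with all ones and inverted on return;
 * the multicast filter code below consumes only its low-order bits.
 */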
8610 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8612 /* accept or reject all multicast frames */
8613 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8614 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8615 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8616 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8619 static void __tg3_set_rx_mode(struct net_device *dev)
8621 struct tg3 *tp = netdev_priv(dev);
8622 u32 rx_mode;
8624 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8625 RX_MODE_KEEP_VLAN_TAG);
8627 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8628 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8629 * flag clear.
8630 */
8631 if (!tg3_flag(tp, ENABLE_ASF))
8632 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8633 #endif
8635 if (dev->flags & IFF_PROMISC) {
8636 /* Promiscuous mode. */
8637 rx_mode |= RX_MODE_PROMISC;
8638 } else if (dev->flags & IFF_ALLMULTI) {
8639 /* Accept all multicast. */
8640 tg3_set_multi(tp, 1);
8641 } else if (netdev_mc_empty(dev)) {
8642 /* Reject all multicast. */
8643 tg3_set_multi(tp, 0);
8644 } else {
8645 /* Accept one or more multicast(s). */
8646 struct netdev_hw_addr *ha;
8647 u32 mc_filter[4] = { 0, };
8648 u32 regidx;
8649 u32 bit;
8650 u32 crc;
8652 netdev_for_each_mc_addr(ha, dev) {
8653 crc = calc_crc(ha->addr, ETH_ALEN);
8654 bit = ~crc & 0x7f;
8655 regidx = (bit & 0x60) >> 5;
8656 bit &= 0x1f;
8657 mc_filter[regidx] |= (1 << bit);
8660 tw32(MAC_HASH_REG_0, mc_filter[0]);
8661 tw32(MAC_HASH_REG_1, mc_filter[1]);
8662 tw32(MAC_HASH_REG_2, mc_filter[2]);
8663 tw32(MAC_HASH_REG_3, mc_filter[3]);
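/* The loop above maps each multicast address to one of 128 filter
 * bits: the complement of the CRC's low 7 bits selects the bit, with
 * bits 6:5 picking one of the four 32-bit MAC_HASH_REG_* registers
 * and bits 4:0 the bit position within it.
 */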
8666 if (rx_mode != tp->rx_mode) {
8667 tp->rx_mode = rx_mode;
8668 tw32_f(MAC_RX_MODE, rx_mode);
8669 udelay(10);
8673 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8675 int i;
8677 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8678 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8681 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8683 int i;
8685 if (!tg3_flag(tp, SUPPORT_MSIX))
8686 return;
8688 if (tp->irq_cnt <= 2) {
8689 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8690 return;
8693 /* Validate table against current IRQ count */
8694 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8695 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8696 break;
8699 if (i != TG3_RSS_INDIR_TBL_SIZE)
8700 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8703 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8705 int i = 0;
8706 u32 reg = MAC_RSS_INDIR_TBL_0;
8708 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8709 u32 val = tp->rss_ind_tbl[i];
8710 i++;
8711 for (; i % 8; i++) {
8712 val <<= 4;
8713 val |= tp->rss_ind_tbl[i];
8715 tw32(reg, val);
8716 reg += 4;
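/* Each 32-bit MAC_RSS_INDIR_TBL_* register packs eight 4-bit table
 * entries, most significant nibble first, so the 128-entry indirection
 * table occupies 16 consecutive registers.
 */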
8720 /* tp->lock is held. */
8721 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8723 u32 val, rdmac_mode;
8724 int i, err, limit;
8725 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8727 tg3_disable_ints(tp);
8729 tg3_stop_fw(tp);
8731 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8733 if (tg3_flag(tp, INIT_COMPLETE))
8734 tg3_abort_hw(tp, 1);
8736 /* Enable MAC control of LPI */
8737 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8738 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8739 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8740 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8742 tw32_f(TG3_CPMU_EEE_CTRL,
8743 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8745 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8746 TG3_CPMU_EEEMD_LPI_IN_TX |
8747 TG3_CPMU_EEEMD_LPI_IN_RX |
8748 TG3_CPMU_EEEMD_EEE_ENABLE;
8750 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8751 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8753 if (tg3_flag(tp, ENABLE_APE))
8754 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8756 tw32_f(TG3_CPMU_EEE_MODE, val);
8758 tw32_f(TG3_CPMU_EEE_DBTMR1,
8759 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8760 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8762 tw32_f(TG3_CPMU_EEE_DBTMR2,
8763 TG3_CPMU_DBTMR2_APE_TX_2047US |
8764 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8767 if (reset_phy)
8768 tg3_phy_reset(tp);
8770 err = tg3_chip_reset(tp);
8771 if (err)
8772 return err;
8774 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8776 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8777 val = tr32(TG3_CPMU_CTRL);
8778 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8779 tw32(TG3_CPMU_CTRL, val);
8781 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8782 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8783 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8784 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8786 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8787 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8788 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8789 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8791 val = tr32(TG3_CPMU_HST_ACC);
8792 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8793 val |= CPMU_HST_ACC_MACCLK_6_25;
8794 tw32(TG3_CPMU_HST_ACC, val);
8797 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8798 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8799 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8800 PCIE_PWR_MGMT_L1_THRESH_4MS;
8801 tw32(PCIE_PWR_MGMT_THRESH, val);
8803 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8804 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8806 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8808 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8809 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8812 if (tg3_flag(tp, L1PLLPD_EN)) {
8813 u32 grc_mode = tr32(GRC_MODE);
8815 /* Access the lower 1K of PL PCIE block registers. */
8816 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8817 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8819 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8820 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8821 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8823 tw32(GRC_MODE, grc_mode);
8826 if (tg3_flag(tp, 57765_CLASS)) {
8827 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8828 u32 grc_mode = tr32(GRC_MODE);
8830 /* Access the lower 1K of PL PCIE block registers. */
8831 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8832 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8834 val = tr32(TG3_PCIE_TLDLPL_PORT +
8835 TG3_PCIE_PL_LO_PHYCTL5);
8836 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8837 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8839 tw32(GRC_MODE, grc_mode);
8842 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8843 u32 grc_mode = tr32(GRC_MODE);
8845 /* Access the lower 1K of DL PCIE block registers. */
8846 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8847 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8849 val = tr32(TG3_PCIE_TLDLPL_PORT +
8850 TG3_PCIE_DL_LO_FTSMAX);
8851 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8852 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8853 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8855 tw32(GRC_MODE, grc_mode);
8858 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8859 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8860 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8861 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8864 /* This works around an issue with Athlon chipsets on
8865 * B3 tigon3 silicon. This bit has no effect on any
8866 * other revision. But do not set this on PCI Express
8867 * chips and don't even touch the clocks if the CPMU is present.
8868 */
8869 if (!tg3_flag(tp, CPMU_PRESENT)) {
8870 if (!tg3_flag(tp, PCI_EXPRESS))
8871 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8872 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8875 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8876 tg3_flag(tp, PCIX_MODE)) {
8877 val = tr32(TG3PCI_PCISTATE);
8878 val |= PCISTATE_RETRY_SAME_DMA;
8879 tw32(TG3PCI_PCISTATE, val);
8882 if (tg3_flag(tp, ENABLE_APE)) {
8883 /* Allow reads and writes to the
8884 * APE register and memory space.
8885 */
8886 val = tr32(TG3PCI_PCISTATE);
8887 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8888 PCISTATE_ALLOW_APE_SHMEM_WR |
8889 PCISTATE_ALLOW_APE_PSPACE_WR;
8890 tw32(TG3PCI_PCISTATE, val);
8893 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8894 /* Enable some hw fixes. */
8895 val = tr32(TG3PCI_MSI_DATA);
8896 val |= (1 << 26) | (1 << 28) | (1 << 29);
8897 tw32(TG3PCI_MSI_DATA, val);
8900 /* Descriptor ring init may make accesses to the
8901 * NIC SRAM area to setup the TX descriptors, so we
8902 * can only do this after the hardware has been
8903 * successfully reset.
8904 */
8905 err = tg3_init_rings(tp);
8906 if (err)
8907 return err;
8909 if (tg3_flag(tp, 57765_PLUS)) {
8910 val = tr32(TG3PCI_DMA_RW_CTRL) &
8911 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8912 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8913 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8914 if (!tg3_flag(tp, 57765_CLASS) &&
8915 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8916 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8917 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8918 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8919 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8920 /* This value is determined during the probe time DMA
8921 * engine test, tg3_test_dma.
8922 */
8923 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8926 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8927 GRC_MODE_4X_NIC_SEND_RINGS |
8928 GRC_MODE_NO_TX_PHDR_CSUM |
8929 GRC_MODE_NO_RX_PHDR_CSUM);
8930 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8932 /* Pseudo-header checksum is done by hardware logic and not
8933 * the offload processors, so make the chip do the pseudo-
8934 * header checksums on receive. For transmit it is more
8935 * convenient to do the pseudo-header checksum in software
8936 * as Linux does that on transmit for us in all cases.
8937 */
8938 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8940 tw32(GRC_MODE,
8941 tp->grc_mode |
8942 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8944 /* Set up the timer prescaler register. The clock is always 66 MHz. */
8945 val = tr32(GRC_MISC_CFG);
8946 val &= ~0xff;
8947 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8948 tw32(GRC_MISC_CFG, val);
8950 /* Initialize MBUF/DESC pool. */
8951 if (tg3_flag(tp, 5750_PLUS)) {
8952 /* Do nothing. */
8953 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8954 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8955 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8956 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8957 else
8958 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8959 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8960 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8961 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8962 int fw_len;
8964 fw_len = tp->fw_len;
8965 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8966 tw32(BUFMGR_MB_POOL_ADDR,
8967 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8968 tw32(BUFMGR_MB_POOL_SIZE,
8969 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8972 if (tp->dev->mtu <= ETH_DATA_LEN) {
8973 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8974 tp->bufmgr_config.mbuf_read_dma_low_water);
8975 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8976 tp->bufmgr_config.mbuf_mac_rx_low_water);
8977 tw32(BUFMGR_MB_HIGH_WATER,
8978 tp->bufmgr_config.mbuf_high_water);
8979 } else {
8980 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8981 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8982 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8983 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8984 tw32(BUFMGR_MB_HIGH_WATER,
8985 tp->bufmgr_config.mbuf_high_water_jumbo);
8987 tw32(BUFMGR_DMA_LOW_WATER,
8988 tp->bufmgr_config.dma_low_water);
8989 tw32(BUFMGR_DMA_HIGH_WATER,
8990 tp->bufmgr_config.dma_high_water);
8992 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8993 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8994 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8995 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8996 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8997 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8998 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8999 tw32(BUFMGR_MODE, val);
9000 for (i = 0; i < 2000; i++) {
9001 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9002 break;
9003 udelay(10);
9005 if (i >= 2000) {
9006 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9007 return -ENODEV;
9010 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9011 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9013 tg3_setup_rxbd_thresholds(tp);
9015 /* Initialize TG3_BDINFO's at:
9016 * RCVDBDI_STD_BD: standard eth size rx ring
9017 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9018 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9020 * like so:
9021 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9022 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9023 * ring attribute flags
9024 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9026 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9027 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9029 * The size of each ring is fixed in the firmware, but the location is
9030 * configurable.
9031 */
9032 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9033 ((u64) tpr->rx_std_mapping >> 32));
9034 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9035 ((u64) tpr->rx_std_mapping & 0xffffffff));
9036 if (!tg3_flag(tp, 5717_PLUS))
9037 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9038 NIC_SRAM_RX_BUFFER_DESC);
9040 /* Disable the mini ring */
9041 if (!tg3_flag(tp, 5705_PLUS))
9042 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9043 BDINFO_FLAGS_DISABLED);
9045 /* Program the jumbo buffer descriptor ring control
9046 * blocks on those devices that have them.
9047 */
9048 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9049 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9051 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9052 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9053 ((u64) tpr->rx_jmb_mapping >> 32));
9054 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9055 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9056 val = TG3_RX_JMB_RING_SIZE(tp) <<
9057 BDINFO_FLAGS_MAXLEN_SHIFT;
9058 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9059 val | BDINFO_FLAGS_USE_EXT_RECV);
9060 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9061 tg3_flag(tp, 57765_CLASS))
9062 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9063 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9064 } else {
9065 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9066 BDINFO_FLAGS_DISABLED);
9069 if (tg3_flag(tp, 57765_PLUS)) {
9070 val = TG3_RX_STD_RING_SIZE(tp);
9071 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9072 val |= (TG3_RX_STD_DMA_SZ << 2);
9073 } else
9074 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9075 } else
9076 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9078 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9080 tpr->rx_std_prod_idx = tp->rx_pending;
9081 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9083 tpr->rx_jmb_prod_idx =
9084 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9085 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9087 tg3_rings_reset(tp);
9089 /* Initialize MAC address and backoff seed. */
9090 __tg3_set_mac_addr(tp, 0);
9092 /* MTU + ethernet header + FCS + optional VLAN tag */
9093 tw32(MAC_RX_MTU_SIZE,
9094 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9096 /* The slot time is changed by tg3_setup_phy if we
9097 * run at gigabit with half duplex.
9098 */
9099 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9100 (6 << TX_LENGTHS_IPG_SHIFT) |
9101 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9103 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9104 val |= tr32(MAC_TX_LENGTHS) &
9105 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9106 TX_LENGTHS_CNT_DWN_VAL_MSK);
9108 tw32(MAC_TX_LENGTHS, val);
9110 /* Receive rules. */
9111 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9112 tw32(RCVLPC_CONFIG, 0x0181);
9114 /* Calculate the RDMAC_MODE setting early; we need it to determine
9115 * the RCVLPC_STATE_ENABLE mask.
9116 */
9117 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9118 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9119 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9120 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9121 RDMAC_MODE_LNGREAD_ENAB);
9123 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9124 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9126 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9127 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9128 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9129 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9130 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9131 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9133 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9134 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9135 if (tg3_flag(tp, TSO_CAPABLE) &&
9136 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9137 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9138 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9139 !tg3_flag(tp, IS_5788)) {
9140 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9144 if (tg3_flag(tp, PCI_EXPRESS))
9145 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9147 if (tg3_flag(tp, HW_TSO_1) ||
9148 tg3_flag(tp, HW_TSO_2) ||
9149 tg3_flag(tp, HW_TSO_3))
9150 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9152 if (tg3_flag(tp, 57765_PLUS) ||
9153 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9154 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9155 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9157 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9158 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9160 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9161 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9162 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9163 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9164 tg3_flag(tp, 57765_PLUS)) {
9165 val = tr32(TG3_RDMA_RSRVCTRL_REG);
9166 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
9167 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9168 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9169 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9170 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9171 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9172 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9174 tw32(TG3_RDMA_RSRVCTRL_REG,
9175 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9178 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9179 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9180 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9181 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9182 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9183 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9186 /* Receive/send statistics. */
9187 if (tg3_flag(tp, 5750_PLUS)) {
9188 val = tr32(RCVLPC_STATS_ENABLE);
9189 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9190 tw32(RCVLPC_STATS_ENABLE, val);
9191 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9192 tg3_flag(tp, TSO_CAPABLE)) {
9193 val = tr32(RCVLPC_STATS_ENABLE);
9194 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9195 tw32(RCVLPC_STATS_ENABLE, val);
9196 } else {
9197 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9199 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9200 tw32(SNDDATAI_STATSENAB, 0xffffff);
9201 tw32(SNDDATAI_STATSCTRL,
9202 (SNDDATAI_SCTRL_ENABLE |
9203 SNDDATAI_SCTRL_FASTUPD));
9205 /* Setup host coalescing engine. */
9206 tw32(HOSTCC_MODE, 0);
9207 for (i = 0; i < 2000; i++) {
9208 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9209 break;
9210 udelay(10);
9213 __tg3_set_coalesce(tp, &tp->coal);
9215 if (!tg3_flag(tp, 5705_PLUS)) {
9216 /* Status/statistics block address. See tg3_timer,
9217 * the tg3_periodic_fetch_stats call there, and
9218 * tg3_get_stats to see how this works for 5705/5750 chips.
9219 */
9220 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9221 ((u64) tp->stats_mapping >> 32));
9222 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9223 ((u64) tp->stats_mapping & 0xffffffff));
9224 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9226 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9228 /* Clear statistics and status block memory areas */
9229 for (i = NIC_SRAM_STATS_BLK;
9230 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9231 i += sizeof(u32)) {
9232 tg3_write_mem(tp, i, 0);
9233 udelay(40);
9237 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9239 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9240 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9241 if (!tg3_flag(tp, 5705_PLUS))
9242 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9244 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9245 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9246 /* reset to prevent losing 1st rx packet intermittently */
9247 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9248 udelay(10);
9251 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9252 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9253 MAC_MODE_FHDE_ENABLE;
9254 if (tg3_flag(tp, ENABLE_APE))
9255 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9256 if (!tg3_flag(tp, 5705_PLUS) &&
9257 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9258 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9259 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9260 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9261 udelay(40);
9263 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9264 * If TG3_FLAG_IS_NIC is zero, we should read the
9265 * register to preserve the GPIO settings for LOMs. The GPIOs,
9266 * whether used as inputs or outputs, are set by boot code after
9267 * reset.
9268 */
9269 if (!tg3_flag(tp, IS_NIC)) {
9270 u32 gpio_mask;
9272 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9273 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9274 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9277 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9278 GRC_LCLCTRL_GPIO_OUTPUT3;
9280 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9281 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9283 tp->grc_local_ctrl &= ~gpio_mask;
9284 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9286 /* GPIO1 must be driven high for eeprom write protect */
9287 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9288 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9289 GRC_LCLCTRL_GPIO_OUTPUT1);
9291 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9292 udelay(100);
9294 if (tg3_flag(tp, USING_MSIX)) {
9295 val = tr32(MSGINT_MODE);
9296 val |= MSGINT_MODE_ENABLE;
9297 if (tp->irq_cnt > 1)
9298 val |= MSGINT_MODE_MULTIVEC_EN;
9299 if (!tg3_flag(tp, 1SHOT_MSI))
9300 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9301 tw32(MSGINT_MODE, val);
9304 if (!tg3_flag(tp, 5705_PLUS)) {
9305 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9306 udelay(40);
9309 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9310 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9311 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9312 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9313 WDMAC_MODE_LNGREAD_ENAB);
9315 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9316 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9317 if (tg3_flag(tp, TSO_CAPABLE) &&
9318 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9319 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9320 /* nothing */
9321 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9322 !tg3_flag(tp, IS_5788)) {
9323 val |= WDMAC_MODE_RX_ACCEL;
9327 /* Enable host coalescing bug fix */
9328 if (tg3_flag(tp, 5755_PLUS))
9329 val |= WDMAC_MODE_STATUS_TAG_FIX;
9331 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9332 val |= WDMAC_MODE_BURST_ALL_DATA;
9334 tw32_f(WDMAC_MODE, val);
9335 udelay(40);
9337 if (tg3_flag(tp, PCIX_MODE)) {
9338 u16 pcix_cmd;
9340 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9341 &pcix_cmd);
9342 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9343 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9344 pcix_cmd |= PCI_X_CMD_READ_2K;
9345 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9346 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9347 pcix_cmd |= PCI_X_CMD_READ_2K;
9349 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9350 pcix_cmd);
9353 tw32_f(RDMAC_MODE, rdmac_mode);
9354 udelay(40);
9356 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9357 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9358 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9359 break;
9361 if (i < TG3_NUM_RDMA_CHANNELS) {
9362 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9363 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9364 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9365 tg3_flag_set(tp, 5719_RDMA_BUG);
9369 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9370 if (!tg3_flag(tp, 5705_PLUS))
9371 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9373 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9374 tw32(SNDDATAC_MODE,
9375 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9376 else
9377 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9379 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9380 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9381 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9382 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9383 val |= RCVDBDI_MODE_LRG_RING_SZ;
9384 tw32(RCVDBDI_MODE, val);
9385 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9386 if (tg3_flag(tp, HW_TSO_1) ||
9387 tg3_flag(tp, HW_TSO_2) ||
9388 tg3_flag(tp, HW_TSO_3))
9389 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9390 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9391 if (tg3_flag(tp, ENABLE_TSS))
9392 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9393 tw32(SNDBDI_MODE, val);
9394 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9396 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9397 err = tg3_load_5701_a0_firmware_fix(tp);
9398 if (err)
9399 return err;
9402 if (tg3_flag(tp, TSO_CAPABLE)) {
9403 err = tg3_load_tso_firmware(tp);
9404 if (err)
9405 return err;
9408 tp->tx_mode = TX_MODE_ENABLE;
9410 if (tg3_flag(tp, 5755_PLUS) ||
9411 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9412 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9414 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9415 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9416 tp->tx_mode &= ~val;
9417 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9420 tw32_f(MAC_TX_MODE, tp->tx_mode);
9421 udelay(100);
9423 if (tg3_flag(tp, ENABLE_RSS)) {
9424 tg3_rss_write_indir_tbl(tp);
9426 /* Setup the "secret" hash key. */
9427 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9428 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9429 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9430 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9431 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9432 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9433 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9434 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9435 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9436 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9439 tp->rx_mode = RX_MODE_ENABLE;
9440 if (tg3_flag(tp, 5755_PLUS))
9441 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9443 if (tg3_flag(tp, ENABLE_RSS))
9444 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9445 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9446 RX_MODE_RSS_IPV6_HASH_EN |
9447 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9448 RX_MODE_RSS_IPV4_HASH_EN |
9449 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9451 tw32_f(MAC_RX_MODE, tp->rx_mode);
9452 udelay(10);
9454 tw32(MAC_LED_CTRL, tp->led_ctrl);
9456 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9457 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9458 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9459 udelay(10);
9461 tw32_f(MAC_RX_MODE, tp->rx_mode);
9462 udelay(10);
9464 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9465 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9466 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9467 /* Set drive transmission level to 1.2V */
9468 /* only if the signal pre-emphasis bit is not set */
9469 val = tr32(MAC_SERDES_CFG);
9470 val &= 0xfffff000;
9471 val |= 0x880;
9472 tw32(MAC_SERDES_CFG, val);
9474 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9475 tw32(MAC_SERDES_CFG, 0x616000);
9478 /* Prevent chip from dropping frames when flow control
9479 * is enabled.
9480 */
9481 if (tg3_flag(tp, 57765_CLASS))
9482 val = 1;
9483 else
9484 val = 2;
9485 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9487 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9488 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9489 /* Use hardware link auto-negotiation */
9490 tg3_flag_set(tp, HW_AUTONEG);
9493 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9494 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9495 u32 tmp;
9497 tmp = tr32(SERDES_RX_CTRL);
9498 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9499 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9500 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9501 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9504 if (!tg3_flag(tp, USE_PHYLIB)) {
9505 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9506 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9508 err = tg3_setup_phy(tp, 0);
9509 if (err)
9510 return err;
9512 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9513 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9514 u32 tmp;
9516 /* Clear CRC stats. */
9517 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9518 tg3_writephy(tp, MII_TG3_TEST1,
9519 tmp | MII_TG3_TEST1_CRC_EN);
9520 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9525 __tg3_set_rx_mode(tp->dev);
9527 /* Initialize receive rules. */
9528 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9529 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9530 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9531 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9533 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9534 limit = 8;
9535 else
9536 limit = 16;
9537 if (tg3_flag(tp, ENABLE_ASF))
9538 limit -= 4;
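/* Each case below intentionally falls through to the next, clearing
 * every rule from (limit - 1) down to 4; rules 0 and 1 were programmed
 * above, and rules 2 and 3 are deliberately left untouched (see the
 * commented-out writes).
 */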
9539 switch (limit) {
9540 case 16:
9541 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9542 case 15:
9543 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9544 case 14:
9545 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9546 case 13:
9547 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9548 case 12:
9549 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9550 case 11:
9551 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9552 case 10:
9553 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9554 case 9:
9555 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9556 case 8:
9557 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9558 case 7:
9559 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9560 case 6:
9561 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9562 case 5:
9563 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9564 case 4:
9565 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9566 case 3:
9567 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9568 case 2:
9569 case 1:
9571 default:
9572 break;
9575 if (tg3_flag(tp, ENABLE_APE))
9576 /* Write our heartbeat update interval to APE. */
9577 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9578 APE_HOST_HEARTBEAT_INT_DISABLE);
9580 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9582 return 0;
9585 /* Called at device open time to get the chip ready for
9586 * packet processing. Invoked with tp->lock held.
9587 */
9588 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9590 tg3_switch_clocks(tp);
9592 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9594 return tg3_reset_hw(tp, reset_phy);
9597 #if IS_ENABLED(CONFIG_HWMON)
9598 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9600 int i;
9602 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9603 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9605 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9606 off += len;
9608 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9609 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9610 memset(ocir, 0, TG3_OCIR_LEN);
9614 /* sysfs attributes for hwmon */
9615 static ssize_t tg3_show_temp(struct device *dev,
9616 struct device_attribute *devattr, char *buf)
9618 struct pci_dev *pdev = to_pci_dev(dev);
9619 struct net_device *netdev = pci_get_drvdata(pdev);
9620 struct tg3 *tp = netdev_priv(netdev);
9621 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9622 u32 temperature;
9624 spin_lock_bh(&tp->lock);
9625 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9626 sizeof(temperature));
9627 spin_unlock_bh(&tp->lock);
9628 return sprintf(buf, "%u\n", temperature);
9632 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9633 TG3_TEMP_SENSOR_OFFSET);
9634 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9635 TG3_TEMP_CAUTION_OFFSET);
9636 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9637 TG3_TEMP_MAX_OFFSET);
9639 static struct attribute *tg3_attributes[] = {
9640 &sensor_dev_attr_temp1_input.dev_attr.attr,
9641 &sensor_dev_attr_temp1_crit.dev_attr.attr,
9642 &sensor_dev_attr_temp1_max.dev_attr.attr,
9643 NULL
9646 static const struct attribute_group tg3_group = {
9647 .attrs = tg3_attributes,
9650 #endif
9652 static void tg3_hwmon_close(struct tg3 *tp)
9654 #if IS_ENABLED(CONFIG_HWMON)
9655 if (tp->hwmon_dev) {
9656 hwmon_device_unregister(tp->hwmon_dev);
9657 tp->hwmon_dev = NULL;
9658 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9660 #endif
9663 static void tg3_hwmon_open(struct tg3 *tp)
9665 #if IS_ENABLED(CONFIG_HWMON)
9666 int i, err;
9667 u32 size = 0;
9668 struct pci_dev *pdev = tp->pdev;
9669 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9671 tg3_sd_scan_scratchpad(tp, ocirs);
9673 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9674 if (!ocirs[i].src_data_length)
9675 continue;
9677 size += ocirs[i].src_hdr_length;
9678 size += ocirs[i].src_data_length;
9681 if (!size)
9682 return;
9684 /* Register hwmon sysfs hooks */
9685 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9686 if (err) {
9687 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9688 return;
9691 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9692 if (IS_ERR(tp->hwmon_dev)) {
9693 tp->hwmon_dev = NULL;
9694 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9695 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9697 #endif
9701 #define TG3_STAT_ADD32(PSTAT, REG) \
9702 do { u32 __val = tr32(REG); \
9703 (PSTAT)->low += __val; \
9704 if ((PSTAT)->low < __val) \
9705 (PSTAT)->high += 1; \
9706 } while (0)
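/* TG3_STAT_ADD32() accumulates a 32-bit hardware statistics register
 * into a 64-bit software counter with a manual carry: if the low word
 * wraps, unsigned overflow makes (PSTAT)->low < __val and the high
 * word is incremented. E.g. low = 0xfffffff0 plus __val = 0x20 yields
 * low = 0x10 and a carry into high.
 */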
9708 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9710 struct tg3_hw_stats *sp = tp->hw_stats;
9712 if (!netif_carrier_ok(tp->dev))
9713 return;
9715 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9716 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9717 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9718 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9719 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9720 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9721 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9722 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9723 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9724 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9725 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9726 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9727 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9728 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
9729 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
9730 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
9731 u32 val;
9733 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9734 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
9735 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9736 tg3_flag_clear(tp, 5719_RDMA_BUG);
9739 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9740 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9741 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9742 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9743 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9744 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9745 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9746 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9747 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9748 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9749 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9750 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9751 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9752 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9754 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9755 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9756 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9757 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9758 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9759 } else {
9760 u32 val = tr32(HOSTCC_FLOW_ATTN);
9761 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9762 if (val) {
9763 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9764 sp->rx_discards.low += val;
9765 if (sp->rx_discards.low < val)
9766 sp->rx_discards.high += 1;
9768 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9770 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9773 static void tg3_chk_missed_msi(struct tg3 *tp)
9775 u32 i;
9777 for (i = 0; i < tp->irq_cnt; i++) {
9778 struct tg3_napi *tnapi = &tp->napi[i];
9780 if (tg3_has_work(tnapi)) {
9781 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9782 tnapi->last_tx_cons == tnapi->tx_cons) {
9783 if (tnapi->chk_msi_cnt < 1) {
9784 tnapi->chk_msi_cnt++;
9785 return;
9787 tg3_msi(0, tnapi);
9790 tnapi->chk_msi_cnt = 0;
9791 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9792 tnapi->last_tx_cons = tnapi->tx_cons;
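/* Heuristic used above: if a vector has work pending but neither its
 * rx nor tx consumer index has advanced since the previous timer tick,
 * the MSI is assumed lost after one grace tick and the handler is
 * invoked directly via tg3_msi().
 */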
9796 static void tg3_timer(unsigned long __opaque)
9798 struct tg3 *tp = (struct tg3 *) __opaque;
9800 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9801 goto restart_timer;
9803 spin_lock(&tp->lock);
9805 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9806 tg3_flag(tp, 57765_CLASS))
9807 tg3_chk_missed_msi(tp);
9809 if (!tg3_flag(tp, TAGGED_STATUS)) {
9810 /* All of this garbage is necessary because, when using non-tagged
9811 * IRQ status, the mailbox/status_block protocol the chip
9812 * uses with the CPU is race prone.
9813 */
9814 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9815 tw32(GRC_LOCAL_CTRL,
9816 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9817 } else {
9818 tw32(HOSTCC_MODE, tp->coalesce_mode |
9819 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9822 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9823 spin_unlock(&tp->lock);
9824 tg3_reset_task_schedule(tp);
9825 goto restart_timer;
9829 /* This part only runs once per second. */
9830 if (!--tp->timer_counter) {
9831 if (tg3_flag(tp, 5705_PLUS))
9832 tg3_periodic_fetch_stats(tp);
9834 if (tp->setlpicnt && !--tp->setlpicnt)
9835 tg3_phy_eee_enable(tp);
9837 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9838 u32 mac_stat;
9839 int phy_event;
9841 mac_stat = tr32(MAC_STATUS);
9843 phy_event = 0;
9844 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9845 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9846 phy_event = 1;
9847 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9848 phy_event = 1;
9850 if (phy_event)
9851 tg3_setup_phy(tp, 0);
9852 } else if (tg3_flag(tp, POLL_SERDES)) {
9853 u32 mac_stat = tr32(MAC_STATUS);
9854 int need_setup = 0;
9856 if (netif_carrier_ok(tp->dev) &&
9857 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9858 need_setup = 1;
9860 if (!netif_carrier_ok(tp->dev) &&
9861 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9862 MAC_STATUS_SIGNAL_DET))) {
9863 need_setup = 1;
9865 if (need_setup) {
9866 if (!tp->serdes_counter) {
9867 tw32_f(MAC_MODE,
9868 (tp->mac_mode &
9869 ~MAC_MODE_PORT_MODE_MASK));
9870 udelay(40);
9871 tw32_f(MAC_MODE, tp->mac_mode);
9872 udelay(40);
9874 tg3_setup_phy(tp, 0);
9876 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9877 tg3_flag(tp, 5780_CLASS)) {
9878 tg3_serdes_parallel_detect(tp);
9881 tp->timer_counter = tp->timer_multiplier;
9884 /* Heartbeat is only sent once every 2 seconds.
9886 * The heartbeat is to tell the ASF firmware that the host
9887 * driver is still alive. In the event that the OS crashes,
9888 * ASF needs to reset the hardware to free up the FIFO space
9889 * that may be filled with rx packets destined for the host.
9890 * If the FIFO is full, ASF will no longer function properly.
9892 * Unintended resets have been reported on real time kernels
9893 * where the timer doesn't run on time. Netpoll will also have
9894 * the same problem.
9896 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9897 * to check the ring condition when the heartbeat is expiring
9898 * before doing the reset. This will prevent most unintended
9899 * resets.
9900 */
9901 if (!--tp->asf_counter) {
9902 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9903 tg3_wait_for_event_ack(tp);
9905 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9906 FWCMD_NICDRV_ALIVE3);
9907 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9908 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9909 TG3_FW_UPDATE_TIMEOUT_SEC);
9911 tg3_generate_fw_event(tp);
9913 tp->asf_counter = tp->asf_multiplier;
9916 spin_unlock(&tp->lock);
9918 restart_timer:
9919 tp->timer.expires = jiffies + tp->timer_offset;
9920 add_timer(&tp->timer);
9923 static void __devinit tg3_timer_init(struct tg3 *tp)
9925 if (tg3_flag(tp, TAGGED_STATUS) &&
9926 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9927 !tg3_flag(tp, 57765_CLASS))
9928 tp->timer_offset = HZ;
9929 else
9930 tp->timer_offset = HZ / 10;
9932 BUG_ON(tp->timer_offset > HZ);
9934 tp->timer_multiplier = (HZ / tp->timer_offset);
9935 tp->asf_multiplier = (HZ / tp->timer_offset) *
9936 TG3_FW_UPDATE_FREQ_SEC;
9938 init_timer(&tp->timer);
9939 tp->timer.data = (unsigned long) tp;
9940 tp->timer.function = tg3_timer;
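/* Cadence arithmetic (from the code above): with timer_offset == HZ / 10
 * the timer fires at 10 Hz and timer_multiplier == 10, so the "once per
 * second" block in tg3_timer() runs every tenth tick; asf_multiplier
 * counts ticks so the ASF heartbeat fires every TG3_FW_UPDATE_FREQ_SEC
 * seconds.
 */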
9943 static void tg3_timer_start(struct tg3 *tp)
9945 tp->asf_counter = tp->asf_multiplier;
9946 tp->timer_counter = tp->timer_multiplier;
9948 tp->timer.expires = jiffies + tp->timer_offset;
9949 add_timer(&tp->timer);
9952 static void tg3_timer_stop(struct tg3 *tp)
9954 del_timer_sync(&tp->timer);
9957 /* Restart hardware after configuration changes, self-test, etc.
9958 * Invoked with tp->lock held.
9959 */
9960 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9961 __releases(tp->lock)
9962 __acquires(tp->lock)
9964 int err;
9966 err = tg3_init_hw(tp, reset_phy);
9967 if (err) {
9968 netdev_err(tp->dev,
9969 "Failed to re-initialize device, aborting\n");
9970 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9971 tg3_full_unlock(tp);
9972 tg3_timer_stop(tp);
9973 tp->irq_sync = 0;
9974 tg3_napi_enable(tp);
9975 dev_close(tp->dev);
9976 tg3_full_lock(tp, 0);
9978 return err;
9981 static void tg3_reset_task(struct work_struct *work)
9983 struct tg3 *tp = container_of(work, struct tg3, reset_task);
9984 int err;
9986 tg3_full_lock(tp, 0);
9988 if (!netif_running(tp->dev)) {
9989 tg3_flag_clear(tp, RESET_TASK_PENDING);
9990 tg3_full_unlock(tp);
9991 return;
9994 tg3_full_unlock(tp);
9996 tg3_phy_stop(tp);
9998 tg3_netif_stop(tp);
10000 tg3_full_lock(tp, 1);
10002 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10003 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10004 tp->write32_rx_mbox = tg3_write_flush_reg32;
10005 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10006 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10009 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10010 err = tg3_init_hw(tp, 1);
10011 if (err)
10012 goto out;
10014 tg3_netif_start(tp);
10016 out:
10017 tg3_full_unlock(tp);
10019 if (!err)
10020 tg3_phy_start(tp);
10022 tg3_flag_clear(tp, RESET_TASK_PENDING);
10025 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10027 irq_handler_t fn;
10028 unsigned long flags;
10029 char *name;
10030 struct tg3_napi *tnapi = &tp->napi[irq_num];
10032 if (tp->irq_cnt == 1)
10033 name = tp->dev->name;
10034 else {
10035 name = &tnapi->irq_lbl[0];
10036 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10037 name[IFNAMSIZ-1] = 0;
10040 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10041 fn = tg3_msi;
10042 if (tg3_flag(tp, 1SHOT_MSI))
10043 fn = tg3_msi_1shot;
10044 flags = 0;
10045 } else {
10046 fn = tg3_interrupt;
10047 if (tg3_flag(tp, TAGGED_STATUS))
10048 fn = tg3_interrupt_tagged;
10049 flags = IRQF_SHARED;
10052 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
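/* Handler selection above: MSI/MSI-X vectors get tg3_msi() (or the
 * one-shot variant) and need no IRQF_SHARED, while legacy INTx falls
 * back to tg3_interrupt()/tg3_interrupt_tagged() registered as a
 * shared interrupt.
 */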
10055 static int tg3_test_interrupt(struct tg3 *tp)
10057 struct tg3_napi *tnapi = &tp->napi[0];
10058 struct net_device *dev = tp->dev;
10059 int err, i, intr_ok = 0;
10060 u32 val;
10062 if (!netif_running(dev))
10063 return -ENODEV;
10065 tg3_disable_ints(tp);
10067 free_irq(tnapi->irq_vec, tnapi);
10069 /*
10070 * Turn off MSI one shot mode. Otherwise this test has no
10071 * observable way to know whether the interrupt was delivered.
10072 */
10073 if (tg3_flag(tp, 57765_PLUS)) {
10074 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10075 tw32(MSGINT_MODE, val);
10078 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10079 IRQF_SHARED, dev->name, tnapi);
10080 if (err)
10081 return err;
10083 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10084 tg3_enable_ints(tp);
10086 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10087 tnapi->coal_now);
10089 for (i = 0; i < 5; i++) {
10090 u32 int_mbox, misc_host_ctrl;
10092 int_mbox = tr32_mailbox(tnapi->int_mbox);
10093 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10095 if ((int_mbox != 0) ||
10096 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10097 intr_ok = 1;
10098 break;
10101 if (tg3_flag(tp, 57765_PLUS) &&
10102 tnapi->hw_status->status_tag != tnapi->last_tag)
10103 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10105 msleep(10);
10108 tg3_disable_ints(tp);
10110 free_irq(tnapi->irq_vec, tnapi);
10112 err = tg3_request_irq(tp, 0);
10114 if (err)
10115 return err;
10117 if (intr_ok) {
10118 /* Reenable MSI one shot mode. */
10119 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10120 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10121 tw32(MSGINT_MODE, val);
10123 return 0;
10126 return -EIO;
10129 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
10130 * successfully restored.
10131 */
10132 static int tg3_test_msi(struct tg3 *tp)
10134 int err;
10135 u16 pci_cmd;
10137 if (!tg3_flag(tp, USING_MSI))
10138 return 0;
10140 /* Turn off SERR reporting in case MSI terminates with Master
10141 * Abort.
10142 */
10143 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10144 pci_write_config_word(tp->pdev, PCI_COMMAND,
10145 pci_cmd & ~PCI_COMMAND_SERR);
10147 err = tg3_test_interrupt(tp);
10149 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10151 if (!err)
10152 return 0;
10154 /* other failures */
10155 if (err != -EIO)
10156 return err;
10158 /* MSI test failed, go back to INTx mode */
10159 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10160 "to INTx mode. Please report this failure to the PCI "
10161 "maintainer and include system chipset information\n");
10163 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10165 pci_disable_msi(tp->pdev);
10167 tg3_flag_clear(tp, USING_MSI);
10168 tp->napi[0].irq_vec = tp->pdev->irq;
10170 err = tg3_request_irq(tp, 0);
10171 if (err)
10172 return err;
10174 /* Need to reset the chip because the MSI cycle may have terminated
10175 * with Master Abort.
10176 */
10177 tg3_full_lock(tp, 1);
10179 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10180 err = tg3_init_hw(tp, 1);
10182 tg3_full_unlock(tp);
10184 if (err)
10185 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10187 return err;
10190 static int tg3_request_firmware(struct tg3 *tp)
10192 const __be32 *fw_data;
10194 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10195 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10196 tp->fw_needed);
10197 return -ENOENT;
10200 fw_data = (void *)tp->fw->data;
10202 /* Firmware blob starts with version numbers, followed by
10203 * start address and _full_ length including BSS sections
10204 * (which must be longer than the actual data, of course).
10205 */
10207 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
10208 if (tp->fw_len < (tp->fw->size - 12)) {
10209 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10210 tp->fw_len, tp->fw_needed);
10211 release_firmware(tp->fw);
10212 tp->fw = NULL;
10213 return -EINVAL;
10216 /* We no longer need firmware; we have it. */
10217 tp->fw_needed = NULL;
10218 return 0;
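/* Sketch of the blob header described above -- three big-endian 32-bit
 * words ahead of the payload (struct and field names illustrative only,
 * inferred from the comment and the fw_data[2] / size - 12 checks):
 *
 *	struct tg3_fw_hdr_example {
 *		__be32 version;		word 0: version numbers
 *		__be32 base_addr;	word 1: start address
 *		__be32 len;		word 2: full length, incl. BSS
 *	};
 *
 * hence fw_data[2] is taken as the length and validated against
 * tp->fw->size - 12, the blob size minus this 12-byte header.
 */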
10221 static u32 tg3_irq_count(struct tg3 *tp)
10223 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10225 if (irq_cnt > 1) {
10226 /* We want as many rx rings enabled as there are cpus.
10227 * In multiqueue MSI-X mode, the first MSI-X vector
10228 * only deals with link interrupts, etc, so we add
10229 * one to the number of vectors we are requesting.
10230 */
10231 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10234 return irq_cnt;
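/* Example (illustrative): with rxq_cnt == 4 and txq_cnt == 1 this asks
 * for min(4 + 1, irq_max) vectors -- four rx vectors plus the extra
 * vector 0 that handles link and other non-ring interrupts.
 */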
10237 static bool tg3_enable_msix(struct tg3 *tp)
10239 int i, rc;
10240 struct msix_entry msix_ent[tp->irq_max];
10242 tp->rxq_cnt = netif_get_num_default_rss_queues();
10243 if (tp->rxq_cnt > tp->rxq_max)
10244 tp->rxq_cnt = tp->rxq_max;
10245 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
10246 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
10247 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10249 tp->irq_cnt = tg3_irq_count(tp);
10251 for (i = 0; i < tp->irq_max; i++) {
10252 msix_ent[i].entry = i;
10253 msix_ent[i].vector = 0;
10256 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10257 if (rc < 0) {
10258 return false;
10259 } else if (rc != 0) {
10260 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10261 return false;
10262 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10263 tp->irq_cnt, rc);
10264 tp->irq_cnt = rc;
10265 tp->rxq_cnt = max(rc - 1, 1);
10266 if (tp->txq_cnt)
10267 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10270 for (i = 0; i < tp->irq_max; i++)
10271 tp->napi[i].irq_vec = msix_ent[i].vector;
10273 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10274 pci_disable_msix(tp->pdev);
10275 return false;
10278 if (tp->irq_cnt == 1)
10279 return true;
10281 tg3_flag_set(tp, ENABLE_RSS);
10283 if (tp->txq_cnt > 1)
10284 tg3_flag_set(tp, ENABLE_TSS);
10286 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10288 return true;
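/* Note on the pci_enable_msix() contract relied on above: a negative
 * return is a hard failure, zero is success, and a positive return is
 * the number of vectors that could be allocated, so the call is
 * retried once with that smaller count before giving up.
 */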
10291 static void tg3_ints_init(struct tg3 *tp)
10293 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10294 !tg3_flag(tp, TAGGED_STATUS)) {
10295 /* All MSI supporting chips should support tagged
10296 * status. Assert that this is the case.
10297 */
10298 netdev_warn(tp->dev,
10299 "MSI without TAGGED_STATUS? Not using MSI\n");
10300 goto defcfg;
10303 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10304 tg3_flag_set(tp, USING_MSIX);
10305 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10306 tg3_flag_set(tp, USING_MSI);
10308 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10309 u32 msi_mode = tr32(MSGINT_MODE);
10310 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10311 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10312 if (!tg3_flag(tp, 1SHOT_MSI))
10313 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10314 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10316 defcfg:
10317 if (!tg3_flag(tp, USING_MSIX)) {
10318 tp->irq_cnt = 1;
10319 tp->napi[0].irq_vec = tp->pdev->irq;
10322 if (tp->irq_cnt == 1) {
10323 tp->txq_cnt = 1;
10324 tp->rxq_cnt = 1;
10325 netif_set_real_num_tx_queues(tp->dev, 1);
10326 netif_set_real_num_rx_queues(tp->dev, 1);
10330 static void tg3_ints_fini(struct tg3 *tp)
10332 if (tg3_flag(tp, USING_MSIX))
10333 pci_disable_msix(tp->pdev);
10334 else if (tg3_flag(tp, USING_MSI))
10335 pci_disable_msi(tp->pdev);
10336 tg3_flag_clear(tp, USING_MSI);
10337 tg3_flag_clear(tp, USING_MSIX);
10338 tg3_flag_clear(tp, ENABLE_RSS);
10339 tg3_flag_clear(tp, ENABLE_TSS);
10342 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
10344 struct net_device *dev = tp->dev;
10345 int i, err;
10348 /* Set up interrupts first so we know how
10349 * many NAPI resources to allocate. */
10351 tg3_ints_init(tp);
10353 tg3_rss_check_indir_tbl(tp);
10355 /* The placement of this call is tied
10356 * to the setup and use of Host TX descriptors. */
10358 err = tg3_alloc_consistent(tp);
10359 if (err)
10360 goto err_out1;
10362 tg3_napi_init(tp);
10364 tg3_napi_enable(tp);
10366 for (i = 0; i < tp->irq_cnt; i++) {
10367 struct tg3_napi *tnapi = &tp->napi[i];
10368 err = tg3_request_irq(tp, i);
10369 if (err) {
10370 for (i--; i >= 0; i--) {
10371 tnapi = &tp->napi[i];
10372 free_irq(tnapi->irq_vec, tnapi);
10374 goto err_out2;
10378 tg3_full_lock(tp, 0);
10380 err = tg3_init_hw(tp, reset_phy);
10381 if (err) {
10382 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10383 tg3_free_rings(tp);
10386 tg3_full_unlock(tp);
10388 if (err)
10389 goto err_out3;
10391 if (test_irq && tg3_flag(tp, USING_MSI)) {
10392 err = tg3_test_msi(tp);
10394 if (err) {
10395 tg3_full_lock(tp, 0);
10396 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10397 tg3_free_rings(tp);
10398 tg3_full_unlock(tp);
10400 goto err_out2;
10403 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10404 u32 val = tr32(PCIE_TRANSACTION_CFG);
10406 tw32(PCIE_TRANSACTION_CFG,
10407 val | PCIE_TRANS_CFG_1SHOT_MSI);
10411 tg3_phy_start(tp);
10413 tg3_hwmon_open(tp);
10415 tg3_full_lock(tp, 0);
10417 tg3_timer_start(tp);
10418 tg3_flag_set(tp, INIT_COMPLETE);
10419 tg3_enable_ints(tp);
10421 tg3_full_unlock(tp);
10423 netif_tx_start_all_queues(dev);
10426 /* Reset the loopback feature if it was turned on while the device
10427 * was down; make sure it is reinstalled properly now. */
10429 if (dev->features & NETIF_F_LOOPBACK)
10430 tg3_set_loopback(dev, dev->features);
10432 return 0;
10434 err_out3:
10435 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10436 struct tg3_napi *tnapi = &tp->napi[i];
10437 free_irq(tnapi->irq_vec, tnapi);
10440 err_out2:
10441 tg3_napi_disable(tp);
10442 tg3_napi_fini(tp);
10443 tg3_free_consistent(tp);
10445 err_out1:
10446 tg3_ints_fini(tp);
10448 return err;
10451 static int tg3_open(struct net_device *dev)
10453 struct tg3 *tp = netdev_priv(dev);
10454 int err;
10456 if (tp->fw_needed) {
10457 err = tg3_request_firmware(tp);
10458 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10459 if (err)
10460 return err;
10461 } else if (err) {
10462 netdev_warn(tp->dev, "TSO capability disabled\n");
10463 tg3_flag_clear(tp, TSO_CAPABLE);
10464 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10465 netdev_notice(tp->dev, "TSO capability restored\n");
10466 tg3_flag_set(tp, TSO_CAPABLE);
10470 netif_carrier_off(tp->dev);
10472 err = tg3_power_up(tp);
10473 if (err)
10474 return err;
10476 tg3_full_lock(tp, 0);
10478 tg3_disable_ints(tp);
10479 tg3_flag_clear(tp, INIT_COMPLETE);
10481 tg3_full_unlock(tp);
10483 err = tg3_start(tp, true, true);
10484 if (err) {
10485 tg3_frob_aux_power(tp, false);
10486 pci_set_power_state(tp->pdev, PCI_D3hot);
10488 return err;
10491 static int tg3_close(struct net_device *dev)
10493 int i;
10494 struct tg3 *tp = netdev_priv(dev);
10496 tg3_napi_disable(tp);
10497 tg3_reset_task_cancel(tp);
10499 netif_tx_stop_all_queues(dev);
10501 tg3_timer_stop(tp);
10503 tg3_hwmon_close(tp);
10505 tg3_phy_stop(tp);
10507 tg3_full_lock(tp, 1);
10509 tg3_disable_ints(tp);
10511 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10512 tg3_free_rings(tp);
10513 tg3_flag_clear(tp, INIT_COMPLETE);
10515 tg3_full_unlock(tp);
10517 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10518 struct tg3_napi *tnapi = &tp->napi[i];
10519 free_irq(tnapi->irq_vec, tnapi);
10522 tg3_ints_fini(tp);
10524 /* Clear stats across close / open calls */
10525 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10526 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10528 tg3_napi_fini(tp);
10530 tg3_free_consistent(tp);
10532 tg3_power_down(tp);
10534 netif_carrier_off(tp->dev);
10536 return 0;
10539 static inline u64 get_stat64(tg3_stat64_t *val)
10541 return ((u64)val->high << 32) | ((u64)val->low);
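/* Example for get_stat64() above: a hardware counter with
 * high = 0x1 and low = 0x23456789 is returned as the 64-bit value
 * 0x0000000123456789.
 */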
10544 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10546 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10548 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10549 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10550 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10551 u32 val;
10553 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10554 tg3_writephy(tp, MII_TG3_TEST1,
10555 val | MII_TG3_TEST1_CRC_EN);
10556 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10557 } else
10558 val = 0;
10560 tp->phy_crc_errors += val;
10562 return tp->phy_crc_errors;
10565 return get_stat64(&hw_stats->rx_fcs_errors);
10568 #define ESTAT_ADD(member) \
10569 estats->member = old_estats->member + \
10570 get_stat64(&hw_stats->member)
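/* For instance, ESTAT_ADD(rx_octets) expands to
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * so each reported counter is the pre-reset total saved in estats_prev
 * plus the live hardware counter.
 */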
10572 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10574 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10575 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10577 ESTAT_ADD(rx_octets);
10578 ESTAT_ADD(rx_fragments);
10579 ESTAT_ADD(rx_ucast_packets);
10580 ESTAT_ADD(rx_mcast_packets);
10581 ESTAT_ADD(rx_bcast_packets);
10582 ESTAT_ADD(rx_fcs_errors);
10583 ESTAT_ADD(rx_align_errors);
10584 ESTAT_ADD(rx_xon_pause_rcvd);
10585 ESTAT_ADD(rx_xoff_pause_rcvd);
10586 ESTAT_ADD(rx_mac_ctrl_rcvd);
10587 ESTAT_ADD(rx_xoff_entered);
10588 ESTAT_ADD(rx_frame_too_long_errors);
10589 ESTAT_ADD(rx_jabbers);
10590 ESTAT_ADD(rx_undersize_packets);
10591 ESTAT_ADD(rx_in_length_errors);
10592 ESTAT_ADD(rx_out_length_errors);
10593 ESTAT_ADD(rx_64_or_less_octet_packets);
10594 ESTAT_ADD(rx_65_to_127_octet_packets);
10595 ESTAT_ADD(rx_128_to_255_octet_packets);
10596 ESTAT_ADD(rx_256_to_511_octet_packets);
10597 ESTAT_ADD(rx_512_to_1023_octet_packets);
10598 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10599 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10600 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10601 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10602 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10604 ESTAT_ADD(tx_octets);
10605 ESTAT_ADD(tx_collisions);
10606 ESTAT_ADD(tx_xon_sent);
10607 ESTAT_ADD(tx_xoff_sent);
10608 ESTAT_ADD(tx_flow_control);
10609 ESTAT_ADD(tx_mac_errors);
10610 ESTAT_ADD(tx_single_collisions);
10611 ESTAT_ADD(tx_mult_collisions);
10612 ESTAT_ADD(tx_deferred);
10613 ESTAT_ADD(tx_excessive_collisions);
10614 ESTAT_ADD(tx_late_collisions);
10615 ESTAT_ADD(tx_collide_2times);
10616 ESTAT_ADD(tx_collide_3times);
10617 ESTAT_ADD(tx_collide_4times);
10618 ESTAT_ADD(tx_collide_5times);
10619 ESTAT_ADD(tx_collide_6times);
10620 ESTAT_ADD(tx_collide_7times);
10621 ESTAT_ADD(tx_collide_8times);
10622 ESTAT_ADD(tx_collide_9times);
10623 ESTAT_ADD(tx_collide_10times);
10624 ESTAT_ADD(tx_collide_11times);
10625 ESTAT_ADD(tx_collide_12times);
10626 ESTAT_ADD(tx_collide_13times);
10627 ESTAT_ADD(tx_collide_14times);
10628 ESTAT_ADD(tx_collide_15times);
10629 ESTAT_ADD(tx_ucast_packets);
10630 ESTAT_ADD(tx_mcast_packets);
10631 ESTAT_ADD(tx_bcast_packets);
10632 ESTAT_ADD(tx_carrier_sense_errors);
10633 ESTAT_ADD(tx_discards);
10634 ESTAT_ADD(tx_errors);
10636 ESTAT_ADD(dma_writeq_full);
10637 ESTAT_ADD(dma_write_prioq_full);
10638 ESTAT_ADD(rxbds_empty);
10639 ESTAT_ADD(rx_discards);
10640 ESTAT_ADD(rx_errors);
10641 ESTAT_ADD(rx_threshold_hit);
10643 ESTAT_ADD(dma_readq_full);
10644 ESTAT_ADD(dma_read_prioq_full);
10645 ESTAT_ADD(tx_comp_queue_full);
10647 ESTAT_ADD(ring_set_send_prod_index);
10648 ESTAT_ADD(ring_status_update);
10649 ESTAT_ADD(nic_irqs);
10650 ESTAT_ADD(nic_avoided_irqs);
10651 ESTAT_ADD(nic_tx_threshold_hit);
10653 ESTAT_ADD(mbuf_lwm_thresh_hit);
10656 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10658 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10659 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10661 stats->rx_packets = old_stats->rx_packets +
10662 get_stat64(&hw_stats->rx_ucast_packets) +
10663 get_stat64(&hw_stats->rx_mcast_packets) +
10664 get_stat64(&hw_stats->rx_bcast_packets);
10666 stats->tx_packets = old_stats->tx_packets +
10667 get_stat64(&hw_stats->tx_ucast_packets) +
10668 get_stat64(&hw_stats->tx_mcast_packets) +
10669 get_stat64(&hw_stats->tx_bcast_packets);
10671 stats->rx_bytes = old_stats->rx_bytes +
10672 get_stat64(&hw_stats->rx_octets);
10673 stats->tx_bytes = old_stats->tx_bytes +
10674 get_stat64(&hw_stats->tx_octets);
10676 stats->rx_errors = old_stats->rx_errors +
10677 get_stat64(&hw_stats->rx_errors);
10678 stats->tx_errors = old_stats->tx_errors +
10679 get_stat64(&hw_stats->tx_errors) +
10680 get_stat64(&hw_stats->tx_mac_errors) +
10681 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10682 get_stat64(&hw_stats->tx_discards);
10684 stats->multicast = old_stats->multicast +
10685 get_stat64(&hw_stats->rx_mcast_packets);
10686 stats->collisions = old_stats->collisions +
10687 get_stat64(&hw_stats->tx_collisions);
10689 stats->rx_length_errors = old_stats->rx_length_errors +
10690 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10691 get_stat64(&hw_stats->rx_undersize_packets);
10693 stats->rx_over_errors = old_stats->rx_over_errors +
10694 get_stat64(&hw_stats->rxbds_empty);
10695 stats->rx_frame_errors = old_stats->rx_frame_errors +
10696 get_stat64(&hw_stats->rx_align_errors);
10697 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10698 get_stat64(&hw_stats->tx_discards);
10699 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10700 get_stat64(&hw_stats->tx_carrier_sense_errors);
10702 stats->rx_crc_errors = old_stats->rx_crc_errors +
10703 tg3_calc_crc_errors(tp);
10705 stats->rx_missed_errors = old_stats->rx_missed_errors +
10706 get_stat64(&hw_stats->rx_discards);
10708 stats->rx_dropped = tp->rx_dropped;
10709 stats->tx_dropped = tp->tx_dropped;
10712 static int tg3_get_regs_len(struct net_device *dev)
10714 return TG3_REG_BLK_SIZE;
10717 static void tg3_get_regs(struct net_device *dev,
10718 struct ethtool_regs *regs, void *_p)
10720 struct tg3 *tp = netdev_priv(dev);
10722 regs->version = 0;
10724 memset(_p, 0, TG3_REG_BLK_SIZE);
10726 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10727 return;
10729 tg3_full_lock(tp, 0);
10731 tg3_dump_legacy_regs(tp, (u32 *)_p);
10733 tg3_full_unlock(tp);
10736 static int tg3_get_eeprom_len(struct net_device *dev)
10738 struct tg3 *tp = netdev_priv(dev);
10740 return tp->nvram_size;
10743 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10745 struct tg3 *tp = netdev_priv(dev);
10746 int ret;
10747 u8 *pd;
10748 u32 i, offset, len, b_offset, b_count;
10749 __be32 val;
10751 if (tg3_flag(tp, NO_NVRAM))
10752 return -EINVAL;
10754 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10755 return -EAGAIN;
10757 offset = eeprom->offset;
10758 len = eeprom->len;
10759 eeprom->len = 0;
10761 eeprom->magic = TG3_EEPROM_MAGIC;
10763 if (offset & 3) {
10764 /* adjustments to start on required 4 byte boundary */
10765 b_offset = offset & 3;
10766 b_count = 4 - b_offset;
10767 if (b_count > len) {
10768 /* i.e. offset=1 len=2 */
10769 b_count = len;
10771 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10772 if (ret)
10773 return ret;
10774 memcpy(data, ((char *)&val) + b_offset, b_count);
10775 len -= b_count;
10776 offset += b_count;
10777 eeprom->len += b_count;
10780 /* read bytes up to the last 4 byte boundary */
10781 pd = &data[eeprom->len];
10782 for (i = 0; i < (len - (len & 3)); i += 4) {
10783 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10784 if (ret) {
10785 eeprom->len += i;
10786 return ret;
10788 memcpy(pd + i, &val, 4);
10790 eeprom->len += i;
10792 if (len & 3) {
10793 /* read last bytes not ending on 4 byte boundary */
10794 pd = &data[eeprom->len];
10795 b_count = len & 3;
10796 b_offset = offset + len - b_count;
10797 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10798 if (ret)
10799 return ret;
10800 memcpy(pd, &val, b_count);
10801 eeprom->len += b_count;
10803 return 0;
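/* Worked example for the alignment handling above (hypothetical
 * request): offset = 1, len = 6. The head path reads the word at
 * offset 0 and copies bytes 1-3 (b_offset = 1, b_count = 3); no whole
 * aligned word remains, so the tail path reads the word at offset 4
 * and copies its first 3 bytes, returning 6 bytes in total.
 */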
10806 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10808 struct tg3 *tp = netdev_priv(dev);
10809 int ret;
10810 u32 offset, len, b_offset, odd_len;
10811 u8 *buf;
10812 __be32 start, end;
10814 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10815 return -EAGAIN;
10817 if (tg3_flag(tp, NO_NVRAM) ||
10818 eeprom->magic != TG3_EEPROM_MAGIC)
10819 return -EINVAL;
10821 offset = eeprom->offset;
10822 len = eeprom->len;
10824 if ((b_offset = (offset & 3))) {
10825 /* adjustments to start on required 4 byte boundary */
10826 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10827 if (ret)
10828 return ret;
10829 len += b_offset;
10830 offset &= ~3;
10831 if (len < 4)
10832 len = 4;
10835 odd_len = 0;
10836 if (len & 3) {
10837 /* adjustments to end on required 4 byte boundary */
10838 odd_len = 1;
10839 len = (len + 3) & ~3;
10840 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10841 if (ret)
10842 return ret;
10845 buf = data;
10846 if (b_offset || odd_len) {
10847 buf = kmalloc(len, GFP_KERNEL);
10848 if (!buf)
10849 return -ENOMEM;
10850 if (b_offset)
10851 memcpy(buf, &start, 4);
10852 if (odd_len)
10853 memcpy(buf+len-4, &end, 4);
10854 memcpy(buf + b_offset, data, eeprom->len);
10857 ret = tg3_nvram_write_block(tp, offset, len, buf);
10859 if (buf != data)
10860 kfree(buf);
10862 return ret;
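/* Worked example for the read-modify-write above (hypothetical
 * request): offset = 2, len = 5 becomes an aligned 8-byte write of
 * offsets 0-7. The word read at offset 0 ("start") supplies bytes 0-1,
 * the word read at offset 4 ("end") supplies byte 7, and the caller's
 * 5 bytes land at offsets 2-6 before tg3_nvram_write_block() runs.
 */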
10865 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10867 struct tg3 *tp = netdev_priv(dev);
10869 if (tg3_flag(tp, USE_PHYLIB)) {
10870 struct phy_device *phydev;
10871 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10872 return -EAGAIN;
10873 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10874 return phy_ethtool_gset(phydev, cmd);
10877 cmd->supported = (SUPPORTED_Autoneg);
10879 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10880 cmd->supported |= (SUPPORTED_1000baseT_Half |
10881 SUPPORTED_1000baseT_Full);
10883 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10884 cmd->supported |= (SUPPORTED_100baseT_Half |
10885 SUPPORTED_100baseT_Full |
10886 SUPPORTED_10baseT_Half |
10887 SUPPORTED_10baseT_Full |
10888 SUPPORTED_TP);
10889 cmd->port = PORT_TP;
10890 } else {
10891 cmd->supported |= SUPPORTED_FIBRE;
10892 cmd->port = PORT_FIBRE;
10895 cmd->advertising = tp->link_config.advertising;
10896 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10897 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10898 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10899 cmd->advertising |= ADVERTISED_Pause;
10900 } else {
10901 cmd->advertising |= ADVERTISED_Pause |
10902 ADVERTISED_Asym_Pause;
10904 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10905 cmd->advertising |= ADVERTISED_Asym_Pause;
10908 if (netif_running(dev) && netif_carrier_ok(dev)) {
10909 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10910 cmd->duplex = tp->link_config.active_duplex;
10911 cmd->lp_advertising = tp->link_config.rmt_adv;
10912 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10913 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10914 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10915 else
10916 cmd->eth_tp_mdix = ETH_TP_MDI;
10918 } else {
10919 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10920 cmd->duplex = DUPLEX_UNKNOWN;
10921 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10923 cmd->phy_address = tp->phy_addr;
10924 cmd->transceiver = XCVR_INTERNAL;
10925 cmd->autoneg = tp->link_config.autoneg;
10926 cmd->maxtxpkt = 0;
10927 cmd->maxrxpkt = 0;
10928 return 0;
10931 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10933 struct tg3 *tp = netdev_priv(dev);
10934 u32 speed = ethtool_cmd_speed(cmd);
10936 if (tg3_flag(tp, USE_PHYLIB)) {
10937 struct phy_device *phydev;
10938 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10939 return -EAGAIN;
10940 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10941 return phy_ethtool_sset(phydev, cmd);
10944 if (cmd->autoneg != AUTONEG_ENABLE &&
10945 cmd->autoneg != AUTONEG_DISABLE)
10946 return -EINVAL;
10948 if (cmd->autoneg == AUTONEG_DISABLE &&
10949 cmd->duplex != DUPLEX_FULL &&
10950 cmd->duplex != DUPLEX_HALF)
10951 return -EINVAL;
10953 if (cmd->autoneg == AUTONEG_ENABLE) {
10954 u32 mask = ADVERTISED_Autoneg |
10955 ADVERTISED_Pause |
10956 ADVERTISED_Asym_Pause;
10958 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10959 mask |= ADVERTISED_1000baseT_Half |
10960 ADVERTISED_1000baseT_Full;
10962 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10963 mask |= ADVERTISED_100baseT_Half |
10964 ADVERTISED_100baseT_Full |
10965 ADVERTISED_10baseT_Half |
10966 ADVERTISED_10baseT_Full |
10967 ADVERTISED_TP;
10968 else
10969 mask |= ADVERTISED_FIBRE;
10971 if (cmd->advertising & ~mask)
10972 return -EINVAL;
10974 mask &= (ADVERTISED_1000baseT_Half |
10975 ADVERTISED_1000baseT_Full |
10976 ADVERTISED_100baseT_Half |
10977 ADVERTISED_100baseT_Full |
10978 ADVERTISED_10baseT_Half |
10979 ADVERTISED_10baseT_Full);
10981 cmd->advertising &= mask;
10982 } else {
10983 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10984 if (speed != SPEED_1000)
10985 return -EINVAL;
10987 if (cmd->duplex != DUPLEX_FULL)
10988 return -EINVAL;
10989 } else {
10990 if (speed != SPEED_100 &&
10991 speed != SPEED_10)
10992 return -EINVAL;
10996 tg3_full_lock(tp, 0);
10998 tp->link_config.autoneg = cmd->autoneg;
10999 if (cmd->autoneg == AUTONEG_ENABLE) {
11000 tp->link_config.advertising = (cmd->advertising |
11001 ADVERTISED_Autoneg);
11002 tp->link_config.speed = SPEED_UNKNOWN;
11003 tp->link_config.duplex = DUPLEX_UNKNOWN;
11004 } else {
11005 tp->link_config.advertising = 0;
11006 tp->link_config.speed = speed;
11007 tp->link_config.duplex = cmd->duplex;
11010 if (netif_running(dev))
11011 tg3_setup_phy(tp, 1);
11013 tg3_full_unlock(tp);
11015 return 0;
11018 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11020 struct tg3 *tp = netdev_priv(dev);
11022 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11023 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11024 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11025 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11028 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11030 struct tg3 *tp = netdev_priv(dev);
11032 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11033 wol->supported = WAKE_MAGIC;
11034 else
11035 wol->supported = 0;
11036 wol->wolopts = 0;
11037 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11038 wol->wolopts = WAKE_MAGIC;
11039 memset(&wol->sopass, 0, sizeof(wol->sopass));
11042 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11044 struct tg3 *tp = netdev_priv(dev);
11045 struct device *dp = &tp->pdev->dev;
11047 if (wol->wolopts & ~WAKE_MAGIC)
11048 return -EINVAL;
11049 if ((wol->wolopts & WAKE_MAGIC) &&
11050 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11051 return -EINVAL;
11053 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11055 spin_lock_bh(&tp->lock);
11056 if (device_may_wakeup(dp))
11057 tg3_flag_set(tp, WOL_ENABLE);
11058 else
11059 tg3_flag_clear(tp, WOL_ENABLE);
11060 spin_unlock_bh(&tp->lock);
11062 return 0;
11065 static u32 tg3_get_msglevel(struct net_device *dev)
11067 struct tg3 *tp = netdev_priv(dev);
11068 return tp->msg_enable;
11071 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11073 struct tg3 *tp = netdev_priv(dev);
11074 tp->msg_enable = value;
11077 static int tg3_nway_reset(struct net_device *dev)
11079 struct tg3 *tp = netdev_priv(dev);
11080 int r;
11082 if (!netif_running(dev))
11083 return -EAGAIN;
11085 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11086 return -EINVAL;
11088 if (tg3_flag(tp, USE_PHYLIB)) {
11089 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11090 return -EAGAIN;
11091 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11092 } else {
11093 u32 bmcr;
11095 spin_lock_bh(&tp->lock);
11096 r = -EINVAL;
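/* The back-to-back BMCR reads below look redundant, but the first
 * read is likely there to flush stale data on some PHYs; only the
 * second read's result is used.
 */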
11097 tg3_readphy(tp, MII_BMCR, &bmcr);
11098 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11099 ((bmcr & BMCR_ANENABLE) ||
11100 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11101 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11102 BMCR_ANENABLE);
11103 r = 0;
11105 spin_unlock_bh(&tp->lock);
11108 return r;
11111 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11113 struct tg3 *tp = netdev_priv(dev);
11115 ering->rx_max_pending = tp->rx_std_ring_mask;
11116 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11117 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11118 else
11119 ering->rx_jumbo_max_pending = 0;
11121 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11123 ering->rx_pending = tp->rx_pending;
11124 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11125 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11126 else
11127 ering->rx_jumbo_pending = 0;
11129 ering->tx_pending = tp->napi[0].tx_pending;
11132 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11134 struct tg3 *tp = netdev_priv(dev);
11135 int i, irq_sync = 0, err = 0;
11137 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11138 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11139 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11140 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11141 (tg3_flag(tp, TSO_BUG) &&
11142 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11143 return -EINVAL;
11145 if (netif_running(dev)) {
11146 tg3_phy_stop(tp);
11147 tg3_netif_stop(tp);
11148 irq_sync = 1;
11151 tg3_full_lock(tp, irq_sync);
11153 tp->rx_pending = ering->rx_pending;
11155 if (tg3_flag(tp, MAX_RXPEND_64) &&
11156 tp->rx_pending > 63)
11157 tp->rx_pending = 63;
11158 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11160 for (i = 0; i < tp->irq_max; i++)
11161 tp->napi[i].tx_pending = ering->tx_pending;
11163 if (netif_running(dev)) {
11164 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11165 err = tg3_restart_hw(tp, 1);
11166 if (!err)
11167 tg3_netif_start(tp);
11170 tg3_full_unlock(tp);
11172 if (irq_sync && !err)
11173 tg3_phy_start(tp);
11175 return err;
11178 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11180 struct tg3 *tp = netdev_priv(dev);
11182 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11184 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11185 epause->rx_pause = 1;
11186 else
11187 epause->rx_pause = 0;
11189 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11190 epause->tx_pause = 1;
11191 else
11192 epause->tx_pause = 0;
11195 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11197 struct tg3 *tp = netdev_priv(dev);
11198 int err = 0;
11200 if (tg3_flag(tp, USE_PHYLIB)) {
11201 u32 newadv;
11202 struct phy_device *phydev;
11204 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11206 if (!(phydev->supported & SUPPORTED_Pause) ||
11207 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11208 (epause->rx_pause != epause->tx_pause)))
11209 return -EINVAL;
11211 tp->link_config.flowctrl = 0;
11212 if (epause->rx_pause) {
11213 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11215 if (epause->tx_pause) {
11216 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11217 newadv = ADVERTISED_Pause;
11218 } else
11219 newadv = ADVERTISED_Pause |
11220 ADVERTISED_Asym_Pause;
11221 } else if (epause->tx_pause) {
11222 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11223 newadv = ADVERTISED_Asym_Pause;
11224 } else
11225 newadv = 0;
11227 if (epause->autoneg)
11228 tg3_flag_set(tp, PAUSE_AUTONEG);
11229 else
11230 tg3_flag_clear(tp, PAUSE_AUTONEG);
11232 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11233 u32 oldadv = phydev->advertising &
11234 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11235 if (oldadv != newadv) {
11236 phydev->advertising &=
11237 ~(ADVERTISED_Pause |
11238 ADVERTISED_Asym_Pause);
11239 phydev->advertising |= newadv;
11240 if (phydev->autoneg) {
11242 /* Always renegotiate the link to
11243 * inform our link partner of our
11244 * flow control settings, even if the
11245 * flow control is forced. Let
11246 * tg3_adjust_link() do the final
11247 * flow control setup. */
11249 return phy_start_aneg(phydev);
11253 if (!epause->autoneg)
11254 tg3_setup_flow_control(tp, 0, 0);
11255 } else {
11256 tp->link_config.advertising &=
11257 ~(ADVERTISED_Pause |
11258 ADVERTISED_Asym_Pause);
11259 tp->link_config.advertising |= newadv;
11261 } else {
11262 int irq_sync = 0;
11264 if (netif_running(dev)) {
11265 tg3_netif_stop(tp);
11266 irq_sync = 1;
11269 tg3_full_lock(tp, irq_sync);
11271 if (epause->autoneg)
11272 tg3_flag_set(tp, PAUSE_AUTONEG);
11273 else
11274 tg3_flag_clear(tp, PAUSE_AUTONEG);
11275 if (epause->rx_pause)
11276 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11277 else
11278 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11279 if (epause->tx_pause)
11280 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11281 else
11282 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11284 if (netif_running(dev)) {
11285 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11286 err = tg3_restart_hw(tp, 1);
11287 if (!err)
11288 tg3_netif_start(tp);
11291 tg3_full_unlock(tp);
11294 return err;
11297 static int tg3_get_sset_count(struct net_device *dev, int sset)
11299 switch (sset) {
11300 case ETH_SS_TEST:
11301 return TG3_NUM_TEST;
11302 case ETH_SS_STATS:
11303 return TG3_NUM_STATS;
11304 default:
11305 return -EOPNOTSUPP;
11309 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11310 u32 *rules __always_unused)
11312 struct tg3 *tp = netdev_priv(dev);
11314 if (!tg3_flag(tp, SUPPORT_MSIX))
11315 return -EOPNOTSUPP;
11317 switch (info->cmd) {
11318 case ETHTOOL_GRXRINGS:
11319 if (netif_running(tp->dev))
11320 info->data = tp->rxq_cnt;
11321 else {
11322 info->data = num_online_cpus();
11323 if (info->data > TG3_RSS_MAX_NUM_QS)
11324 info->data = TG3_RSS_MAX_NUM_QS;
11327 /* The first interrupt vector only
11328 * handles link interrupts. */
11330 info->data -= 1;
11331 return 0;
11333 default:
11334 return -EOPNOTSUPP;
11338 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11340 u32 size = 0;
11341 struct tg3 *tp = netdev_priv(dev);
11343 if (tg3_flag(tp, SUPPORT_MSIX))
11344 size = TG3_RSS_INDIR_TBL_SIZE;
11346 return size;
11349 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11351 struct tg3 *tp = netdev_priv(dev);
11352 int i;
11354 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11355 indir[i] = tp->rss_ind_tbl[i];
11357 return 0;
11360 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11362 struct tg3 *tp = netdev_priv(dev);
11363 size_t i;
11365 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11366 tp->rss_ind_tbl[i] = indir[i];
11368 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11369 return 0;
11371 /* It is legal to write the indirection
11372 * table while the device is running. */
11374 tg3_full_lock(tp, 0);
11375 tg3_rss_write_indir_tbl(tp);
11376 tg3_full_unlock(tp);
11378 return 0;
11381 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11383 switch (stringset) {
11384 case ETH_SS_STATS:
11385 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11386 break;
11387 case ETH_SS_TEST:
11388 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11389 break;
11390 default:
11391 WARN_ON(1); /* we need a WARN() */
11392 break;
11396 static int tg3_set_phys_id(struct net_device *dev,
11397 enum ethtool_phys_id_state state)
11399 struct tg3 *tp = netdev_priv(dev);
11401 if (!netif_running(tp->dev))
11402 return -EAGAIN;
11404 switch (state) {
11405 case ETHTOOL_ID_ACTIVE:
11406 return 1; /* cycle on/off once per second */
11408 case ETHTOOL_ID_ON:
11409 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11410 LED_CTRL_1000MBPS_ON |
11411 LED_CTRL_100MBPS_ON |
11412 LED_CTRL_10MBPS_ON |
11413 LED_CTRL_TRAFFIC_OVERRIDE |
11414 LED_CTRL_TRAFFIC_BLINK |
11415 LED_CTRL_TRAFFIC_LED);
11416 break;
11418 case ETHTOOL_ID_OFF:
11419 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11420 LED_CTRL_TRAFFIC_OVERRIDE);
11421 break;
11423 case ETHTOOL_ID_INACTIVE:
11424 tw32(MAC_LED_CTRL, tp->led_ctrl);
11425 break;
11428 return 0;
11431 static void tg3_get_ethtool_stats(struct net_device *dev,
11432 struct ethtool_stats *estats, u64 *tmp_stats)
11434 struct tg3 *tp = netdev_priv(dev);
11436 if (tp->hw_stats)
11437 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11438 else
11439 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11442 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11444 int i;
11445 __be32 *buf;
11446 u32 offset = 0, len = 0;
11447 u32 magic, val;
11449 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11450 return NULL;
11452 if (magic == TG3_EEPROM_MAGIC) {
11453 for (offset = TG3_NVM_DIR_START;
11454 offset < TG3_NVM_DIR_END;
11455 offset += TG3_NVM_DIRENT_SIZE) {
11456 if (tg3_nvram_read(tp, offset, &val))
11457 return NULL;
11459 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11460 TG3_NVM_DIRTYPE_EXTVPD)
11461 break;
11464 if (offset != TG3_NVM_DIR_END) {
11465 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11466 if (tg3_nvram_read(tp, offset + 4, &offset))
11467 return NULL;
11469 offset = tg3_nvram_logical_addr(tp, offset);
11473 if (!offset || !len) {
11474 offset = TG3_NVM_VPD_OFF;
11475 len = TG3_NVM_VPD_LEN;
11478 buf = kmalloc(len, GFP_KERNEL);
11479 if (buf == NULL)
11480 return NULL;
11482 if (magic == TG3_EEPROM_MAGIC) {
11483 for (i = 0; i < len; i += 4) {
11484 /* The data is in little-endian format in NVRAM.
11485 * Use the big-endian read routines to preserve
11486 * the byte order as it exists in NVRAM. */
11488 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11489 goto error;
11491 } else {
11492 u8 *ptr;
11493 ssize_t cnt;
11494 unsigned int pos = 0;
11496 ptr = (u8 *)&buf[0];
11497 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11498 cnt = pci_read_vpd(tp->pdev, pos,
11499 len - pos, ptr);
11500 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11501 cnt = 0;
11502 else if (cnt < 0)
11503 goto error;
11505 if (pos != len)
11506 goto error;
11509 *vpdlen = len;
11511 return buf;
11513 error:
11514 kfree(buf);
11515 return NULL;
11518 #define NVRAM_TEST_SIZE 0x100
11519 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11520 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11521 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11522 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11523 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11524 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11525 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11526 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11528 static int tg3_test_nvram(struct tg3 *tp)
11530 u32 csum, magic, len;
11531 __be32 *buf;
11532 int i, j, k, err = 0, size;
11534 if (tg3_flag(tp, NO_NVRAM))
11535 return 0;
11537 if (tg3_nvram_read(tp, 0, &magic) != 0)
11538 return -EIO;
11540 if (magic == TG3_EEPROM_MAGIC)
11541 size = NVRAM_TEST_SIZE;
11542 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11543 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11544 TG3_EEPROM_SB_FORMAT_1) {
11545 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11546 case TG3_EEPROM_SB_REVISION_0:
11547 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11548 break;
11549 case TG3_EEPROM_SB_REVISION_2:
11550 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11551 break;
11552 case TG3_EEPROM_SB_REVISION_3:
11553 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11554 break;
11555 case TG3_EEPROM_SB_REVISION_4:
11556 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11557 break;
11558 case TG3_EEPROM_SB_REVISION_5:
11559 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11560 break;
11561 case TG3_EEPROM_SB_REVISION_6:
11562 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11563 break;
11564 default:
11565 return -EIO;
11567 } else
11568 return 0;
11569 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11570 size = NVRAM_SELFBOOT_HW_SIZE;
11571 else
11572 return -EIO;
11574 buf = kmalloc(size, GFP_KERNEL);
11575 if (buf == NULL)
11576 return -ENOMEM;
11578 err = -EIO;
11579 for (i = 0, j = 0; i < size; i += 4, j++) {
11580 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11581 if (err)
11582 break;
11584 if (i < size)
11585 goto out;
11587 /* Selfboot format */
11588 magic = be32_to_cpu(buf[0]);
11589 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11590 TG3_EEPROM_MAGIC_FW) {
11591 u8 *buf8 = (u8 *) buf, csum8 = 0;
11593 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11594 TG3_EEPROM_SB_REVISION_2) {
11595 /* For rev 2, the csum doesn't include the MBA. */
11596 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11597 csum8 += buf8[i];
11598 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11599 csum8 += buf8[i];
11600 } else {
11601 for (i = 0; i < size; i++)
11602 csum8 += buf8[i];
11605 if (csum8 == 0) {
11606 err = 0;
11607 goto out;
11610 err = -EIO;
11611 goto out;
11614 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11615 TG3_EEPROM_MAGIC_HW) {
11616 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11617 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11618 u8 *buf8 = (u8 *) buf;
11620 /* Separate the parity bits and the data bytes. */
11621 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11622 if ((i == 0) || (i == 8)) {
11623 int l;
11624 u8 msk;
11626 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11627 parity[k++] = buf8[i] & msk;
11628 i++;
11629 } else if (i == 16) {
11630 int l;
11631 u8 msk;
11633 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11634 parity[k++] = buf8[i] & msk;
11635 i++;
11637 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11638 parity[k++] = buf8[i] & msk;
11639 i++;
11641 data[j++] = buf8[i];
11644 err = -EIO;
11645 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11646 u8 hw8 = hweight8(data[i]);
11648 if ((hw8 & 0x1) && parity[i])
11649 goto out;
11650 else if (!(hw8 & 0x1) && !parity[i])
11651 goto out;
11653 err = 0;
11654 goto out;
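/* Example of the parity rule enforced above: a data byte of 0x03
 * (two bits set, even weight) must have its parity bit set, while
 * 0x07 (odd weight) must have it clear, so every 9-bit data+parity
 * group carries odd parity overall.
 */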
11657 err = -EIO;
11659 /* Bootstrap checksum at offset 0x10 */
11660 csum = calc_crc((unsigned char *) buf, 0x10);
11661 if (csum != le32_to_cpu(buf[0x10/4]))
11662 goto out;
11664 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11665 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11666 if (csum != le32_to_cpu(buf[0xfc/4]))
11667 goto out;
11669 kfree(buf);
11671 buf = tg3_vpd_readblock(tp, &len);
11672 if (!buf)
11673 return -ENOMEM;
11675 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11676 if (i > 0) {
11677 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11678 if (j < 0)
11679 goto out;
11681 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11682 goto out;
11684 i += PCI_VPD_LRDT_TAG_SIZE;
11685 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11686 PCI_VPD_RO_KEYWORD_CHKSUM);
11687 if (j > 0) {
11688 u8 csum8 = 0;
11690 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11692 for (i = 0; i <= j; i++)
11693 csum8 += ((u8 *)buf)[i];
11695 if (csum8)
11696 goto out;
11700 err = 0;
11702 out:
11703 kfree(buf);
11704 return err;
11707 #define TG3_SERDES_TIMEOUT_SEC 2
11708 #define TG3_COPPER_TIMEOUT_SEC 6
11710 static int tg3_test_link(struct tg3 *tp)
11712 int i, max;
11714 if (!netif_running(tp->dev))
11715 return -ENODEV;
11717 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11718 max = TG3_SERDES_TIMEOUT_SEC;
11719 else
11720 max = TG3_COPPER_TIMEOUT_SEC;
11722 for (i = 0; i < max; i++) {
11723 if (netif_carrier_ok(tp->dev))
11724 return 0;
11726 if (msleep_interruptible(1000))
11727 break;
11730 return -EIO;
11733 /* Only test the commonly used registers */
11734 static int tg3_test_registers(struct tg3 *tp)
11736 int i, is_5705, is_5750;
11737 u32 offset, read_mask, write_mask, val, save_val, read_val;
11738 static struct {
11739 u16 offset;
11740 u16 flags;
11741 #define TG3_FL_5705 0x1
11742 #define TG3_FL_NOT_5705 0x2
11743 #define TG3_FL_NOT_5788 0x4
11744 #define TG3_FL_NOT_5750 0x8
11745 u32 read_mask;
11746 u32 write_mask;
11747 } reg_tbl[] = {
11748 /* MAC Control Registers */
11749 { MAC_MODE, TG3_FL_NOT_5705,
11750 0x00000000, 0x00ef6f8c },
11751 { MAC_MODE, TG3_FL_5705,
11752 0x00000000, 0x01ef6b8c },
11753 { MAC_STATUS, TG3_FL_NOT_5705,
11754 0x03800107, 0x00000000 },
11755 { MAC_STATUS, TG3_FL_5705,
11756 0x03800100, 0x00000000 },
11757 { MAC_ADDR_0_HIGH, 0x0000,
11758 0x00000000, 0x0000ffff },
11759 { MAC_ADDR_0_LOW, 0x0000,
11760 0x00000000, 0xffffffff },
11761 { MAC_RX_MTU_SIZE, 0x0000,
11762 0x00000000, 0x0000ffff },
11763 { MAC_TX_MODE, 0x0000,
11764 0x00000000, 0x00000070 },
11765 { MAC_TX_LENGTHS, 0x0000,
11766 0x00000000, 0x00003fff },
11767 { MAC_RX_MODE, TG3_FL_NOT_5705,
11768 0x00000000, 0x000007fc },
11769 { MAC_RX_MODE, TG3_FL_5705,
11770 0x00000000, 0x000007dc },
11771 { MAC_HASH_REG_0, 0x0000,
11772 0x00000000, 0xffffffff },
11773 { MAC_HASH_REG_1, 0x0000,
11774 0x00000000, 0xffffffff },
11775 { MAC_HASH_REG_2, 0x0000,
11776 0x00000000, 0xffffffff },
11777 { MAC_HASH_REG_3, 0x0000,
11778 0x00000000, 0xffffffff },
11780 /* Receive Data and Receive BD Initiator Control Registers. */
11781 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11782 0x00000000, 0xffffffff },
11783 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11784 0x00000000, 0xffffffff },
11785 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11786 0x00000000, 0x00000003 },
11787 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11788 0x00000000, 0xffffffff },
11789 { RCVDBDI_STD_BD+0, 0x0000,
11790 0x00000000, 0xffffffff },
11791 { RCVDBDI_STD_BD+4, 0x0000,
11792 0x00000000, 0xffffffff },
11793 { RCVDBDI_STD_BD+8, 0x0000,
11794 0x00000000, 0xffff0002 },
11795 { RCVDBDI_STD_BD+0xc, 0x0000,
11796 0x00000000, 0xffffffff },
11798 /* Receive BD Initiator Control Registers. */
11799 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11800 0x00000000, 0xffffffff },
11801 { RCVBDI_STD_THRESH, TG3_FL_5705,
11802 0x00000000, 0x000003ff },
11803 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11804 0x00000000, 0xffffffff },
11806 /* Host Coalescing Control Registers. */
11807 { HOSTCC_MODE, TG3_FL_NOT_5705,
11808 0x00000000, 0x00000004 },
11809 { HOSTCC_MODE, TG3_FL_5705,
11810 0x00000000, 0x000000f6 },
11811 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11812 0x00000000, 0xffffffff },
11813 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11814 0x00000000, 0x000003ff },
11815 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11816 0x00000000, 0xffffffff },
11817 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11818 0x00000000, 0x000003ff },
11819 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11820 0x00000000, 0xffffffff },
11821 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11822 0x00000000, 0x000000ff },
11823 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11824 0x00000000, 0xffffffff },
11825 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11826 0x00000000, 0x000000ff },
11827 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11828 0x00000000, 0xffffffff },
11829 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11830 0x00000000, 0xffffffff },
11831 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11832 0x00000000, 0xffffffff },
11833 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11834 0x00000000, 0x000000ff },
11835 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11836 0x00000000, 0xffffffff },
11837 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11838 0x00000000, 0x000000ff },
11839 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11840 0x00000000, 0xffffffff },
11841 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11842 0x00000000, 0xffffffff },
11843 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11844 0x00000000, 0xffffffff },
11845 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11846 0x00000000, 0xffffffff },
11847 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11848 0x00000000, 0xffffffff },
11849 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11850 0xffffffff, 0x00000000 },
11851 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11852 0xffffffff, 0x00000000 },
11854 /* Buffer Manager Control Registers. */
11855 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11856 0x00000000, 0x007fff80 },
11857 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11858 0x00000000, 0x007fffff },
11859 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11860 0x00000000, 0x0000003f },
11861 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11862 0x00000000, 0x000001ff },
11863 { BUFMGR_MB_HIGH_WATER, 0x0000,
11864 0x00000000, 0x000001ff },
11865 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11866 0xffffffff, 0x00000000 },
11867 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11868 0xffffffff, 0x00000000 },
11870 /* Mailbox Registers */
11871 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11872 0x00000000, 0x000001ff },
11873 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11874 0x00000000, 0x000001ff },
11875 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11876 0x00000000, 0x000007ff },
11877 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11878 0x00000000, 0x000001ff },
11880 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11883 is_5705 = is_5750 = 0;
11884 if (tg3_flag(tp, 5705_PLUS)) {
11885 is_5705 = 1;
11886 if (tg3_flag(tp, 5750_PLUS))
11887 is_5750 = 1;
11890 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11891 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11892 continue;
11894 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11895 continue;
11897 if (tg3_flag(tp, IS_5788) &&
11898 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11899 continue;
11901 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11902 continue;
11904 offset = (u32) reg_tbl[i].offset;
11905 read_mask = reg_tbl[i].read_mask;
11906 write_mask = reg_tbl[i].write_mask;
11908 /* Save the original register content */
11909 save_val = tr32(offset);
11911 /* Determine the read-only value. */
11912 read_val = save_val & read_mask;
11914 /* Write zero to the register, then make sure the read-only bits
11915 * are not changed and the read/write bits are all zeros. */
11917 tw32(offset, 0);
11919 val = tr32(offset);
11921 /* Test the read-only and read/write bits. */
11922 if (((val & read_mask) != read_val) || (val & write_mask))
11923 goto out;
11925 /* Write ones to all the bits defined by read_mask and write_mask,
11926 * then make sure the read-only bits are not changed and the
11927 * read/write bits are all ones. */
11929 tw32(offset, read_mask | write_mask);
11931 val = tr32(offset);
11933 /* Test the read-only bits. */
11934 if ((val & read_mask) != read_val)
11935 goto out;
11937 /* Test the read/write bits. */
11938 if ((val & write_mask) != write_mask)
11939 goto out;
11941 tw32(offset, save_val);
11944 return 0;
11946 out:
11947 if (netif_msg_hw(tp))
11948 netdev_err(tp->dev,
11949 "Register test failed at offset %x\n", offset);
11950 tw32(offset, save_val);
11951 return -EIO;
11954 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11956 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11957 int i;
11958 u32 j;
11960 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11961 for (j = 0; j < len; j += 4) {
11962 u32 val;
11964 tg3_write_mem(tp, offset + j, test_pattern[i]);
11965 tg3_read_mem(tp, offset + j, &val);
11966 if (val != test_pattern[i])
11967 return -EIO;
11970 return 0;
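/* Each pattern is written and verified across the whole window, so a
 * call such as tg3_do_mem_test(tp, 0x00004000, 0x00800) (sizes taken
 * from the 5705 table below) performs 3 * 0x800 / 4 = 1536
 * write/read/compare cycles.
 */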
11973 static int tg3_test_memory(struct tg3 *tp)
11975 static struct mem_entry {
11976 u32 offset;
11977 u32 len;
11978 } mem_tbl_570x[] = {
11979 { 0x00000000, 0x00b50},
11980 { 0x00002000, 0x1c000},
11981 { 0xffffffff, 0x00000}
11982 }, mem_tbl_5705[] = {
11983 { 0x00000100, 0x0000c},
11984 { 0x00000200, 0x00008},
11985 { 0x00004000, 0x00800},
11986 { 0x00006000, 0x01000},
11987 { 0x00008000, 0x02000},
11988 { 0x00010000, 0x0e000},
11989 { 0xffffffff, 0x00000}
11990 }, mem_tbl_5755[] = {
11991 { 0x00000200, 0x00008},
11992 { 0x00004000, 0x00800},
11993 { 0x00006000, 0x00800},
11994 { 0x00008000, 0x02000},
11995 { 0x00010000, 0x0c000},
11996 { 0xffffffff, 0x00000}
11997 }, mem_tbl_5906[] = {
11998 { 0x00000200, 0x00008},
11999 { 0x00004000, 0x00400},
12000 { 0x00006000, 0x00400},
12001 { 0x00008000, 0x01000},
12002 { 0x00010000, 0x01000},
12003 { 0xffffffff, 0x00000}
12004 }, mem_tbl_5717[] = {
12005 { 0x00000200, 0x00008},
12006 { 0x00010000, 0x0a000},
12007 { 0x00020000, 0x13c00},
12008 { 0xffffffff, 0x00000}
12009 }, mem_tbl_57765[] = {
12010 { 0x00000200, 0x00008},
12011 { 0x00004000, 0x00800},
12012 { 0x00006000, 0x09800},
12013 { 0x00010000, 0x0a000},
12014 { 0xffffffff, 0x00000}
12016 struct mem_entry *mem_tbl;
12017 int err = 0;
12018 int i;
12020 if (tg3_flag(tp, 5717_PLUS))
12021 mem_tbl = mem_tbl_5717;
12022 else if (tg3_flag(tp, 57765_CLASS))
12023 mem_tbl = mem_tbl_57765;
12024 else if (tg3_flag(tp, 5755_PLUS))
12025 mem_tbl = mem_tbl_5755;
12026 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12027 mem_tbl = mem_tbl_5906;
12028 else if (tg3_flag(tp, 5705_PLUS))
12029 mem_tbl = mem_tbl_5705;
12030 else
12031 mem_tbl = mem_tbl_570x;
12033 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12034 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12035 if (err)
12036 break;
12039 return err;
12042 #define TG3_TSO_MSS 500
12044 #define TG3_TSO_IP_HDR_LEN 20
12045 #define TG3_TSO_TCP_HDR_LEN 20
12046 #define TG3_TSO_TCP_OPT_LEN 12
12048 static const u8 tg3_tso_header[] = {
12049 0x08, 0x00,
12050 0x45, 0x00, 0x00, 0x00,
12051 0x00, 0x00, 0x40, 0x00,
12052 0x40, 0x06, 0x00, 0x00,
12053 0x0a, 0x00, 0x00, 0x01,
12054 0x0a, 0x00, 0x00, 0x02,
12055 0x0d, 0x00, 0xe0, 0x00,
12056 0x00, 0x00, 0x01, 0x00,
12057 0x00, 0x00, 0x02, 0x00,
12058 0x80, 0x10, 0x10, 0x00,
12059 0x14, 0x09, 0x00, 0x00,
12060 0x01, 0x01, 0x08, 0x0a,
12061 0x11, 0x11, 0x11, 0x11,
12062 0x11, 0x11, 0x11, 0x11,
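/* Layout of the canned header above (copied in just after the two MAC
 * address fields): 2 bytes of ethertype (0x0800, IPv4), a 20-byte IP
 * header with source 10.0.0.1 and destination 10.0.0.2, then a 20-byte
 * TCP header followed by a 12-byte timestamp option, matching the
 * TG3_TSO_*_LEN constants above.
 */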
12065 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12067 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12068 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12069 u32 budget;
12070 struct sk_buff *skb;
12071 u8 *tx_data, *rx_data;
12072 dma_addr_t map;
12073 int num_pkts, tx_len, rx_len, i, err;
12074 struct tg3_rx_buffer_desc *desc;
12075 struct tg3_napi *tnapi, *rnapi;
12076 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12078 tnapi = &tp->napi[0];
12079 rnapi = &tp->napi[0];
12080 if (tp->irq_cnt > 1) {
12081 if (tg3_flag(tp, ENABLE_RSS))
12082 rnapi = &tp->napi[1];
12083 if (tg3_flag(tp, ENABLE_TSS))
12084 tnapi = &tp->napi[1];
12086 coal_now = tnapi->coal_now | rnapi->coal_now;
12088 err = -EIO;
12090 tx_len = pktsz;
12091 skb = netdev_alloc_skb(tp->dev, tx_len);
12092 if (!skb)
12093 return -ENOMEM;
12095 tx_data = skb_put(skb, tx_len);
12096 memcpy(tx_data, tp->dev->dev_addr, 6);
12097 memset(tx_data + 6, 0x0, 8);
12099 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12101 if (tso_loopback) {
12102 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12104 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12105 TG3_TSO_TCP_OPT_LEN;
12107 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12108 sizeof(tg3_tso_header));
12109 mss = TG3_TSO_MSS;
12111 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12112 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12114 /* Set the total length field in the IP header */
12115 iph->tot_len = htons((u16)(mss + hdr_len));
12117 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12118 TXD_FLAG_CPU_POST_DMA);
12120 if (tg3_flag(tp, HW_TSO_1) ||
12121 tg3_flag(tp, HW_TSO_2) ||
12122 tg3_flag(tp, HW_TSO_3)) {
12123 struct tcphdr *th;
12124 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12125 th = (struct tcphdr *)&tx_data[val];
12126 th->check = 0;
12127 } else
12128 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12130 if (tg3_flag(tp, HW_TSO_3)) {
12131 mss |= (hdr_len & 0xc) << 12;
12132 if (hdr_len & 0x10)
12133 base_flags |= 0x00000010;
12134 base_flags |= (hdr_len & 0x3e0) << 5;
12135 } else if (tg3_flag(tp, HW_TSO_2))
12136 mss |= hdr_len << 9;
12137 else if (tg3_flag(tp, HW_TSO_1) ||
12138 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12139 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12140 } else {
12141 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
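/* Sketch of the HW_TSO_3 encoding above, assuming the usual
 * hdr_len = 52 (0x34): bits 2-3 of hdr_len map to mss bits 14-15
 * (here 0x4000), bit 4 sets base_flags bit 4, and bits 5-9 shift
 * into base_flags bits 10-14 (here 0x400).
 */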
12144 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12145 } else {
12146 num_pkts = 1;
12147 data_off = ETH_HLEN;
12149 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12150 tx_len > VLAN_ETH_FRAME_LEN)
12151 base_flags |= TXD_FLAG_JMB_PKT;
12154 for (i = data_off; i < tx_len; i++)
12155 tx_data[i] = (u8) (i & 0xff);
12157 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12158 if (pci_dma_mapping_error(tp->pdev, map)) {
12159 dev_kfree_skb(skb);
12160 return -EIO;
12163 val = tnapi->tx_prod;
12164 tnapi->tx_buffers[val].skb = skb;
12165 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12167 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12168 rnapi->coal_now);
12170 udelay(10);
12172 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12174 budget = tg3_tx_avail(tnapi);
12175 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12176 base_flags | TXD_FLAG_END, mss, 0)) {
12177 tnapi->tx_buffers[val].skb = NULL;
12178 dev_kfree_skb(skb);
12179 return -EIO;
12182 tnapi->tx_prod++;
12184 /* Sync BD data before updating mailbox */
12185 wmb();
12187 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12188 tr32_mailbox(tnapi->prodmbox);
12190 udelay(10);
12192 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12193 for (i = 0; i < 35; i++) {
12194 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12195 coal_now);
12197 udelay(10);
12199 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12200 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12201 if ((tx_idx == tnapi->tx_prod) &&
12202 (rx_idx == (rx_start_idx + num_pkts)))
12203 break;
12206 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12207 dev_kfree_skb(skb);
12209 if (tx_idx != tnapi->tx_prod)
12210 goto out;
12212 if (rx_idx != rx_start_idx + num_pkts)
12213 goto out;
12215 val = data_off;
12216 while (rx_idx != rx_start_idx) {
12217 desc = &rnapi->rx_rcb[rx_start_idx++];
12218 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12219 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12221 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12222 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12223 goto out;
12225 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12226 - ETH_FCS_LEN;
12228 if (!tso_loopback) {
12229 if (rx_len != tx_len)
12230 goto out;
12232 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12233 if (opaque_key != RXD_OPAQUE_RING_STD)
12234 goto out;
12235 } else {
12236 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12237 goto out;
12239 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12240 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12241 >> RXD_TCPCSUM_SHIFT != 0xffff) {
12242 goto out;
12245 if (opaque_key == RXD_OPAQUE_RING_STD) {
12246 rx_data = tpr->rx_std_buffers[desc_idx].data;
12247 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12248 mapping);
12249 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12250 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12251 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12252 mapping);
12253 } else
12254 goto out;
12256 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12257 PCI_DMA_FROMDEVICE);
12259 rx_data += TG3_RX_OFFSET(tp);
12260 for (i = data_off; i < rx_len; i++, val++) {
12261 if (*(rx_data + i) != (u8) (val & 0xff))
12262 goto out;
12266 err = 0;
12268 /* tg3_free_rings will unmap and free the rx_data */
12269 out:
12270 return err;
12273 #define TG3_STD_LOOPBACK_FAILED 1
12274 #define TG3_JMB_LOOPBACK_FAILED 2
12275 #define TG3_TSO_LOOPBACK_FAILED 4
12276 #define TG3_LOOPBACK_FAILED \
12277 (TG3_STD_LOOPBACK_FAILED | \
12278 TG3_JMB_LOOPBACK_FAILED | \
12279 TG3_TSO_LOOPBACK_FAILED)
12281 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12283 int err = -EIO;
12284 u32 eee_cap;
12285 u32 jmb_pkt_sz = 9000;
12287 if (tp->dma_limit)
12288 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12290 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12291 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12293 if (!netif_running(tp->dev)) {
12294 data[0] = TG3_LOOPBACK_FAILED;
12295 data[1] = TG3_LOOPBACK_FAILED;
12296 if (do_extlpbk)
12297 data[2] = TG3_LOOPBACK_FAILED;
12298 goto done;
12301 err = tg3_reset_hw(tp, 1);
12302 if (err) {
12303 data[0] = TG3_LOOPBACK_FAILED;
12304 data[1] = TG3_LOOPBACK_FAILED;
12305 if (do_extlpbk)
12306 data[2] = TG3_LOOPBACK_FAILED;
12307 goto done;
12310 if (tg3_flag(tp, ENABLE_RSS)) {
12311 int i;
12313 /* Reroute all rx packets to the 1st queue */
12314 for (i = MAC_RSS_INDIR_TBL_0;
12315 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12316 tw32(i, 0x0);
12319 /* HW errata - MAC loopback fails in some cases on 5780.
12320 * Normal traffic and PHY loopback are not affected by
12321 * the errata. Also, the MAC loopback test is deprecated for
12322 * all newer ASIC revisions. */
12324 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12325 !tg3_flag(tp, CPMU_PRESENT)) {
12326 tg3_mac_loopback(tp, true);
12328 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12329 data[0] |= TG3_STD_LOOPBACK_FAILED;
12331 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12332 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12333 data[0] |= TG3_JMB_LOOPBACK_FAILED;
12335 tg3_mac_loopback(tp, false);
12338 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12339 !tg3_flag(tp, USE_PHYLIB)) {
12340 int i;
12342 tg3_phy_lpbk_set(tp, 0, false);
12344 /* Wait for link */
12345 for (i = 0; i < 100; i++) {
12346 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12347 break;
12348 mdelay(1);
12351 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12352 data[1] |= TG3_STD_LOOPBACK_FAILED;
12353 if (tg3_flag(tp, TSO_CAPABLE) &&
12354 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12355 data[1] |= TG3_TSO_LOOPBACK_FAILED;
12356 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12357 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12358 data[1] |= TG3_JMB_LOOPBACK_FAILED;
12360 if (do_extlpbk) {
12361 tg3_phy_lpbk_set(tp, 0, true);
12363 /* All link indications report up, but the hardware
12364 * isn't really ready for about 20 msec. Double it
12365 * to be sure. */
12367 mdelay(40);
12369 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12370 data[2] |= TG3_STD_LOOPBACK_FAILED;
12371 if (tg3_flag(tp, TSO_CAPABLE) &&
12372 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12373 data[2] |= TG3_TSO_LOOPBACK_FAILED;
12374 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12375 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12376 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12379 /* Re-enable gphy autopowerdown. */
12380 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12381 tg3_phy_toggle_apd(tp, true);
12384 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12386 done:
12387 tp->phy_flags |= eee_cap;
12389 return err;
12392 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12393 u64 *data)
12395 struct tg3 *tp = netdev_priv(dev);
12396 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12398 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12399 tg3_power_up(tp)) {
12400 etest->flags |= ETH_TEST_FL_FAILED;
12401 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12402 return;
12405 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12407 if (tg3_test_nvram(tp) != 0) {
12408 etest->flags |= ETH_TEST_FL_FAILED;
12409 data[0] = 1;
12411 if (!doextlpbk && tg3_test_link(tp)) {
12412 etest->flags |= ETH_TEST_FL_FAILED;
12413 data[1] = 1;
12415 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12416 int err, err2 = 0, irq_sync = 0;
12418 if (netif_running(dev)) {
12419 tg3_phy_stop(tp);
12420 tg3_netif_stop(tp);
12421 irq_sync = 1;
12424 tg3_full_lock(tp, irq_sync);
12426 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12427 err = tg3_nvram_lock(tp);
12428 tg3_halt_cpu(tp, RX_CPU_BASE);
12429 if (!tg3_flag(tp, 5705_PLUS))
12430 tg3_halt_cpu(tp, TX_CPU_BASE);
12431 if (!err)
12432 tg3_nvram_unlock(tp);
12434 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12435 tg3_phy_reset(tp);
12437 if (tg3_test_registers(tp) != 0) {
12438 etest->flags |= ETH_TEST_FL_FAILED;
12439 data[2] = 1;
12442 if (tg3_test_memory(tp) != 0) {
12443 etest->flags |= ETH_TEST_FL_FAILED;
12444 data[3] = 1;
12447 if (doextlpbk)
12448 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12450 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12451 etest->flags |= ETH_TEST_FL_FAILED;
12453 tg3_full_unlock(tp);
12455 if (tg3_test_interrupt(tp) != 0) {
12456 etest->flags |= ETH_TEST_FL_FAILED;
12457 data[7] = 1;
12460 tg3_full_lock(tp, 0);
12462 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12463 if (netif_running(dev)) {
12464 tg3_flag_set(tp, INIT_COMPLETE);
12465 err2 = tg3_restart_hw(tp, 1);
12466 if (!err2)
12467 tg3_netif_start(tp);
12470 tg3_full_unlock(tp);
12472 if (irq_sync && !err2)
12473 tg3_phy_start(tp);
12475 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12476 tg3_power_down(tp);
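/* The result slots filled in by tg3_self_test() map to the test
 * names reported to userspace: data[0] nvram, data[1] link (skipped
 * when an external loopback was requested), data[2] registers,
 * data[3] memory, data[4..6] the three loopback words described
 * above, and data[7] the interrupt test.  The nvram and link checks
 * run online; everything else requires ETH_TEST_FL_OFFLINE and a
 * full halt/restart of the device.
 */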
12480 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12482 struct mii_ioctl_data *data = if_mii(ifr);
12483 struct tg3 *tp = netdev_priv(dev);
12484 int err;
12486 if (tg3_flag(tp, USE_PHYLIB)) {
12487 struct phy_device *phydev;
12488 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12489 return -EAGAIN;
12490 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12491 return phy_mii_ioctl(phydev, ifr, cmd);
12494 switch (cmd) {
12495 case SIOCGMIIPHY:
12496 data->phy_id = tp->phy_addr;
12498 /* fall through */
12499 case SIOCGMIIREG: {
12500 u32 mii_regval;
12502 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12503 break; /* We have no PHY */
12505 if (!netif_running(dev))
12506 return -EAGAIN;
12508 spin_lock_bh(&tp->lock);
12509 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12510 spin_unlock_bh(&tp->lock);
12512 data->val_out = mii_regval;
12514 return err;
12517 case SIOCSMIIREG:
12518 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12519 break; /* We have no PHY */
12521 if (!netif_running(dev))
12522 return -EAGAIN;
12524 spin_lock_bh(&tp->lock);
12525 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12526 spin_unlock_bh(&tp->lock);
12528 return err;
12530 default:
12531 /* do nothing */
12532 break;
12534 return -EOPNOTSUPP;
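/* For context, the SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG cases
 * above are reached from userspace with a plain ioctl() on a socket.
 * A minimal sketch, not something this driver mandates -- the device
 * name, the BMSR register choice and the pointer cast are the usual
 * userspace idiom, stated here as assumptions:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr = { 0 };
 *	struct mii_ioctl_data *mii =
 *		(struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);      - fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);      - result lands in mii->val_out
 */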
12537 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12539 struct tg3 *tp = netdev_priv(dev);
12541 memcpy(ec, &tp->coal, sizeof(*ec));
12542 return 0;
12545 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12547 struct tg3 *tp = netdev_priv(dev);
12548 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12549 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12551 if (!tg3_flag(tp, 5705_PLUS)) {
12552 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12553 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12554 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12555 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12558 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12559 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12560 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12561 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12562 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12563 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12564 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12565 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12566 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12567 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12568 return -EINVAL;
12570 /* No rx interrupts will be generated if both are zero */
12571 if ((ec->rx_coalesce_usecs == 0) &&
12572 (ec->rx_max_coalesced_frames == 0))
12573 return -EINVAL;
12575 /* No tx interrupts will be generated if both are zero */
12576 if ((ec->tx_coalesce_usecs == 0) &&
12577 (ec->tx_max_coalesced_frames == 0))
12578 return -EINVAL;
12580 /* Only copy relevant parameters, ignore all others. */
12581 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12582 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12583 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12584 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12585 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12586 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12587 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12588 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12589 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12591 if (netif_running(dev)) {
12592 tg3_full_lock(tp, 0);
12593 __tg3_set_coalesce(tp, &tp->coal);
12594 tg3_full_unlock(tp);
12596 return 0;
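/* Note on the validation above: a direction's coalescing settings
 * are rejected only when both its usecs and max_frames values are
 * zero, since that combination could never raise an interrupt.  The
 * *_irq and stats-block parameters are configurable only on pre-5705
 * parts; on 5705_PLUS chips the corresponding maxima are left at
 * zero, so any nonzero request for them fails with -EINVAL.
 */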
12599 static const struct ethtool_ops tg3_ethtool_ops = {
12600 .get_settings = tg3_get_settings,
12601 .set_settings = tg3_set_settings,
12602 .get_drvinfo = tg3_get_drvinfo,
12603 .get_regs_len = tg3_get_regs_len,
12604 .get_regs = tg3_get_regs,
12605 .get_wol = tg3_get_wol,
12606 .set_wol = tg3_set_wol,
12607 .get_msglevel = tg3_get_msglevel,
12608 .set_msglevel = tg3_set_msglevel,
12609 .nway_reset = tg3_nway_reset,
12610 .get_link = ethtool_op_get_link,
12611 .get_eeprom_len = tg3_get_eeprom_len,
12612 .get_eeprom = tg3_get_eeprom,
12613 .set_eeprom = tg3_set_eeprom,
12614 .get_ringparam = tg3_get_ringparam,
12615 .set_ringparam = tg3_set_ringparam,
12616 .get_pauseparam = tg3_get_pauseparam,
12617 .set_pauseparam = tg3_set_pauseparam,
12618 .self_test = tg3_self_test,
12619 .get_strings = tg3_get_strings,
12620 .set_phys_id = tg3_set_phys_id,
12621 .get_ethtool_stats = tg3_get_ethtool_stats,
12622 .get_coalesce = tg3_get_coalesce,
12623 .set_coalesce = tg3_set_coalesce,
12624 .get_sset_count = tg3_get_sset_count,
12625 .get_rxnfc = tg3_get_rxnfc,
12626 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12627 .get_rxfh_indir = tg3_get_rxfh_indir,
12628 .set_rxfh_indir = tg3_set_rxfh_indir,
12629 .get_ts_info = ethtool_op_get_ts_info,
12632 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12633 struct rtnl_link_stats64 *stats)
12635 struct tg3 *tp = netdev_priv(dev);
12637 spin_lock_bh(&tp->lock);
12638 if (!tp->hw_stats) {
12639 spin_unlock_bh(&tp->lock);
12640 return &tp->net_stats_prev;
12643 tg3_get_nstats(tp, stats);
12644 spin_unlock_bh(&tp->lock);
12646 return stats;
12649 static void tg3_set_rx_mode(struct net_device *dev)
12651 struct tg3 *tp = netdev_priv(dev);
12653 if (!netif_running(dev))
12654 return;
12656 tg3_full_lock(tp, 0);
12657 __tg3_set_rx_mode(dev);
12658 tg3_full_unlock(tp);
12661 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12662 int new_mtu)
12664 dev->mtu = new_mtu;
12666 if (new_mtu > ETH_DATA_LEN) {
12667 if (tg3_flag(tp, 5780_CLASS)) {
12668 netdev_update_features(dev);
12669 tg3_flag_clear(tp, TSO_CAPABLE);
12670 } else {
12671 tg3_flag_set(tp, JUMBO_RING_ENABLE);
12673 } else {
12674 if (tg3_flag(tp, 5780_CLASS)) {
12675 tg3_flag_set(tp, TSO_CAPABLE);
12676 netdev_update_features(dev);
12678 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12682 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12684 struct tg3 *tp = netdev_priv(dev);
12685 int err, reset_phy = 0;
12687 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12688 return -EINVAL;
12690 if (!netif_running(dev)) {
12691 /* We'll just catch it later when the
12692 * device is brought up.
12693 */
12694 tg3_set_mtu(dev, tp, new_mtu);
12695 return 0;
12698 tg3_phy_stop(tp);
12700 tg3_netif_stop(tp);
12702 tg3_full_lock(tp, 1);
12704 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12706 tg3_set_mtu(dev, tp, new_mtu);
12708 /* Reset PHY, otherwise the read DMA engine will be in a mode that
12709 * breaks all requests to 256 bytes.
12710 */
12711 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12712 reset_phy = 1;
12714 err = tg3_restart_hw(tp, reset_phy);
12716 if (!err)
12717 tg3_netif_start(tp);
12719 tg3_full_unlock(tp);
12721 if (!err)
12722 tg3_phy_start(tp);
12724 return err;
12727 static const struct net_device_ops tg3_netdev_ops = {
12728 .ndo_open = tg3_open,
12729 .ndo_stop = tg3_close,
12730 .ndo_start_xmit = tg3_start_xmit,
12731 .ndo_get_stats64 = tg3_get_stats64,
12732 .ndo_validate_addr = eth_validate_addr,
12733 .ndo_set_rx_mode = tg3_set_rx_mode,
12734 .ndo_set_mac_address = tg3_set_mac_addr,
12735 .ndo_do_ioctl = tg3_ioctl,
12736 .ndo_tx_timeout = tg3_tx_timeout,
12737 .ndo_change_mtu = tg3_change_mtu,
12738 .ndo_fix_features = tg3_fix_features,
12739 .ndo_set_features = tg3_set_features,
12740 #ifdef CONFIG_NET_POLL_CONTROLLER
12741 .ndo_poll_controller = tg3_poll_controller,
12742 #endif
12745 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12747 u32 cursize, val, magic;
12749 tp->nvram_size = EEPROM_CHIP_SIZE;
12751 if (tg3_nvram_read(tp, 0, &magic) != 0)
12752 return;
12754 if ((magic != TG3_EEPROM_MAGIC) &&
12755 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12756 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12757 return;
12759 /*
12760 * Size the chip by reading offsets at increasing powers of two.
12761 * When we encounter our validation signature, we know the addressing
12762 * has wrapped around, and thus have our chip size.
12763 */
12764 cursize = 0x10;
12766 while (cursize < tp->nvram_size) {
12767 if (tg3_nvram_read(tp, cursize, &val) != 0)
12768 return;
12770 if (val == magic)
12771 break;
12773 cursize <<= 1;
12776 tp->nvram_size = cursize;
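/* A worked example of the sizing loop above, for a hypothetical 16KB
 * serial EEPROM (and assuming no intermediate word happens to equal
 * the signature): reads at 0x10, 0x20, ... 0x2000 all return data
 * that differs from magic; at cursize = 0x4000 the addressing wraps
 * back to offset 0, the read returns the validation signature, and
 * nvram_size is set to 0x4000.
 */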
12779 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12781 u32 val;
12783 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12784 return;
12786 /* Selfboot format */
12787 if (val != TG3_EEPROM_MAGIC) {
12788 tg3_get_eeprom_size(tp);
12789 return;
12792 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12793 if (val != 0) {
12794 /* This is confusing. We want to operate on the
12795 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12796 * call will read from NVRAM and byteswap the data
12797 * according to the byteswapping settings for all
12798 * other register accesses. This ensures the data we
12799 * want will always reside in the lower 16-bits.
12800 * However, the data in NVRAM is in LE format, which
12801 * means the data from the NVRAM read will always be
12802 * opposite the endianness of the CPU. The 16-bit
12803 * byteswap then brings the data to CPU endianness.
12804 */
12805 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12806 return;
12809 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
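/* Worked example of the size math above: per the preceding comment,
 * the 16-bit value at offset 0xf2 arrives opposite the CPU's
 * endianness, so if the device's size field holds 0x0100 (256), the
 * low half of val reads back as 0x0001 and swab16(0x0001) == 0x0100
 * recovers it, giving 256 * 1024 bytes of NVRAM.
 */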
12812 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12814 u32 nvcfg1;
12816 nvcfg1 = tr32(NVRAM_CFG1);
12817 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12818 tg3_flag_set(tp, FLASH);
12819 } else {
12820 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12821 tw32(NVRAM_CFG1, nvcfg1);
12824 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12825 tg3_flag(tp, 5780_CLASS)) {
12826 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12827 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12828 tp->nvram_jedecnum = JEDEC_ATMEL;
12829 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12830 tg3_flag_set(tp, NVRAM_BUFFERED);
12831 break;
12832 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12833 tp->nvram_jedecnum = JEDEC_ATMEL;
12834 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12835 break;
12836 case FLASH_VENDOR_ATMEL_EEPROM:
12837 tp->nvram_jedecnum = JEDEC_ATMEL;
12838 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12839 tg3_flag_set(tp, NVRAM_BUFFERED);
12840 break;
12841 case FLASH_VENDOR_ST:
12842 tp->nvram_jedecnum = JEDEC_ST;
12843 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12844 tg3_flag_set(tp, NVRAM_BUFFERED);
12845 break;
12846 case FLASH_VENDOR_SAIFUN:
12847 tp->nvram_jedecnum = JEDEC_SAIFUN;
12848 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12849 break;
12850 case FLASH_VENDOR_SST_SMALL:
12851 case FLASH_VENDOR_SST_LARGE:
12852 tp->nvram_jedecnum = JEDEC_SST;
12853 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12854 break;
12856 } else {
12857 tp->nvram_jedecnum = JEDEC_ATMEL;
12858 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12859 tg3_flag_set(tp, NVRAM_BUFFERED);
12863 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12865 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12866 case FLASH_5752PAGE_SIZE_256:
12867 tp->nvram_pagesize = 256;
12868 break;
12869 case FLASH_5752PAGE_SIZE_512:
12870 tp->nvram_pagesize = 512;
12871 break;
12872 case FLASH_5752PAGE_SIZE_1K:
12873 tp->nvram_pagesize = 1024;
12874 break;
12875 case FLASH_5752PAGE_SIZE_2K:
12876 tp->nvram_pagesize = 2048;
12877 break;
12878 case FLASH_5752PAGE_SIZE_4K:
12879 tp->nvram_pagesize = 4096;
12880 break;
12881 case FLASH_5752PAGE_SIZE_264:
12882 tp->nvram_pagesize = 264;
12883 break;
12884 case FLASH_5752PAGE_SIZE_528:
12885 tp->nvram_pagesize = 528;
12886 break;
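/* The 264- and 528-byte entries above are the "power of two plus
 * eight" page sizes used by Atmel AT45 DataFlash parts.  Several of
 * the per-chip routines below set NO_NVRAM_ADDR_TRANS whenever the
 * detected pagesize is anything other than 264 or 528, consistent
 * with only those DataFlash geometries needing the address
 * translation the flag name refers to.
 */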
12890 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12892 u32 nvcfg1;
12894 nvcfg1 = tr32(NVRAM_CFG1);
12896 /* NVRAM protection for TPM */
12897 if (nvcfg1 & (1 << 27))
12898 tg3_flag_set(tp, PROTECTED_NVRAM);
12900 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12901 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12902 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12903 tp->nvram_jedecnum = JEDEC_ATMEL;
12904 tg3_flag_set(tp, NVRAM_BUFFERED);
12905 break;
12906 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12907 tp->nvram_jedecnum = JEDEC_ATMEL;
12908 tg3_flag_set(tp, NVRAM_BUFFERED);
12909 tg3_flag_set(tp, FLASH);
12910 break;
12911 case FLASH_5752VENDOR_ST_M45PE10:
12912 case FLASH_5752VENDOR_ST_M45PE20:
12913 case FLASH_5752VENDOR_ST_M45PE40:
12914 tp->nvram_jedecnum = JEDEC_ST;
12915 tg3_flag_set(tp, NVRAM_BUFFERED);
12916 tg3_flag_set(tp, FLASH);
12917 break;
12920 if (tg3_flag(tp, FLASH)) {
12921 tg3_nvram_get_pagesize(tp, nvcfg1);
12922 } else {
12923 /* For eeprom, set pagesize to maximum eeprom size */
12924 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12926 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12927 tw32(NVRAM_CFG1, nvcfg1);
12931 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12933 u32 nvcfg1, protect = 0;
12935 nvcfg1 = tr32(NVRAM_CFG1);
12937 /* NVRAM protection for TPM */
12938 if (nvcfg1 & (1 << 27)) {
12939 tg3_flag_set(tp, PROTECTED_NVRAM);
12940 protect = 1;
12943 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12944 switch (nvcfg1) {
12945 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12946 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12947 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12948 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12949 tp->nvram_jedecnum = JEDEC_ATMEL;
12950 tg3_flag_set(tp, NVRAM_BUFFERED);
12951 tg3_flag_set(tp, FLASH);
12952 tp->nvram_pagesize = 264;
12953 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12954 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12955 tp->nvram_size = (protect ? 0x3e200 :
12956 TG3_NVRAM_SIZE_512KB);
12957 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12958 tp->nvram_size = (protect ? 0x1f200 :
12959 TG3_NVRAM_SIZE_256KB);
12960 else
12961 tp->nvram_size = (protect ? 0x1f200 :
12962 TG3_NVRAM_SIZE_128KB);
12963 break;
12964 case FLASH_5752VENDOR_ST_M45PE10:
12965 case FLASH_5752VENDOR_ST_M45PE20:
12966 case FLASH_5752VENDOR_ST_M45PE40:
12967 tp->nvram_jedecnum = JEDEC_ST;
12968 tg3_flag_set(tp, NVRAM_BUFFERED);
12969 tg3_flag_set(tp, FLASH);
12970 tp->nvram_pagesize = 256;
12971 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12972 tp->nvram_size = (protect ?
12973 TG3_NVRAM_SIZE_64KB :
12974 TG3_NVRAM_SIZE_128KB);
12975 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12976 tp->nvram_size = (protect ?
12977 TG3_NVRAM_SIZE_64KB :
12978 TG3_NVRAM_SIZE_256KB);
12979 else
12980 tp->nvram_size = (protect ?
12981 TG3_NVRAM_SIZE_128KB :
12982 TG3_NVRAM_SIZE_512KB);
12983 break;
12987 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12989 u32 nvcfg1;
12991 nvcfg1 = tr32(NVRAM_CFG1);
12993 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12994 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12995 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12996 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12997 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12998 tp->nvram_jedecnum = JEDEC_ATMEL;
12999 tg3_flag_set(tp, NVRAM_BUFFERED);
13000 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13002 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13003 tw32(NVRAM_CFG1, nvcfg1);
13004 break;
13005 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13006 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13007 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13008 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13009 tp->nvram_jedecnum = JEDEC_ATMEL;
13010 tg3_flag_set(tp, NVRAM_BUFFERED);
13011 tg3_flag_set(tp, FLASH);
13012 tp->nvram_pagesize = 264;
13013 break;
13014 case FLASH_5752VENDOR_ST_M45PE10:
13015 case FLASH_5752VENDOR_ST_M45PE20:
13016 case FLASH_5752VENDOR_ST_M45PE40:
13017 tp->nvram_jedecnum = JEDEC_ST;
13018 tg3_flag_set(tp, NVRAM_BUFFERED);
13019 tg3_flag_set(tp, FLASH);
13020 tp->nvram_pagesize = 256;
13021 break;
13025 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
13027 u32 nvcfg1, protect = 0;
13029 nvcfg1 = tr32(NVRAM_CFG1);
13031 /* NVRAM protection for TPM */
13032 if (nvcfg1 & (1 << 27)) {
13033 tg3_flag_set(tp, PROTECTED_NVRAM);
13034 protect = 1;
13037 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13038 switch (nvcfg1) {
13039 case FLASH_5761VENDOR_ATMEL_ADB021D:
13040 case FLASH_5761VENDOR_ATMEL_ADB041D:
13041 case FLASH_5761VENDOR_ATMEL_ADB081D:
13042 case FLASH_5761VENDOR_ATMEL_ADB161D:
13043 case FLASH_5761VENDOR_ATMEL_MDB021D:
13044 case FLASH_5761VENDOR_ATMEL_MDB041D:
13045 case FLASH_5761VENDOR_ATMEL_MDB081D:
13046 case FLASH_5761VENDOR_ATMEL_MDB161D:
13047 tp->nvram_jedecnum = JEDEC_ATMEL;
13048 tg3_flag_set(tp, NVRAM_BUFFERED);
13049 tg3_flag_set(tp, FLASH);
13050 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13051 tp->nvram_pagesize = 256;
13052 break;
13053 case FLASH_5761VENDOR_ST_A_M45PE20:
13054 case FLASH_5761VENDOR_ST_A_M45PE40:
13055 case FLASH_5761VENDOR_ST_A_M45PE80:
13056 case FLASH_5761VENDOR_ST_A_M45PE16:
13057 case FLASH_5761VENDOR_ST_M_M45PE20:
13058 case FLASH_5761VENDOR_ST_M_M45PE40:
13059 case FLASH_5761VENDOR_ST_M_M45PE80:
13060 case FLASH_5761VENDOR_ST_M_M45PE16:
13061 tp->nvram_jedecnum = JEDEC_ST;
13062 tg3_flag_set(tp, NVRAM_BUFFERED);
13063 tg3_flag_set(tp, FLASH);
13064 tp->nvram_pagesize = 256;
13065 break;
13068 if (protect) {
13069 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13070 } else {
13071 switch (nvcfg1) {
13072 case FLASH_5761VENDOR_ATMEL_ADB161D:
13073 case FLASH_5761VENDOR_ATMEL_MDB161D:
13074 case FLASH_5761VENDOR_ST_A_M45PE16:
13075 case FLASH_5761VENDOR_ST_M_M45PE16:
13076 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13077 break;
13078 case FLASH_5761VENDOR_ATMEL_ADB081D:
13079 case FLASH_5761VENDOR_ATMEL_MDB081D:
13080 case FLASH_5761VENDOR_ST_A_M45PE80:
13081 case FLASH_5761VENDOR_ST_M_M45PE80:
13082 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13083 break;
13084 case FLASH_5761VENDOR_ATMEL_ADB041D:
13085 case FLASH_5761VENDOR_ATMEL_MDB041D:
13086 case FLASH_5761VENDOR_ST_A_M45PE40:
13087 case FLASH_5761VENDOR_ST_M_M45PE40:
13088 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13089 break;
13090 case FLASH_5761VENDOR_ATMEL_ADB021D:
13091 case FLASH_5761VENDOR_ATMEL_MDB021D:
13092 case FLASH_5761VENDOR_ST_A_M45PE20:
13093 case FLASH_5761VENDOR_ST_M_M45PE20:
13094 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13095 break;
13100 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
13102 tp->nvram_jedecnum = JEDEC_ATMEL;
13103 tg3_flag_set(tp, NVRAM_BUFFERED);
13104 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13107 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
13109 u32 nvcfg1;
13111 nvcfg1 = tr32(NVRAM_CFG1);
13113 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13114 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13115 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13116 tp->nvram_jedecnum = JEDEC_ATMEL;
13117 tg3_flag_set(tp, NVRAM_BUFFERED);
13118 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13120 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13121 tw32(NVRAM_CFG1, nvcfg1);
13122 return;
13123 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13124 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13125 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13126 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13127 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13128 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13129 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13130 tp->nvram_jedecnum = JEDEC_ATMEL;
13131 tg3_flag_set(tp, NVRAM_BUFFERED);
13132 tg3_flag_set(tp, FLASH);
13134 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13135 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13136 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13137 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13138 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13139 break;
13140 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13141 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13142 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13143 break;
13144 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13145 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13146 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13147 break;
13149 break;
13150 case FLASH_5752VENDOR_ST_M45PE10:
13151 case FLASH_5752VENDOR_ST_M45PE20:
13152 case FLASH_5752VENDOR_ST_M45PE40:
13153 tp->nvram_jedecnum = JEDEC_ST;
13154 tg3_flag_set(tp, NVRAM_BUFFERED);
13155 tg3_flag_set(tp, FLASH);
13157 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13158 case FLASH_5752VENDOR_ST_M45PE10:
13159 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13160 break;
13161 case FLASH_5752VENDOR_ST_M45PE20:
13162 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13163 break;
13164 case FLASH_5752VENDOR_ST_M45PE40:
13165 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13166 break;
13168 break;
13169 default:
13170 tg3_flag_set(tp, NO_NVRAM);
13171 return;
13174 tg3_nvram_get_pagesize(tp, nvcfg1);
13175 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13176 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13180 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
13182 u32 nvcfg1;
13184 nvcfg1 = tr32(NVRAM_CFG1);
13186 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13187 case FLASH_5717VENDOR_ATMEL_EEPROM:
13188 case FLASH_5717VENDOR_MICRO_EEPROM:
13189 tp->nvram_jedecnum = JEDEC_ATMEL;
13190 tg3_flag_set(tp, NVRAM_BUFFERED);
13191 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13193 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13194 tw32(NVRAM_CFG1, nvcfg1);
13195 return;
13196 case FLASH_5717VENDOR_ATMEL_MDB011D:
13197 case FLASH_5717VENDOR_ATMEL_ADB011B:
13198 case FLASH_5717VENDOR_ATMEL_ADB011D:
13199 case FLASH_5717VENDOR_ATMEL_MDB021D:
13200 case FLASH_5717VENDOR_ATMEL_ADB021B:
13201 case FLASH_5717VENDOR_ATMEL_ADB021D:
13202 case FLASH_5717VENDOR_ATMEL_45USPT:
13203 tp->nvram_jedecnum = JEDEC_ATMEL;
13204 tg3_flag_set(tp, NVRAM_BUFFERED);
13205 tg3_flag_set(tp, FLASH);
13207 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13208 case FLASH_5717VENDOR_ATMEL_MDB021D:
13209 /* Detect size with tg3_get_nvram_size() */
13210 break;
13211 case FLASH_5717VENDOR_ATMEL_ADB021B:
13212 case FLASH_5717VENDOR_ATMEL_ADB021D:
13213 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13214 break;
13215 default:
13216 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13217 break;
13219 break;
13220 case FLASH_5717VENDOR_ST_M_M25PE10:
13221 case FLASH_5717VENDOR_ST_A_M25PE10:
13222 case FLASH_5717VENDOR_ST_M_M45PE10:
13223 case FLASH_5717VENDOR_ST_A_M45PE10:
13224 case FLASH_5717VENDOR_ST_M_M25PE20:
13225 case FLASH_5717VENDOR_ST_A_M25PE20:
13226 case FLASH_5717VENDOR_ST_M_M45PE20:
13227 case FLASH_5717VENDOR_ST_A_M45PE20:
13228 case FLASH_5717VENDOR_ST_25USPT:
13229 case FLASH_5717VENDOR_ST_45USPT:
13230 tp->nvram_jedecnum = JEDEC_ST;
13231 tg3_flag_set(tp, NVRAM_BUFFERED);
13232 tg3_flag_set(tp, FLASH);
13234 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13235 case FLASH_5717VENDOR_ST_M_M25PE20:
13236 case FLASH_5717VENDOR_ST_M_M45PE20:
13237 /* Detect size with tg3_get_nvram_size() */
13238 break;
13239 case FLASH_5717VENDOR_ST_A_M25PE20:
13240 case FLASH_5717VENDOR_ST_A_M45PE20:
13241 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13242 break;
13243 default:
13244 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13245 break;
13247 break;
13248 default:
13249 tg3_flag_set(tp, NO_NVRAM);
13250 return;
13253 tg3_nvram_get_pagesize(tp, nvcfg1);
13254 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13255 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13258 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
13260 u32 nvcfg1, nvmpinstrp;
13262 nvcfg1 = tr32(NVRAM_CFG1);
13263 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13265 switch (nvmpinstrp) {
13266 case FLASH_5720_EEPROM_HD:
13267 case FLASH_5720_EEPROM_LD:
13268 tp->nvram_jedecnum = JEDEC_ATMEL;
13269 tg3_flag_set(tp, NVRAM_BUFFERED);
13271 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13272 tw32(NVRAM_CFG1, nvcfg1);
13273 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13274 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13275 else
13276 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13277 return;
13278 case FLASH_5720VENDOR_M_ATMEL_DB011D:
13279 case FLASH_5720VENDOR_A_ATMEL_DB011B:
13280 case FLASH_5720VENDOR_A_ATMEL_DB011D:
13281 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13282 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13283 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13284 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13285 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13286 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13287 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13288 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13289 case FLASH_5720VENDOR_ATMEL_45USPT:
13290 tp->nvram_jedecnum = JEDEC_ATMEL;
13291 tg3_flag_set(tp, NVRAM_BUFFERED);
13292 tg3_flag_set(tp, FLASH);
13294 switch (nvmpinstrp) {
13295 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13296 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13297 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13298 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13299 break;
13300 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13301 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13302 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13303 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13304 break;
13305 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13306 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13307 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13308 break;
13309 default:
13310 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13311 break;
13313 break;
13314 case FLASH_5720VENDOR_M_ST_M25PE10:
13315 case FLASH_5720VENDOR_M_ST_M45PE10:
13316 case FLASH_5720VENDOR_A_ST_M25PE10:
13317 case FLASH_5720VENDOR_A_ST_M45PE10:
13318 case FLASH_5720VENDOR_M_ST_M25PE20:
13319 case FLASH_5720VENDOR_M_ST_M45PE20:
13320 case FLASH_5720VENDOR_A_ST_M25PE20:
13321 case FLASH_5720VENDOR_A_ST_M45PE20:
13322 case FLASH_5720VENDOR_M_ST_M25PE40:
13323 case FLASH_5720VENDOR_M_ST_M45PE40:
13324 case FLASH_5720VENDOR_A_ST_M25PE40:
13325 case FLASH_5720VENDOR_A_ST_M45PE40:
13326 case FLASH_5720VENDOR_M_ST_M25PE80:
13327 case FLASH_5720VENDOR_M_ST_M45PE80:
13328 case FLASH_5720VENDOR_A_ST_M25PE80:
13329 case FLASH_5720VENDOR_A_ST_M45PE80:
13330 case FLASH_5720VENDOR_ST_25USPT:
13331 case FLASH_5720VENDOR_ST_45USPT:
13332 tp->nvram_jedecnum = JEDEC_ST;
13333 tg3_flag_set(tp, NVRAM_BUFFERED);
13334 tg3_flag_set(tp, FLASH);
13336 switch (nvmpinstrp) {
13337 case FLASH_5720VENDOR_M_ST_M25PE20:
13338 case FLASH_5720VENDOR_M_ST_M45PE20:
13339 case FLASH_5720VENDOR_A_ST_M25PE20:
13340 case FLASH_5720VENDOR_A_ST_M45PE20:
13341 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13342 break;
13343 case FLASH_5720VENDOR_M_ST_M25PE40:
13344 case FLASH_5720VENDOR_M_ST_M45PE40:
13345 case FLASH_5720VENDOR_A_ST_M25PE40:
13346 case FLASH_5720VENDOR_A_ST_M45PE40:
13347 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13348 break;
13349 case FLASH_5720VENDOR_M_ST_M25PE80:
13350 case FLASH_5720VENDOR_M_ST_M45PE80:
13351 case FLASH_5720VENDOR_A_ST_M25PE80:
13352 case FLASH_5720VENDOR_A_ST_M45PE80:
13353 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13354 break;
13355 default:
13356 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13357 break;
13359 break;
13360 default:
13361 tg3_flag_set(tp, NO_NVRAM);
13362 return;
13365 tg3_nvram_get_pagesize(tp, nvcfg1);
13366 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13367 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13370 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13371 static void __devinit tg3_nvram_init(struct tg3 *tp)
13373 tw32_f(GRC_EEPROM_ADDR,
13374 (EEPROM_ADDR_FSM_RESET |
13375 (EEPROM_DEFAULT_CLOCK_PERIOD <<
13376 EEPROM_ADDR_CLKPERD_SHIFT)));
13378 msleep(1);
13380 /* Enable serial EEPROM accesses. */
13381 tw32_f(GRC_LOCAL_CTRL,
13382 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13383 udelay(100);
13385 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13386 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13387 tg3_flag_set(tp, NVRAM);
13389 if (tg3_nvram_lock(tp)) {
13390 netdev_warn(tp->dev,
13391 "Cannot get nvram lock, %s failed\n",
13392 __func__);
13393 return;
13395 tg3_enable_nvram_access(tp);
13397 tp->nvram_size = 0;
13399 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13400 tg3_get_5752_nvram_info(tp);
13401 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13402 tg3_get_5755_nvram_info(tp);
13403 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13404 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13405 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13406 tg3_get_5787_nvram_info(tp);
13407 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13408 tg3_get_5761_nvram_info(tp);
13409 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13410 tg3_get_5906_nvram_info(tp);
13411 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13412 tg3_flag(tp, 57765_CLASS))
13413 tg3_get_57780_nvram_info(tp);
13414 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13415 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13416 tg3_get_5717_nvram_info(tp);
13417 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13418 tg3_get_5720_nvram_info(tp);
13419 else
13420 tg3_get_nvram_info(tp);
13422 if (tp->nvram_size == 0)
13423 tg3_get_nvram_size(tp);
13425 tg3_disable_nvram_access(tp);
13426 tg3_nvram_unlock(tp);
13428 } else {
13429 tg3_flag_clear(tp, NVRAM);
13430 tg3_flag_clear(tp, NVRAM_BUFFERED);
13432 tg3_get_eeprom_size(tp);
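/* Dispatch summary for tg3_nvram_init(): 5700/5701 always take the
 * else branch with the NVRAM flags cleared and use the plain EEPROM
 * path; everything newer runs the per-ASIC tg3_get_*_nvram_info()
 * helper matching its revision, and any helper that leaves
 * nvram_size at zero defers to the generic tg3_get_nvram_size()
 * probe.
 */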
13436 struct subsys_tbl_ent {
13437 u16 subsys_vendor, subsys_devid;
13438 u32 phy_id;
13441 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13442 /* Broadcom boards. */
13443 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13444 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13445 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13446 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13447 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13448 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13449 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13450 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13451 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13452 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13453 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13454 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13455 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13456 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13457 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13458 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13459 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13460 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13461 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13462 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13463 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13464 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13466 /* 3com boards. */
13467 { TG3PCI_SUBVENDOR_ID_3COM,
13468 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13469 { TG3PCI_SUBVENDOR_ID_3COM,
13470 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13471 { TG3PCI_SUBVENDOR_ID_3COM,
13472 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13473 { TG3PCI_SUBVENDOR_ID_3COM,
13474 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13475 { TG3PCI_SUBVENDOR_ID_3COM,
13476 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13478 /* DELL boards. */
13479 { TG3PCI_SUBVENDOR_ID_DELL,
13480 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13481 { TG3PCI_SUBVENDOR_ID_DELL,
13482 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13483 { TG3PCI_SUBVENDOR_ID_DELL,
13484 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13485 { TG3PCI_SUBVENDOR_ID_DELL,
13486 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13488 /* Compaq boards. */
13489 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13490 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13491 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13492 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13493 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13494 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13495 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13496 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13497 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13498 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13500 /* IBM boards. */
13501 { TG3PCI_SUBVENDOR_ID_IBM,
13502 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13505 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13507 int i;
13509 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13510 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13511 tp->pdev->subsystem_vendor) &&
13512 (subsys_id_to_phy_id[i].subsys_devid ==
13513 tp->pdev->subsystem_device))
13514 return &subsys_id_to_phy_id[i];
13516 return NULL;
13519 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13521 u32 val;
13523 tp->phy_id = TG3_PHY_ID_INVALID;
13524 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13526 /* Assume an onboard device and WOL capable by default. */
13527 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13528 tg3_flag_set(tp, WOL_CAP);
13530 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13531 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13532 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13533 tg3_flag_set(tp, IS_NIC);
13535 val = tr32(VCPU_CFGSHDW);
13536 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13537 tg3_flag_set(tp, ASPM_WORKAROUND);
13538 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13539 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13540 tg3_flag_set(tp, WOL_ENABLE);
13541 device_set_wakeup_enable(&tp->pdev->dev, true);
13543 goto done;
13546 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13547 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13548 u32 nic_cfg, led_cfg;
13549 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13550 int eeprom_phy_serdes = 0;
13552 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13553 tp->nic_sram_data_cfg = nic_cfg;
13555 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13556 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13557 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13558 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13559 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13560 (ver > 0) && (ver < 0x100))
13561 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13563 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13564 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13566 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13567 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13568 eeprom_phy_serdes = 1;
13570 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13571 if (nic_phy_id != 0) {
13572 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13573 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13575 eeprom_phy_id = (id1 >> 16) << 10;
13576 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13577 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13578 } else
13579 eeprom_phy_id = 0;
13581 tp->phy_id = eeprom_phy_id;
13582 if (eeprom_phy_serdes) {
13583 if (!tg3_flag(tp, 5705_PLUS))
13584 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13585 else
13586 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13589 if (tg3_flag(tp, 5750_PLUS))
13590 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13591 SHASTA_EXT_LED_MODE_MASK);
13592 else
13593 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13595 switch (led_cfg) {
13596 default:
13597 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13598 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13599 break;
13601 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13602 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13603 break;
13605 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13606 tp->led_ctrl = LED_CTRL_MODE_MAC;
13608 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13609 * read on some older 5700/5701 bootcode.
13610 */
13611 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13612 ASIC_REV_5700 ||
13613 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13614 ASIC_REV_5701)
13615 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13617 break;
13619 case SHASTA_EXT_LED_SHARED:
13620 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13621 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13622 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13623 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13624 LED_CTRL_MODE_PHY_2);
13625 break;
13627 case SHASTA_EXT_LED_MAC:
13628 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13629 break;
13631 case SHASTA_EXT_LED_COMBO:
13632 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13633 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13634 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13635 LED_CTRL_MODE_PHY_2);
13636 break;
13640 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13641 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13642 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13643 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13645 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13646 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13648 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13649 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13650 if ((tp->pdev->subsystem_vendor ==
13651 PCI_VENDOR_ID_ARIMA) &&
13652 (tp->pdev->subsystem_device == 0x205a ||
13653 tp->pdev->subsystem_device == 0x2063))
13654 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13655 } else {
13656 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13657 tg3_flag_set(tp, IS_NIC);
13660 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13661 tg3_flag_set(tp, ENABLE_ASF);
13662 if (tg3_flag(tp, 5750_PLUS))
13663 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13666 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13667 tg3_flag(tp, 5750_PLUS))
13668 tg3_flag_set(tp, ENABLE_APE);
13670 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13671 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13672 tg3_flag_clear(tp, WOL_CAP);
13674 if (tg3_flag(tp, WOL_CAP) &&
13675 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13676 tg3_flag_set(tp, WOL_ENABLE);
13677 device_set_wakeup_enable(&tp->pdev->dev, true);
13680 if (cfg2 & (1 << 17))
13681 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13683 /* SerDes signal pre-emphasis in register 0x590 is set by
13684 * the bootcode if bit 18 is set. */
13685 if (cfg2 & (1 << 18))
13686 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13688 if ((tg3_flag(tp, 57765_PLUS) ||
13689 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13690 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13691 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13692 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13694 if (tg3_flag(tp, PCI_EXPRESS) &&
13695 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13696 !tg3_flag(tp, 57765_PLUS)) {
13697 u32 cfg3;
13699 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13700 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13701 tg3_flag_set(tp, ASPM_WORKAROUND);
13704 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13705 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13706 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13707 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13708 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13709 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13711 done:
13712 if (tg3_flag(tp, WOL_CAP))
13713 device_set_wakeup_enable(&tp->pdev->dev,
13714 tg3_flag(tp, WOL_ENABLE));
13715 else
13716 device_set_wakeup_capable(&tp->pdev->dev, false);
13719 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13721 int i;
13722 u32 val;
13724 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13725 tw32(OTP_CTRL, cmd);
13727 /* Wait for up to 1 ms for command to execute. */
13728 for (i = 0; i < 100; i++) {
13729 val = tr32(OTP_STATUS);
13730 if (val & OTP_STATUS_CMD_DONE)
13731 break;
13732 udelay(10);
13735 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13738 /* Read the gphy configuration from the OTP region of the chip. The gphy
13739 * configuration is a 32-bit value that straddles the alignment boundary.
13740 * We do two 32-bit reads and then shift and merge the results.
13741 */
13742 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13744 u32 bhalf_otp, thalf_otp;
13746 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13748 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13749 return 0;
13751 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13753 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13754 return 0;
13756 thalf_otp = tr32(OTP_READ_DATA);
13758 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13760 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13761 return 0;
13763 bhalf_otp = tr32(OTP_READ_DATA);
13765 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
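/* Worked example of the merge above: with OTP_ADDRESS_MAGIC1 reading
 * back thalf_otp = 0xAAAABBBB and OTP_ADDRESS_MAGIC2 reading back
 * bhalf_otp = 0xCCCCDDDD (illustrative values), the function returns
 * (0xBBBB << 16) | 0xCCCC = 0xBBBBCCCC -- the low half of the first
 * word glued to the high half of the second, which is the 32-bit
 * gphy config that straddles the alignment boundary.
 */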
13768 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13770 u32 adv = ADVERTISED_Autoneg;
13772 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13773 adv |= ADVERTISED_1000baseT_Half |
13774 ADVERTISED_1000baseT_Full;
13776 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13777 adv |= ADVERTISED_100baseT_Half |
13778 ADVERTISED_100baseT_Full |
13779 ADVERTISED_10baseT_Half |
13780 ADVERTISED_10baseT_Full |
13781 ADVERTISED_TP;
13782 else
13783 adv |= ADVERTISED_FIBRE;
13785 tp->link_config.advertising = adv;
13786 tp->link_config.speed = SPEED_UNKNOWN;
13787 tp->link_config.duplex = DUPLEX_UNKNOWN;
13788 tp->link_config.autoneg = AUTONEG_ENABLE;
13789 tp->link_config.active_speed = SPEED_UNKNOWN;
13790 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13792 tp->old_link = -1;
13795 static int __devinit tg3_phy_probe(struct tg3 *tp)
13797 u32 hw_phy_id_1, hw_phy_id_2;
13798 u32 hw_phy_id, hw_phy_id_masked;
13799 int err;
13801 /* flow control autonegotiation is default behavior */
13802 tg3_flag_set(tp, PAUSE_AUTONEG);
13803 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13805 if (tg3_flag(tp, ENABLE_APE)) {
13806 switch (tp->pci_fn) {
13807 case 0:
13808 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
13809 break;
13810 case 1:
13811 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
13812 break;
13813 case 2:
13814 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
13815 break;
13816 case 3:
13817 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
13818 break;
13822 if (tg3_flag(tp, USE_PHYLIB))
13823 return tg3_phy_init(tp);
13825 /* Reading the PHY ID register can conflict with ASF
13826 * firmware access to the PHY hardware.
13827 */
13828 err = 0;
13829 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13830 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13831 } else {
13832 /* Now read the physical PHY_ID from the chip and verify
13833 * that it is sane. If it doesn't look good, we fall back
13834 * first to the hard-coded table based PHY_ID, and failing
13835 * that, to the value found in the eeprom area.
13836 */
13837 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13838 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13840 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13841 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13842 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13844 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13847 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13848 tp->phy_id = hw_phy_id;
13849 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13850 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13851 else
13852 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13853 } else {
13854 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13855 /* Do nothing, phy ID already set up in
13856 * tg3_get_eeprom_hw_cfg().
13857 */
13858 } else {
13859 struct subsys_tbl_ent *p;
13861 /* No eeprom signature? Try the hardcoded
13862 * subsys device table.
13863 */
13864 p = tg3_lookup_by_subsys(tp);
13865 if (!p)
13866 return -ENODEV;
13868 tp->phy_id = p->phy_id;
13869 if (!tp->phy_id ||
13870 tp->phy_id == TG3_PHY_ID_BCM8002)
13871 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13875 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13876 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13877 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13878 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13879 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13880 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13881 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13882 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13884 tg3_phy_init_link_config(tp);
13886 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13887 !tg3_flag(tp, ENABLE_APE) &&
13888 !tg3_flag(tp, ENABLE_ASF)) {
13889 u32 bmsr, dummy;
13891 tg3_readphy(tp, MII_BMSR, &bmsr);
13892 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13893 (bmsr & BMSR_LSTATUS))
13894 goto skip_phy_reset;
13896 err = tg3_phy_reset(tp);
13897 if (err)
13898 return err;
13900 tg3_phy_set_wirespeed(tp);
13902 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13903 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13904 tp->link_config.flowctrl);
13906 tg3_writephy(tp, MII_BMCR,
13907 BMCR_ANENABLE | BMCR_ANRESTART);
13911 skip_phy_reset:
13912 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13913 err = tg3_init_5401phy_dsp(tp);
13914 if (err)
13915 return err;
13917 err = tg3_init_5401phy_dsp(tp);
13920 return err;
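/* The id assembled above rearranges the two MII id registers into
 * tg3's internal format: PHYSID1 lands in bits 25..10, the top six
 * bits of PHYSID2 move up to bits 31..26, and the low ten bits of
 * PHYSID2 stay in bits 9..0.  Masking with TG3_PHY_ID_MASK then
 * drops the revision-specific low bits so the value can be compared
 * against the TG3_PHY_ID_* table constants.
 */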
13923 static void __devinit tg3_read_vpd(struct tg3 *tp)
13925 u8 *vpd_data;
13926 unsigned int block_end, rosize, len;
13927 u32 vpdlen;
13928 int j, i = 0;
13930 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13931 if (!vpd_data)
13932 goto out_no_vpd;
13934 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13935 if (i < 0)
13936 goto out_not_found;
13938 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13939 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13940 i += PCI_VPD_LRDT_TAG_SIZE;
13942 if (block_end > vpdlen)
13943 goto out_not_found;
13945 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13946 PCI_VPD_RO_KEYWORD_MFR_ID);
13947 if (j > 0) {
13948 len = pci_vpd_info_field_size(&vpd_data[j]);
13950 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13951 if (j + len > block_end || len != 4 ||
13952 memcmp(&vpd_data[j], "1028", 4))
13953 goto partno;
13955 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13956 PCI_VPD_RO_KEYWORD_VENDOR0);
13957 if (j < 0)
13958 goto partno;
13960 len = pci_vpd_info_field_size(&vpd_data[j]);
13962 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13963 if (j + len > block_end)
13964 goto partno;
13966 memcpy(tp->fw_ver, &vpd_data[j], len);
13967 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13970 partno:
13971 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13972 PCI_VPD_RO_KEYWORD_PARTNO);
13973 if (i < 0)
13974 goto out_not_found;
13976 len = pci_vpd_info_field_size(&vpd_data[i]);
13978 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13979 if (len > TG3_BPN_SIZE ||
13980 (len + i) > vpdlen)
13981 goto out_not_found;
13983 memcpy(tp->board_part_number, &vpd_data[i], len);
13985 out_not_found:
13986 kfree(vpd_data);
13987 if (tp->board_part_number[0])
13988 return;
13990 out_no_vpd:
13991 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13992 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13993 strcpy(tp->board_part_number, "BCM5717");
13994 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13995 strcpy(tp->board_part_number, "BCM5718");
13996 else
13997 goto nomatch;
13998 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13999 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14000 strcpy(tp->board_part_number, "BCM57780");
14001 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14002 strcpy(tp->board_part_number, "BCM57760");
14003 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14004 strcpy(tp->board_part_number, "BCM57790");
14005 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14006 strcpy(tp->board_part_number, "BCM57788");
14007 else
14008 goto nomatch;
14009 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14010 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14011 strcpy(tp->board_part_number, "BCM57761");
14012 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14013 strcpy(tp->board_part_number, "BCM57765");
14014 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14015 strcpy(tp->board_part_number, "BCM57781");
14016 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14017 strcpy(tp->board_part_number, "BCM57785");
14018 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14019 strcpy(tp->board_part_number, "BCM57791");
14020 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14021 strcpy(tp->board_part_number, "BCM57795");
14022 else
14023 goto nomatch;
14024 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14025 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14026 strcpy(tp->board_part_number, "BCM57762");
14027 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14028 strcpy(tp->board_part_number, "BCM57766");
14029 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14030 strcpy(tp->board_part_number, "BCM57782");
14031 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14032 strcpy(tp->board_part_number, "BCM57786");
14033 else
14034 goto nomatch;
14035 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14036 strcpy(tp->board_part_number, "BCM95906");
14037 } else {
14038 nomatch:
14039 strcpy(tp->board_part_number, "none");
14043 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14045 u32 val;
14047 if (tg3_nvram_read(tp, offset, &val) ||
14048 (val & 0xfc000000) != 0x0c000000 ||
14049 tg3_nvram_read(tp, offset + 4, &val) ||
14050 val != 0)
14051 return 0;
14053 return 1;
14056 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
14058 u32 val, offset, start, ver_offset;
14059 int i, dst_off;
14060 bool newver = false;
14062 if (tg3_nvram_read(tp, 0xc, &offset) ||
14063 tg3_nvram_read(tp, 0x4, &start))
14064 return;
14066 offset = tg3_nvram_logical_addr(tp, offset);
14068 if (tg3_nvram_read(tp, offset, &val))
14069 return;
14071 if ((val & 0xfc000000) == 0x0c000000) {
14072 if (tg3_nvram_read(tp, offset + 4, &val))
14073 return;
14075 if (val == 0)
14076 newver = true;
14079 dst_off = strlen(tp->fw_ver);
14081 if (newver) {
14082 if (TG3_VER_SIZE - dst_off < 16 ||
14083 tg3_nvram_read(tp, offset + 8, &ver_offset))
14084 return;
14086 offset = offset + ver_offset - start;
14087 for (i = 0; i < 16; i += 4) {
14088 __be32 v;
14089 if (tg3_nvram_read_be32(tp, offset + i, &v))
14090 return;
14092 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14094 } else {
14095 u32 major, minor;
14097 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14098 return;
14100 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14101 TG3_NVM_BCVER_MAJSFT;
14102 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14103 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14104 "v%d.%02d", major, minor);
14108 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
14110 u32 val, major, minor;
14112 /* Use native endian representation */
14113 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14114 return;
14116 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14117 TG3_NVM_HWSB_CFG1_MAJSFT;
14118 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14119 TG3_NVM_HWSB_CFG1_MINSFT;
14121 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14124 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
14126 u32 offset, major, minor, build;
14128 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14130 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14131 return;
14133 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14134 case TG3_EEPROM_SB_REVISION_0:
14135 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14136 break;
14137 case TG3_EEPROM_SB_REVISION_2:
14138 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14139 break;
14140 case TG3_EEPROM_SB_REVISION_3:
14141 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14142 break;
14143 case TG3_EEPROM_SB_REVISION_4:
14144 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14145 break;
14146 case TG3_EEPROM_SB_REVISION_5:
14147 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14148 break;
14149 case TG3_EEPROM_SB_REVISION_6:
14150 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14151 break;
14152 default:
14153 return;
14156 if (tg3_nvram_read(tp, offset, &val))
14157 return;
14159 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14160 TG3_EEPROM_SB_EDH_BLD_SHFT;
14161 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14162 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14163 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
14165 if (minor > 99 || build > 26)
14166 return;
14168 offset = strlen(tp->fw_ver);
14169 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14170 " v%d.%02d", major, minor);
14172 if (build > 0) {
14173 offset = strlen(tp->fw_ver);
14174 if (offset < TG3_VER_SIZE - 1)
14175 tp->fw_ver[offset] = 'a' + build - 1;
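/* Example of the version string this builds, assuming fw_ver was
 * empty beforehand: with major = 1, minor = 2 and build = 3 decoded
 * from the EDH word, fw_ver grows from "sb" to "sb v1.02" and then
 * gets the build letter 'c' ('a' + 3 - 1) appended, ending up as
 * "sb v1.02c".
 */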
14179 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
14181 u32 val, offset, start;
14182 int i, vlen;
14184 for (offset = TG3_NVM_DIR_START;
14185 offset < TG3_NVM_DIR_END;
14186 offset += TG3_NVM_DIRENT_SIZE) {
14187 if (tg3_nvram_read(tp, offset, &val))
14188 return;
14190 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14191 break;
14194 if (offset == TG3_NVM_DIR_END)
14195 return;
14197 if (!tg3_flag(tp, 5705_PLUS))
14198 start = 0x08000000;
14199 else if (tg3_nvram_read(tp, offset - 4, &start))
14200 return;
14202 if (tg3_nvram_read(tp, offset + 4, &offset) ||
14203 !tg3_fw_img_is_valid(tp, offset) ||
14204 tg3_nvram_read(tp, offset + 8, &val))
14205 return;
14207 offset += val - start;
14209 vlen = strlen(tp->fw_ver);
14211 tp->fw_ver[vlen++] = ',';
14212 tp->fw_ver[vlen++] = ' ';
14214 for (i = 0; i < 4; i++) {
14215 __be32 v;
14216 if (tg3_nvram_read_be32(tp, offset, &v))
14217 return;
14219 offset += sizeof(v);
14221 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14222 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14223 break;
14226 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14227 vlen += sizeof(v);
14231 static void __devinit tg3_probe_ncsi(struct tg3 *tp)
14233 u32 apedata;
14235 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14236 if (apedata != APE_SEG_SIG_MAGIC)
14237 return;
14239 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14240 if (!(apedata & APE_FW_STATUS_READY))
14241 return;
14243 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14244 tg3_flag_set(tp, APE_HAS_NCSI);
14247 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14249 int vlen;
14250 u32 apedata;
14251 char *fwtype;
14253 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14255 if (tg3_flag(tp, APE_HAS_NCSI))
14256 fwtype = "NCSI";
14257 else
14258 fwtype = "DASH";
14260 vlen = strlen(tp->fw_ver);
14262 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14263 fwtype,
14264 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14265 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14266 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14267 (apedata & APE_FW_VERSION_BLDMSK));
14270 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
14272 u32 val;
14273 bool vpd_vers = false;
14275 if (tp->fw_ver[0] != 0)
14276 vpd_vers = true;
14278 if (tg3_flag(tp, NO_NVRAM)) {
14279 strcat(tp->fw_ver, "sb");
14280 return;
14283 if (tg3_nvram_read(tp, 0, &val))
14284 return;
14286 if (val == TG3_EEPROM_MAGIC)
14287 tg3_read_bc_ver(tp);
14288 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14289 tg3_read_sb_ver(tp, val);
14290 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14291 tg3_read_hwsb_ver(tp);
14293 if (tg3_flag(tp, ENABLE_ASF)) {
14294 if (tg3_flag(tp, ENABLE_APE)) {
14295 tg3_probe_ncsi(tp);
14296 if (!vpd_vers)
14297 tg3_read_dash_ver(tp);
14298 } else if (!vpd_vers) {
14299 tg3_read_mgmtfw_ver(tp);
14303 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
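/* Order of assembly for tp->fw_ver, as implemented above: any VPD
 * version read earlier comes first (or just "sb" when NO_NVRAM is
 * set), then the bootcode version in its bc, sb or hwsb flavour
 * depending on the NVRAM magic, and finally -- only when ASF is
 * enabled -- either the NCSI/DASH APE version or the management
 * firmware version, with the buffer always forced to NUL-termination
 * at the end.
 */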
14306 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14308 if (tg3_flag(tp, LRG_PROD_RING_CAP))
14309 return TG3_RX_RET_MAX_SIZE_5717;
14310 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14311 return TG3_RX_RET_MAX_SIZE_5700;
14312 else
14313 return TG3_RX_RET_MAX_SIZE_5705;
14316 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14317 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14318 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14319 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14320 { },
14323 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14325 struct pci_dev *peer;
14326 unsigned int func, devnr = tp->pdev->devfn & ~7;
14328 for (func = 0; func < 8; func++) {
14329 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14330 if (peer && peer != tp->pdev)
14331 break;
14332 pci_dev_put(peer);
14334 /* 5704 can be configured in single-port mode, set peer to
14335 * tp->pdev in that case.
14336 */
14337 if (!peer) {
14338 peer = tp->pdev;
14339 return peer;
14340 }
14342 /*
14343 * We don't need to keep the refcount elevated; there's no way
14344 * to remove one half of this device without removing the other.
14345 */
14346 pci_dev_put(peer);
14348 return peer;
14351 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14353 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14354 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14355 u32 reg;
14357 /* All devices that use the alternate
14358 * ASIC REV location have a CPMU.
14359 */
14360 tg3_flag_set(tp, CPMU_PRESENT);
14362 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14363 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14364 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14365 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14366 reg = TG3PCI_GEN2_PRODID_ASICREV;
14367 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14368 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14369 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14370 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14371 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14372 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14373 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14374 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14375 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14376 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14377 reg = TG3PCI_GEN15_PRODID_ASICREV;
14378 else
14379 reg = TG3PCI_PRODID_ASICREV;
14381 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14384 /* Wrong chip ID in 5752 A0. This code can be removed later
14385 * as A0 is not in production.
14387 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14388 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14391 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14392 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14393 tg3_flag_set(tp, 5717_PLUS);
14395 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14396 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14397 tg3_flag_set(tp, 57765_CLASS);
14399 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14400 tg3_flag_set(tp, 57765_PLUS);
14402 /* Intentionally exclude ASIC_REV_5906 */
14403 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14404 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14405 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14406 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14407 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14408 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14409 tg3_flag(tp, 57765_PLUS))
14410 tg3_flag_set(tp, 5755_PLUS);
14412 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14413 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14414 tg3_flag_set(tp, 5780_CLASS);
14416 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14417 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14418 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14419 tg3_flag(tp, 5755_PLUS) ||
14420 tg3_flag(tp, 5780_CLASS))
14421 tg3_flag_set(tp, 5750_PLUS);
14423 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14424 tg3_flag(tp, 5750_PLUS))
14425 tg3_flag_set(tp, 5705_PLUS);
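/* The flags set above form a strict superset chain; each newer family
 * implies all of the older ones:
 *
 *	5717_PLUS => 57765_PLUS => 5755_PLUS => 5750_PLUS => 5705_PLUS
 *
 * with 57765_CLASS feeding into 57765_PLUS and 5780_CLASS into
 * 5750_PLUS. Later code can test the broadest applicable flag instead
 * of enumerating every ASIC revision again.
 */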
14428 static int __devinit tg3_get_invariants(struct tg3 *tp)
14430 u32 misc_ctrl_reg;
14431 u32 pci_state_reg, grc_misc_cfg;
14432 u32 val;
14433 u16 pci_cmd;
14434 int err;
14436 /* Force memory write invalidate off. If we leave it on,
14437 * then on 5700_BX chips we have to enable a workaround.
14438 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14439 * to match the cacheline size. The Broadcom driver has this
14440 * workaround but turns MWI off all the time, so the workaround
14441 * is never used. This suggests that the workaround is insufficient.
14443 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14444 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14445 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14447 /* Important! -- Make sure register accesses are byteswapped
14448 * correctly. Also, for those chips that require it, make
14449 * sure that indirect register accesses are enabled before
14450 * the first operation.
14452 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14453 &misc_ctrl_reg);
14454 tp->misc_host_ctrl |= (misc_ctrl_reg &
14455 MISC_HOST_CTRL_CHIPREV);
14456 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14457 tp->misc_host_ctrl);
14459 tg3_detect_asic_rev(tp, misc_ctrl_reg);
14461 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14462 * we need to disable memory and use config. cycles
14463 * only to access all registers. The 5702/03 chips
14464 * can mistakenly decode the special cycles from the
14465 * ICH chipsets as memory write cycles, causing corruption
14466 * of register and memory space. Only certain ICH bridges
14467 * will drive special cycles with non-zero data during the
14468 * address phase which can fall within the 5703's address
14469 * range. This is not an ICH bug as the PCI spec allows
14470 * non-zero address during special cycles. However, only
14471 * these ICH bridges are known to drive non-zero addresses
14472 * during special cycles.
14474 * Since special cycles do not cross PCI bridges, we only
14475 * enable this workaround if the 5703 is on the secondary
14476 * bus of these ICH bridges.
14478 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14479 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14480 static struct tg3_dev_id {
14481 u32 vendor;
14482 u32 device;
14483 u32 rev;
14484 } ich_chipsets[] = {
14485 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14486 PCI_ANY_ID },
14487 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14488 PCI_ANY_ID },
14489 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14490 0xa },
14491 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14492 PCI_ANY_ID },
14493 { },
14495 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14496 struct pci_dev *bridge = NULL;
14498 while (pci_id->vendor != 0) {
14499 bridge = pci_get_device(pci_id->vendor, pci_id->device,
14500 bridge);
14501 if (!bridge) {
14502 pci_id++;
14503 continue;
14505 if (pci_id->rev != PCI_ANY_ID) {
14506 if (bridge->revision > pci_id->rev)
14507 continue;
14509 if (bridge->subordinate &&
14510 (bridge->subordinate->number ==
14511 tp->pdev->bus->number)) {
14512 tg3_flag_set(tp, ICH_WORKAROUND);
14513 pci_dev_put(bridge);
14514 break;
14519 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14520 static struct tg3_dev_id {
14521 u32 vendor;
14522 u32 device;
14523 } bridge_chipsets[] = {
14524 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14525 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14526 { },
14528 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14529 struct pci_dev *bridge = NULL;
14531 while (pci_id->vendor != 0) {
14532 bridge = pci_get_device(pci_id->vendor,
14533 pci_id->device,
14534 bridge);
14535 if (!bridge) {
14536 pci_id++;
14537 continue;
14539 if (bridge->subordinate &&
14540 (bridge->subordinate->number <=
14541 tp->pdev->bus->number) &&
14542 (bridge->subordinate->busn_res.end >=
14543 tp->pdev->bus->number)) {
14544 tg3_flag_set(tp, 5701_DMA_BUG);
14545 pci_dev_put(bridge);
14546 break;
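/* In both loops above, a bridge "contains" this device when the NIC's
 * bus number falls inside the bridge's subordinate bus range:
 *
 *	sub->number <= tp->pdev->bus->number &&
 *	sub->busn_res.end >= tp->pdev->bus->number
 *
 * The ICH case instead requires subordinate->number to equal our bus
 * number: special cycles do not cross PCI bridges, so only a 5703
 * sitting directly behind the ICH bridge is affected.
 */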
14551 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14552 * DMA addresses > 40-bit. This bridge may have additional
14553 * 57xx devices behind it in some 4-port NIC designs, for example.
14554 * Any tg3 device found behind the bridge will also need the 40-bit
14555 * DMA workaround.
14557 if (tg3_flag(tp, 5780_CLASS)) {
14558 tg3_flag_set(tp, 40BIT_DMA_BUG);
14559 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14560 } else {
14561 struct pci_dev *bridge = NULL;
14563 do {
14564 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14565 PCI_DEVICE_ID_SERVERWORKS_EPB,
14566 bridge);
14567 if (bridge && bridge->subordinate &&
14568 (bridge->subordinate->number <=
14569 tp->pdev->bus->number) &&
14570 (bridge->subordinate->busn_res.end >=
14571 tp->pdev->bus->number)) {
14572 tg3_flag_set(tp, 40BIT_DMA_BUG);
14573 pci_dev_put(bridge);
14574 break;
14576 } while (bridge);
14579 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14580 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14581 tp->pdev_peer = tg3_find_peer(tp);
14583 /* Determine TSO capabilities */
14584 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14585 ; /* Do nothing. HW bug. */
14586 else if (tg3_flag(tp, 57765_PLUS))
14587 tg3_flag_set(tp, HW_TSO_3);
14588 else if (tg3_flag(tp, 5755_PLUS) ||
14589 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14590 tg3_flag_set(tp, HW_TSO_2);
14591 else if (tg3_flag(tp, 5750_PLUS)) {
14592 tg3_flag_set(tp, HW_TSO_1);
14593 tg3_flag_set(tp, TSO_BUG);
14594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14595 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14596 tg3_flag_clear(tp, TSO_BUG);
14597 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14598 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14599 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14600 tg3_flag_set(tp, TSO_BUG);
14601 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14602 tp->fw_needed = FIRMWARE_TG3TSO5;
14603 else
14604 tp->fw_needed = FIRMWARE_TG3TSO;
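/* The chain above sorts devices into four TSO tiers, newest first:
 *
 *	HW_TSO_3  57765_PLUS parts (except the buggy 5719 A0)
 *	HW_TSO_2  5755_PLUS parts and the 5906
 *	HW_TSO_1  other 5750_PLUS parts (TSO_BUG, cleared on 5750 >= C2)
 *	firmware  5705-era parts, via FIRMWARE_TG3TSO5 or FIRMWARE_TG3TSO
 *
 * 5700/5701-class chips fall through with no TSO at all; the separate
 * FIRMWARE_TG3 assignment below is the 5701 A0's base firmware, not
 * TSO firmware.
 */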
14607 /* Selectively allow TSO based on operating conditions */
14608 if (tg3_flag(tp, HW_TSO_1) ||
14609 tg3_flag(tp, HW_TSO_2) ||
14610 tg3_flag(tp, HW_TSO_3) ||
14611 tp->fw_needed) {
14612 /* For firmware TSO, assume ASF is disabled.
14613 * We'll disable TSO later if we discover ASF
14614 * is enabled in tg3_get_eeprom_hw_cfg().
14616 tg3_flag_set(tp, TSO_CAPABLE);
14617 } else {
14618 tg3_flag_clear(tp, TSO_CAPABLE);
14619 tg3_flag_clear(tp, TSO_BUG);
14620 tp->fw_needed = NULL;
14623 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14624 tp->fw_needed = FIRMWARE_TG3;
14626 tp->irq_max = 1;
14628 if (tg3_flag(tp, 5750_PLUS)) {
14629 tg3_flag_set(tp, SUPPORT_MSI);
14630 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14631 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14632 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14633 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14634 tp->pdev_peer == tp->pdev))
14635 tg3_flag_clear(tp, SUPPORT_MSI);
14637 if (tg3_flag(tp, 5755_PLUS) ||
14638 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14639 tg3_flag_set(tp, 1SHOT_MSI);
14642 if (tg3_flag(tp, 57765_PLUS)) {
14643 tg3_flag_set(tp, SUPPORT_MSIX);
14644 tp->irq_max = TG3_IRQ_MAX_VECS;
14648 tp->txq_max = 1;
14649 tp->rxq_max = 1;
14650 if (tp->irq_max > 1) {
14651 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
14652 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
14654 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14655 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14656 tp->txq_max = tp->irq_max - 1;
14659 if (tg3_flag(tp, 5755_PLUS) ||
14660 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14661 tg3_flag_set(tp, SHORT_DMA_BUG);
14663 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14664 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14666 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14667 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14668 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14669 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14671 if (tg3_flag(tp, 57765_PLUS) &&
14672 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14673 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14675 if (!tg3_flag(tp, 5705_PLUS) ||
14676 tg3_flag(tp, 5780_CLASS) ||
14677 tg3_flag(tp, USE_JUMBO_BDFLAG))
14678 tg3_flag_set(tp, JUMBO_CAPABLE);
14680 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14681 &pci_state_reg);
14683 if (pci_is_pcie(tp->pdev)) {
14684 u16 lnkctl;
14686 tg3_flag_set(tp, PCI_EXPRESS);
14688 pci_read_config_word(tp->pdev,
14689 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14690 &lnkctl);
14691 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14692 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14693 ASIC_REV_5906) {
14694 tg3_flag_clear(tp, HW_TSO_2);
14695 tg3_flag_clear(tp, TSO_CAPABLE);
14697 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14698 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14699 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14700 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14701 tg3_flag_set(tp, CLKREQ_BUG);
14702 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14703 tg3_flag_set(tp, L1PLLPD_EN);
14705 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14706 /* BCM5785 devices are effectively PCIe devices, and should
14707 * follow PCIe codepaths, but do not have a PCIe capabilities
14708 * section.
14710 tg3_flag_set(tp, PCI_EXPRESS);
14711 } else if (!tg3_flag(tp, 5705_PLUS) ||
14712 tg3_flag(tp, 5780_CLASS)) {
14713 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14714 if (!tp->pcix_cap) {
14715 dev_err(&tp->pdev->dev,
14716 "Cannot find PCI-X capability, aborting\n");
14717 return -EIO;
14720 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14721 tg3_flag_set(tp, PCIX_MODE);
14724 /* If we have an AMD 762 or VIA K8T800 chipset, write
14725 * reordering to the mailbox registers done by the host
14726 * controller can cause major troubles. We read back from
14727 * every mailbox register write to force the writes to be
14728 * posted to the chip in order.
14730 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14731 !tg3_flag(tp, PCI_EXPRESS))
14732 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14734 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14735 &tp->pci_cacheline_sz);
14736 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14737 &tp->pci_lat_timer);
14738 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14739 tp->pci_lat_timer < 64) {
14740 tp->pci_lat_timer = 64;
14741 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14742 tp->pci_lat_timer);
14745 /* Important! -- It is critical that the PCI-X hw workaround
14746 * situation is decided before the first MMIO register access.
14748 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14749 /* 5700 BX chips need to have their TX producer index
14750 * mailboxes written twice to workaround a bug.
14752 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14754 /* If we are in PCI-X mode, enable the register write workaround.
14756 * The workaround is to use indirect register accesses
14757 * for all chip writes not to mailbox registers.
14759 if (tg3_flag(tp, PCIX_MODE)) {
14760 u32 pm_reg;
14762 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14764 /* The chip can have its power management PCI config
14765 * space registers clobbered due to this bug.
14766 * So explicitly force the chip into D0 here.
14768 pci_read_config_dword(tp->pdev,
14769 tp->pm_cap + PCI_PM_CTRL,
14770 &pm_reg);
14771 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14772 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14773 pci_write_config_dword(tp->pdev,
14774 tp->pm_cap + PCI_PM_CTRL,
14775 pm_reg);
14777 /* Also, force SERR#/PERR# in PCI command. */
14778 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14779 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14780 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
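/* PCI_PM_CTRL_STATE_MASK covers the low two bits of the PM
 * control/status register, which select the power state (0 = D0,
 * 3 = D3hot). A sketch of the transition performed above:
 *
 *	pm_reg = (pm_reg & ~PCI_PM_CTRL_STATE_MASK)
 *		 | PCI_PM_CTRL_PME_ENABLE;	force D0, keep PME
 *
 * so the chip is parked in full-power D0 without losing wake-event
 * capability.
 */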
14784 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14785 tg3_flag_set(tp, PCI_HIGH_SPEED);
14786 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14787 tg3_flag_set(tp, PCI_32BIT);
14789 /* Chip-specific fixup from Broadcom driver */
14790 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14791 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14792 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14793 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14796 /* Default fast path register access methods */
14797 tp->read32 = tg3_read32;
14798 tp->write32 = tg3_write32;
14799 tp->read32_mbox = tg3_read32;
14800 tp->write32_mbox = tg3_write32;
14801 tp->write32_tx_mbox = tg3_write32;
14802 tp->write32_rx_mbox = tg3_write32;
14804 /* Various workaround register access methods */
14805 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14806 tp->write32 = tg3_write_indirect_reg32;
14807 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14808 (tg3_flag(tp, PCI_EXPRESS) &&
14809 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14811 * Back to back register writes can cause problems on these
14812 * chips, the workaround is to read back all reg writes
14813 * except those to mailbox regs.
14815 * See tg3_write_indirect_reg32().
14817 tp->write32 = tg3_write_flush_reg32;
14820 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14821 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14822 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14823 tp->write32_rx_mbox = tg3_write_flush_reg32;
14826 if (tg3_flag(tp, ICH_WORKAROUND)) {
14827 tp->read32 = tg3_read_indirect_reg32;
14828 tp->write32 = tg3_write_indirect_reg32;
14829 tp->read32_mbox = tg3_read_indirect_mbox;
14830 tp->write32_mbox = tg3_write_indirect_mbox;
14831 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14832 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14834 iounmap(tp->regs);
14835 tp->regs = NULL;
14837 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14838 pci_cmd &= ~PCI_COMMAND_MEMORY;
14839 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14841 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14842 tp->read32_mbox = tg3_read32_mbox_5906;
14843 tp->write32_mbox = tg3_write32_mbox_5906;
14844 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14845 tp->write32_rx_mbox = tg3_write32_mbox_5906;
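/* The tr32()/tw32() accessors and the mailbox helpers dispatch
 * through these function pointers, so each bug workaround is selected
 * once here rather than re-tested on every register access. A sketch
 * of the dispatch, assuming the usual macro shape from tg3.h:
 *
 *	#define tw32(reg, val)	tp->write32(tp, (reg), (val))
 *	#define tr32(reg)	tp->read32(tp, (reg))
 */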
14848 if (tp->write32 == tg3_write_indirect_reg32 ||
14849 (tg3_flag(tp, PCIX_MODE) &&
14850 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14851 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14852 tg3_flag_set(tp, SRAM_USE_CONFIG);
14854 /* The memory arbiter has to be enabled in order for SRAM accesses
14855 * to succeed. Normally on powerup the tg3 chip firmware will make
14856 * sure it is enabled, but other entities such as system netboot
14857 * code might disable it.
14859 val = tr32(MEMARB_MODE);
14860 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14862 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14863 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14864 tg3_flag(tp, 5780_CLASS)) {
14865 if (tg3_flag(tp, PCIX_MODE)) {
14866 pci_read_config_dword(tp->pdev,
14867 tp->pcix_cap + PCI_X_STATUS,
14868 &val);
14869 tp->pci_fn = val & 0x7;
14871 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14872 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14873 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14874 NIC_SRAM_CPMUSTAT_SIG) {
14875 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14876 tp->pci_fn = tp->pci_fn ? 1 : 0;
14878 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14879 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14880 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14881 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14882 NIC_SRAM_CPMUSTAT_SIG) {
14883 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14884 TG3_CPMU_STATUS_FSHFT_5719;
14888 /* Get eeprom hw config before calling tg3_set_power_state().
14889 * In particular, the TG3_FLAG_IS_NIC flag must be
14890 * determined before calling tg3_set_power_state() so that
14891 * we know whether or not to switch out of Vaux power.
14892 * When the flag is set, it means that GPIO1 is used for eeprom
14893 * write protect and also implies that it is a LOM where GPIOs
14894 * are not used to switch power.
14896 tg3_get_eeprom_hw_cfg(tp);
14898 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14899 tg3_flag_clear(tp, TSO_CAPABLE);
14900 tg3_flag_clear(tp, TSO_BUG);
14901 tp->fw_needed = NULL;
14904 if (tg3_flag(tp, ENABLE_APE)) {
14905 /* Allow reads and writes to the
14906 * APE register and memory space.
14908 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14909 PCISTATE_ALLOW_APE_SHMEM_WR |
14910 PCISTATE_ALLOW_APE_PSPACE_WR;
14911 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14912 pci_state_reg);
14914 tg3_ape_lock_init(tp);
14917 /* Set up tp->grc_local_ctrl before calling
14918 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14919 * will bring 5700's external PHY out of reset.
14920 * It is also used as eeprom write protect on LOMs.
14922 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14923 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14924 tg3_flag(tp, EEPROM_WRITE_PROT))
14925 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14926 GRC_LCLCTRL_GPIO_OUTPUT1);
14927 /* Unused GPIO3 must be driven as output on 5752 because there
14928 * are no pull-up resistors on unused GPIO pins.
14930 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14931 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14933 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14935 tg3_flag(tp, 57765_CLASS))
14936 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14938 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14939 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14940 /* Turn off the debug UART. */
14941 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14942 if (tg3_flag(tp, IS_NIC))
14943 /* Keep VMain power. */
14944 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14945 GRC_LCLCTRL_GPIO_OUTPUT0;
14948 /* Switch out of Vaux if it is a NIC */
14949 tg3_pwrsrc_switch_to_vmain(tp);
14951 /* Derive initial jumbo mode from MTU assigned in
14952 * ether_setup() via the alloc_etherdev() call
14954 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14955 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14957 /* Determine WakeOnLan speed to use. */
14958 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14959 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14960 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14961 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14962 tg3_flag_clear(tp, WOL_SPEED_100MB);
14963 } else {
14964 tg3_flag_set(tp, WOL_SPEED_100MB);
14967 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14968 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14970 /* A few boards don't want Ethernet@WireSpeed phy feature */
14971 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14972 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14973 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14974 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14975 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14976 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14977 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14979 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14980 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14981 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14982 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14983 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14985 if (tg3_flag(tp, 5705_PLUS) &&
14986 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14987 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14988 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14989 !tg3_flag(tp, 57765_PLUS)) {
14990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14991 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14992 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14993 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14994 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14995 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14996 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14997 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14998 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14999 } else
15000 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15003 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15004 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15005 tp->phy_otp = tg3_read_otp_phycfg(tp);
15006 if (tp->phy_otp == 0)
15007 tp->phy_otp = TG3_OTP_DEFAULT;
15010 if (tg3_flag(tp, CPMU_PRESENT))
15011 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15012 else
15013 tp->mi_mode = MAC_MI_MODE_BASE;
15015 tp->coalesce_mode = 0;
15016 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15017 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15018 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15020 /* Set these bits to enable statistics workaround. */
15021 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15022 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15023 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15024 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15025 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15028 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15029 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15030 tg3_flag_set(tp, USE_PHYLIB);
15032 err = tg3_mdio_init(tp);
15033 if (err)
15034 return err;
15036 /* Initialize data/descriptor byte/word swapping. */
15037 val = tr32(GRC_MODE);
15038 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15039 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15040 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15041 GRC_MODE_B2HRX_ENABLE |
15042 GRC_MODE_HTX2B_ENABLE |
15043 GRC_MODE_HOST_STACKUP);
15044 else
15045 val &= GRC_MODE_HOST_STACKUP;
15047 tw32(GRC_MODE, val | tp->grc_mode);
15049 tg3_switch_clocks(tp);
15051 /* Clear this out for sanity. */
15052 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15054 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15055 &pci_state_reg);
15056 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15057 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15058 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15060 if (chiprevid == CHIPREV_ID_5701_A0 ||
15061 chiprevid == CHIPREV_ID_5701_B0 ||
15062 chiprevid == CHIPREV_ID_5701_B2 ||
15063 chiprevid == CHIPREV_ID_5701_B5) {
15064 void __iomem *sram_base;
15066 /* Write some dummy words into the SRAM status block
15067 * area, see if it reads back correctly. If the return
15068 * value is bad, force enable the PCIX workaround.
15070 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15072 writel(0x00000000, sram_base);
15073 writel(0x00000000, sram_base + 4);
15074 writel(0xffffffff, sram_base + 4);
15075 if (readl(sram_base) != 0x00000000)
15076 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
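/* The probe above writes 0 to one status-block word, then 0 followed
 * by 0xffffffff to the next word. If a buggy PCI-X target interface
 * reorders or drops the posted writes, the read back of the first
 * word is no longer 0 and the indirect-access workaround is force
 * enabled.
 */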
15080 udelay(50);
15081 tg3_nvram_init(tp);
15083 grc_misc_cfg = tr32(GRC_MISC_CFG);
15084 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15086 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15087 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15088 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15089 tg3_flag_set(tp, IS_5788);
15091 if (!tg3_flag(tp, IS_5788) &&
15092 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15093 tg3_flag_set(tp, TAGGED_STATUS);
15094 if (tg3_flag(tp, TAGGED_STATUS)) {
15095 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15096 HOSTCC_MODE_CLRTICK_TXBD);
15098 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15099 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15100 tp->misc_host_ctrl);
15103 /* Preserve the APE MAC_MODE bits */
15104 if (tg3_flag(tp, ENABLE_APE))
15105 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15106 else
15107 tp->mac_mode = 0;
15109 /* these are limited to 10/100 only */
15110 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15111 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15112 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15113 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
15114 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
15115 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
15116 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
15117 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
15118 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
15119 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
15120 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
15121 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
15122 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15123 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15124 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15125 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15127 err = tg3_phy_probe(tp);
15128 if (err) {
15129 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15130 /* ... but do not return immediately ... */
15131 tg3_mdio_fini(tp);
15134 tg3_read_vpd(tp);
15135 tg3_read_fw_ver(tp);
15137 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15138 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15139 } else {
15140 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15141 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15142 else
15143 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15146 /* 5700 {AX,BX} chips have a broken status block link
15147 * change bit implementation, so we must use the
15148 * status register in those cases.
15150 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15151 tg3_flag_set(tp, USE_LINKCHG_REG);
15152 else
15153 tg3_flag_clear(tp, USE_LINKCHG_REG);
15155 /* The led_ctrl is set during tg3_phy_probe; here we might
15156 * have to force the link status polling mechanism based
15157 * upon subsystem IDs.
15159 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15160 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15161 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15162 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15163 tg3_flag_set(tp, USE_LINKCHG_REG);
15166 /* For all SERDES we poll the MAC status register. */
15167 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15168 tg3_flag_set(tp, POLL_SERDES);
15169 else
15170 tg3_flag_clear(tp, POLL_SERDES);
15172 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15173 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15174 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15175 tg3_flag(tp, PCIX_MODE)) {
15176 tp->rx_offset = NET_SKB_PAD;
15177 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15178 tp->rx_copy_thresh = ~(u16)0;
15179 #endif
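/* NET_IP_ALIGN is normally 2, which offsets the 14-byte Ethernet
 * header so the IP header lands on a 4-byte boundary. The 5701 in
 * PCI-X mode apparently cannot tolerate the extra 2-byte DMA offset,
 * so it is dropped there; on architectures without efficient
 * unaligned access, rx_copy_thresh = 0xffff then makes every receive
 * packet get copied into a freshly aligned skb.
 */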
15182 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15183 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15184 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15186 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15188 /* Increment the rx prod index on the rx std ring by at most
15189 * 8 for these chips to work around hw errata.
15191 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15192 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15193 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15194 tp->rx_std_max_post = 8;
15196 if (tg3_flag(tp, ASPM_WORKAROUND))
15197 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15198 PCIE_PWR_MGMT_L1_THRESH_MSK;
15200 return err;
15203 #ifdef CONFIG_SPARC
15204 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15206 struct net_device *dev = tp->dev;
15207 struct pci_dev *pdev = tp->pdev;
15208 struct device_node *dp = pci_device_to_OF_node(pdev);
15209 const unsigned char *addr;
15210 int len;
15212 addr = of_get_property(dp, "local-mac-address", &len);
15213 if (addr && len == 6) {
15214 memcpy(dev->dev_addr, addr, 6);
15215 memcpy(dev->perm_addr, dev->dev_addr, 6);
15216 return 0;
15218 return -ENODEV;
15221 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15223 struct net_device *dev = tp->dev;
15225 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15226 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15227 return 0;
15229 #endif
15231 static int __devinit tg3_get_device_address(struct tg3 *tp)
15233 struct net_device *dev = tp->dev;
15234 u32 hi, lo, mac_offset;
15235 int addr_ok = 0;
15237 #ifdef CONFIG_SPARC
15238 if (!tg3_get_macaddr_sparc(tp))
15239 return 0;
15240 #endif
15242 mac_offset = 0x7c;
15243 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15244 tg3_flag(tp, 5780_CLASS)) {
15245 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15246 mac_offset = 0xcc;
15247 if (tg3_nvram_lock(tp))
15248 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15249 else
15250 tg3_nvram_unlock(tp);
15251 } else if (tg3_flag(tp, 5717_PLUS)) {
15252 if (tp->pci_fn & 1)
15253 mac_offset = 0xcc;
15254 if (tp->pci_fn > 1)
15255 mac_offset += 0x18c;
15256 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15257 mac_offset = 0x10;
15259 /* First try to get it from MAC address mailbox. */
15260 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
15261 if ((hi >> 16) == 0x484b) {
15262 dev->dev_addr[0] = (hi >> 8) & 0xff;
15263 dev->dev_addr[1] = (hi >> 0) & 0xff;
15265 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15266 dev->dev_addr[2] = (lo >> 24) & 0xff;
15267 dev->dev_addr[3] = (lo >> 16) & 0xff;
15268 dev->dev_addr[4] = (lo >> 8) & 0xff;
15269 dev->dev_addr[5] = (lo >> 0) & 0xff;
15271 /* Some old bootcode may report a 0 MAC address in SRAM */
15272 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15274 if (!addr_ok) {
15275 /* Next, try NVRAM. */
15276 if (!tg3_flag(tp, NO_NVRAM) &&
15277 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15278 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15279 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15280 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15282 /* Finally just fetch it out of the MAC control regs. */
15283 else {
15284 hi = tr32(MAC_ADDR_0_HIGH);
15285 lo = tr32(MAC_ADDR_0_LOW);
15287 dev->dev_addr[5] = lo & 0xff;
15288 dev->dev_addr[4] = (lo >> 8) & 0xff;
15289 dev->dev_addr[3] = (lo >> 16) & 0xff;
15290 dev->dev_addr[2] = (lo >> 24) & 0xff;
15291 dev->dev_addr[1] = hi & 0xff;
15292 dev->dev_addr[0] = (hi >> 8) & 0xff;
15296 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15297 #ifdef CONFIG_SPARC
15298 if (!tg3_get_default_macaddr_sparc(tp))
15299 return 0;
15300 #endif
15301 return -EINVAL;
15303 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15304 return 0;
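/* A worked example of the SRAM mailbox decode above: the bootcode
 * stores 0x484b ("HK") in the top half of the high word, apparently
 * as a validity signature. Assuming
 *
 *	hi = 0x484b0a1b;	lo = 0x2c3d4e5f;
 *
 * the resulting address is 0a:1b:2c:3d:4e:5f, i.e. the two low bytes
 * of hi followed by the four bytes of lo, most significant first.
 */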
15307 #define BOUNDARY_SINGLE_CACHELINE 1
15308 #define BOUNDARY_MULTI_CACHELINE 2
15310 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15312 int cacheline_size;
15313 u8 byte;
15314 int goal;
15316 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15317 if (byte == 0)
15318 cacheline_size = 1024;
15319 else
15320 cacheline_size = (int) byte * 4;
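/* PCI_CACHE_LINE_SIZE is specified in units of 32-bit dwords, hence
 * the "* 4" to convert to bytes; a register value of 0x10 means a
 * 64-byte cache line. A value of 0 means the BIOS never programmed
 * the register, and the code pessimistically assumes 1024 bytes.
 */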
15322 /* On 5703 and later chips, the boundary bits have no
15323 * effect.
15325 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15326 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15327 !tg3_flag(tp, PCI_EXPRESS))
15328 goto out;
15330 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15331 goal = BOUNDARY_MULTI_CACHELINE;
15332 #else
15333 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15334 goal = BOUNDARY_SINGLE_CACHELINE;
15335 #else
15336 goal = 0;
15337 #endif
15338 #endif
15340 if (tg3_flag(tp, 57765_PLUS)) {
15341 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15342 goto out;
15345 if (!goal)
15346 goto out;
15348 /* PCI controllers on most RISC systems tend to disconnect
15349 * when a device tries to burst across a cache-line boundary.
15350 * Therefore, letting tg3 do so just wastes PCI bandwidth.
15352 * Unfortunately, for PCI-E there are only limited
15353 * write-side controls for this, and thus for reads
15354 * we will still get the disconnects. We'll also waste
15355 * these PCI cycles for both read and write for chips
15356 * other than 5700 and 5701 which do not implement the
15357 * boundary bits.
15359 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15360 switch (cacheline_size) {
15361 case 16:
15362 case 32:
15363 case 64:
15364 case 128:
15365 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15366 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15367 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15368 } else {
15369 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15370 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15372 break;
15374 case 256:
15375 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15376 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15377 break;
15379 default:
15380 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15381 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15382 break;
15384 } else if (tg3_flag(tp, PCI_EXPRESS)) {
15385 switch (cacheline_size) {
15386 case 16:
15387 case 32:
15388 case 64:
15389 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15390 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15391 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15392 break;
15394 /* fallthrough */
15395 case 128:
15396 default:
15397 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15398 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15399 break;
15401 } else {
15402 switch (cacheline_size) {
15403 case 16:
15404 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15405 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15406 DMA_RWCTRL_WRITE_BNDRY_16);
15407 break;
15409 /* fallthrough */
15410 case 32:
15411 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15412 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15413 DMA_RWCTRL_WRITE_BNDRY_32);
15414 break;
15416 /* fallthrough */
15417 case 64:
15418 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15419 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15420 DMA_RWCTRL_WRITE_BNDRY_64);
15421 break;
15423 /* fallthrough */
15424 case 128:
15425 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15426 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15427 DMA_RWCTRL_WRITE_BNDRY_128);
15428 break;
15430 /* fallthrough */
15431 case 256:
15432 val |= (DMA_RWCTRL_READ_BNDRY_256 |
15433 DMA_RWCTRL_WRITE_BNDRY_256);
15434 break;
15435 case 512:
15436 val |= (DMA_RWCTRL_READ_BNDRY_512 |
15437 DMA_RWCTRL_WRITE_BNDRY_512);
15438 break;
15439 case 1024:
15440 default:
15441 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15442 DMA_RWCTRL_WRITE_BNDRY_1024);
15443 break;
15447 out:
15448 return val;
15451 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15453 struct tg3_internal_buffer_desc test_desc;
15454 u32 sram_dma_descs;
15455 int i, ret;
15457 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15459 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15460 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15461 tw32(RDMAC_STATUS, 0);
15462 tw32(WDMAC_STATUS, 0);
15464 tw32(BUFMGR_MODE, 0);
15465 tw32(FTQ_RESET, 0);
15467 test_desc.addr_hi = ((u64) buf_dma) >> 32;
15468 test_desc.addr_lo = buf_dma & 0xffffffff;
15469 test_desc.nic_mbuf = 0x00002100;
15470 test_desc.len = size;
15473 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
15474 * the *second* time the tg3 driver was getting loaded after an
15475 * initial scan.
15477 * Broadcom tells me:
15478 * ...the DMA engine is connected to the GRC block and a DMA
15479 * reset may affect the GRC block in some unpredictable way...
15480 * The behavior of resets to individual blocks has not been tested.
15482 * Broadcom noted the GRC reset will also reset all sub-components.
15484 if (to_device) {
15485 test_desc.cqid_sqid = (13 << 8) | 2;
15487 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15488 udelay(40);
15489 } else {
15490 test_desc.cqid_sqid = (16 << 8) | 7;
15492 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15493 udelay(40);
15495 test_desc.flags = 0x00000005;
15497 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15498 u32 val;
15500 val = *(((u32 *)&test_desc) + i);
15501 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15502 sram_dma_descs + (i * sizeof(u32)));
15503 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15505 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15507 if (to_device)
15508 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15509 else
15510 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15512 ret = -ENODEV;
15513 for (i = 0; i < 40; i++) {
15514 u32 val;
15516 if (to_device)
15517 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15518 else
15519 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15520 if ((val & 0xffff) == sram_dma_descs) {
15521 ret = 0;
15522 break;
15525 udelay(100);
15528 return ret;
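/* Summary of the helper above: it drives a single test DMA in three
 * steps. Build an internal buffer descriptor, copy it into NIC SRAM
 * through the TG3PCI_MEM_WIN_* config-space window, then enqueue it
 * on the read (to_device) or write FTQ and poll the matching
 * completion FIFO for up to 40 * 100us = 4ms before giving up with
 * -ENODEV.
 */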
15531 #define TEST_BUFFER_SIZE 0x2000
15533 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15534 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15535 { },
15538 static int __devinit tg3_test_dma(struct tg3 *tp)
15540 dma_addr_t buf_dma;
15541 u32 *buf, saved_dma_rwctrl;
15542 int ret = 0;
15544 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15545 &buf_dma, GFP_KERNEL);
15546 if (!buf) {
15547 ret = -ENOMEM;
15548 goto out_nofree;
15551 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15552 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15554 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15556 if (tg3_flag(tp, 57765_PLUS))
15557 goto out;
15559 if (tg3_flag(tp, PCI_EXPRESS)) {
15560 /* DMA read watermark not used on PCIE */
15561 tp->dma_rwctrl |= 0x00180000;
15562 } else if (!tg3_flag(tp, PCIX_MODE)) {
15563 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15564 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15565 tp->dma_rwctrl |= 0x003f0000;
15566 else
15567 tp->dma_rwctrl |= 0x003f000f;
15568 } else {
15569 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15570 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15571 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15572 u32 read_water = 0x7;
15574 /* If the 5704 is behind the EPB bridge, we can
15575 * do the less restrictive ONE_DMA workaround for
15576 * better performance.
15578 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15579 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15580 tp->dma_rwctrl |= 0x8000;
15581 else if (ccval == 0x6 || ccval == 0x7)
15582 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15584 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15585 read_water = 4;
15586 /* Set bit 23 to enable PCIX hw bug fix */
15587 tp->dma_rwctrl |=
15588 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15589 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15590 (1 << 23);
15591 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15592 /* 5780 always in PCIX mode */
15593 tp->dma_rwctrl |= 0x00144000;
15594 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15595 /* 5714 always in PCIX mode */
15596 tp->dma_rwctrl |= 0x00148000;
15597 } else {
15598 tp->dma_rwctrl |= 0x001b000f;
15602 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15603 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15604 tp->dma_rwctrl &= 0xfffffff0;
15606 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15607 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15608 /* Remove this if it causes problems for some boards. */
15609 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15611 /* On 5700/5701 chips, we need to set this bit.
15612 * Otherwise the chip will issue cacheline transactions
15613 * to streamable DMA memory without all of the byte
15614 * enables turned on. This is an error on several
15615 * RISC PCI controllers, in particular sparc64.
15617 * On 5703/5704 chips, this bit has been reassigned
15618 * a different meaning. In particular, it is used
15619 * on those chips to enable a PCI-X workaround.
15621 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15624 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15626 #if 0
15627 /* Unneeded, already done by tg3_get_invariants. */
15628 tg3_switch_clocks(tp);
15629 #endif
15631 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15632 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15633 goto out;
15635 /* It is best to perform the DMA test with the maximum write
15636 * burst size to expose the 5700/5701 write DMA bug.
15638 saved_dma_rwctrl = tp->dma_rwctrl;
15639 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15640 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15642 while (1) {
15643 u32 *p = buf, i;
15645 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15646 p[i] = i;
15648 /* Send the buffer to the chip. */
15649 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15650 if (ret) {
15651 dev_err(&tp->pdev->dev,
15652 "%s: Buffer write failed. err = %d\n",
15653 __func__, ret);
15654 break;
15657 #if 0
15658 /* validate data reached card RAM correctly. */
15659 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15660 u32 val;
15661 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15662 if (le32_to_cpu(val) != p[i]) {
15663 dev_err(&tp->pdev->dev,
15664 "%s: Buffer corrupted on device! "
15665 "(%d != %d)\n", __func__, val, i);
15666 /* ret = -ENODEV here? */
15668 p[i] = 0;
15670 #endif
15671 /* Now read it back. */
15672 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15673 if (ret) {
15674 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15675 "err = %d\n", __func__, ret);
15676 break;
15679 /* Verify it. */
15680 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15681 if (p[i] == i)
15682 continue;
15684 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15685 DMA_RWCTRL_WRITE_BNDRY_16) {
15686 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15687 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15688 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15689 break;
15690 } else {
15691 dev_err(&tp->pdev->dev,
15692 "%s: Buffer corrupted on read back! "
15693 "(%d != %d)\n", __func__, p[i], i);
15694 ret = -ENODEV;
15695 goto out;
15699 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15700 /* Success. */
15701 ret = 0;
15702 break;
15705 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15706 DMA_RWCTRL_WRITE_BNDRY_16) {
15707 /* DMA test passed without adjusting DMA boundary,
15708 * now look for chipsets that are known to expose the
15709 * DMA bug without failing the test.
15711 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15712 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15713 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15714 } else {
15715 /* Safe to use the calculated DMA boundary. */
15716 tp->dma_rwctrl = saved_dma_rwctrl;
15719 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15722 out:
15723 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15724 out_nofree:
15725 return ret;
15728 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15730 if (tg3_flag(tp, 57765_PLUS)) {
15731 tp->bufmgr_config.mbuf_read_dma_low_water =
15732 DEFAULT_MB_RDMA_LOW_WATER_5705;
15733 tp->bufmgr_config.mbuf_mac_rx_low_water =
15734 DEFAULT_MB_MACRX_LOW_WATER_57765;
15735 tp->bufmgr_config.mbuf_high_water =
15736 DEFAULT_MB_HIGH_WATER_57765;
15738 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15739 DEFAULT_MB_RDMA_LOW_WATER_5705;
15740 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15741 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15742 tp->bufmgr_config.mbuf_high_water_jumbo =
15743 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15744 } else if (tg3_flag(tp, 5705_PLUS)) {
15745 tp->bufmgr_config.mbuf_read_dma_low_water =
15746 DEFAULT_MB_RDMA_LOW_WATER_5705;
15747 tp->bufmgr_config.mbuf_mac_rx_low_water =
15748 DEFAULT_MB_MACRX_LOW_WATER_5705;
15749 tp->bufmgr_config.mbuf_high_water =
15750 DEFAULT_MB_HIGH_WATER_5705;
15751 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15752 tp->bufmgr_config.mbuf_mac_rx_low_water =
15753 DEFAULT_MB_MACRX_LOW_WATER_5906;
15754 tp->bufmgr_config.mbuf_high_water =
15755 DEFAULT_MB_HIGH_WATER_5906;
15758 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15759 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15760 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15761 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15762 tp->bufmgr_config.mbuf_high_water_jumbo =
15763 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15764 } else {
15765 tp->bufmgr_config.mbuf_read_dma_low_water =
15766 DEFAULT_MB_RDMA_LOW_WATER;
15767 tp->bufmgr_config.mbuf_mac_rx_low_water =
15768 DEFAULT_MB_MACRX_LOW_WATER;
15769 tp->bufmgr_config.mbuf_high_water =
15770 DEFAULT_MB_HIGH_WATER;
15772 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15773 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15774 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15775 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15776 tp->bufmgr_config.mbuf_high_water_jumbo =
15777 DEFAULT_MB_HIGH_WATER_JUMBO;
15780 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15781 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15784 static char * __devinit tg3_phy_string(struct tg3 *tp)
15786 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15787 case TG3_PHY_ID_BCM5400: return "5400";
15788 case TG3_PHY_ID_BCM5401: return "5401";
15789 case TG3_PHY_ID_BCM5411: return "5411";
15790 case TG3_PHY_ID_BCM5701: return "5701";
15791 case TG3_PHY_ID_BCM5703: return "5703";
15792 case TG3_PHY_ID_BCM5704: return "5704";
15793 case TG3_PHY_ID_BCM5705: return "5705";
15794 case TG3_PHY_ID_BCM5750: return "5750";
15795 case TG3_PHY_ID_BCM5752: return "5752";
15796 case TG3_PHY_ID_BCM5714: return "5714";
15797 case TG3_PHY_ID_BCM5780: return "5780";
15798 case TG3_PHY_ID_BCM5755: return "5755";
15799 case TG3_PHY_ID_BCM5787: return "5787";
15800 case TG3_PHY_ID_BCM5784: return "5784";
15801 case TG3_PHY_ID_BCM5756: return "5722/5756";
15802 case TG3_PHY_ID_BCM5906: return "5906";
15803 case TG3_PHY_ID_BCM5761: return "5761";
15804 case TG3_PHY_ID_BCM5718C: return "5718C";
15805 case TG3_PHY_ID_BCM5718S: return "5718S";
15806 case TG3_PHY_ID_BCM57765: return "57765";
15807 case TG3_PHY_ID_BCM5719C: return "5719C";
15808 case TG3_PHY_ID_BCM5720C: return "5720C";
15809 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15810 case 0: return "serdes";
15811 default: return "unknown";
15815 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15817 if (tg3_flag(tp, PCI_EXPRESS)) {
15818 strcpy(str, "PCI Express");
15819 return str;
15820 } else if (tg3_flag(tp, PCIX_MODE)) {
15821 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15823 strcpy(str, "PCIX:");
15825 if ((clock_ctrl == 7) ||
15826 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15827 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15828 strcat(str, "133MHz");
15829 else if (clock_ctrl == 0)
15830 strcat(str, "33MHz");
15831 else if (clock_ctrl == 2)
15832 strcat(str, "50MHz");
15833 else if (clock_ctrl == 4)
15834 strcat(str, "66MHz");
15835 else if (clock_ctrl == 6)
15836 strcat(str, "100MHz");
15837 } else {
15838 strcpy(str, "PCI:");
15839 if (tg3_flag(tp, PCI_HIGH_SPEED))
15840 strcat(str, "66MHz");
15841 else
15842 strcat(str, "33MHz");
15844 if (tg3_flag(tp, PCI_32BIT))
15845 strcat(str, ":32-bit");
15846 else
15847 strcat(str, ":64-bit");
15848 return str;
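/* Example outputs, for illustration: "PCI Express" (no suffix, the
 * function returns early), "PCIX:133MHz:64-bit" (e.g. a 5704 CIOBE
 * board), or "PCI:33MHz:32-bit" for a plain conventional PCI slot.
 */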
15851 static void __devinit tg3_init_coal(struct tg3 *tp)
15853 struct ethtool_coalesce *ec = &tp->coal;
15855 memset(ec, 0, sizeof(*ec));
15856 ec->cmd = ETHTOOL_GCOALESCE;
15857 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15858 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15859 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15860 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15861 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15862 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15863 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15864 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15865 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15867 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15868 HOSTCC_MODE_CLRTICK_TXBD)) {
15869 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15870 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15871 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15872 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15875 if (tg3_flag(tp, 5705_PLUS)) {
15876 ec->rx_coalesce_usecs_irq = 0;
15877 ec->tx_coalesce_usecs_irq = 0;
15878 ec->stats_block_coalesce_usecs = 0;
15882 static int __devinit tg3_init_one(struct pci_dev *pdev,
15883 const struct pci_device_id *ent)
15885 struct net_device *dev;
15886 struct tg3 *tp;
15887 int i, err, pm_cap;
15888 u32 sndmbx, rcvmbx, intmbx;
15889 char str[40];
15890 u64 dma_mask, persist_dma_mask;
15891 netdev_features_t features = 0;
15893 printk_once(KERN_INFO "%s\n", version);
15895 err = pci_enable_device(pdev);
15896 if (err) {
15897 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15898 return err;
15901 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15902 if (err) {
15903 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15904 goto err_out_disable_pdev;
15907 pci_set_master(pdev);
15909 /* Find power-management capability. */
15910 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15911 if (pm_cap == 0) {
15912 dev_err(&pdev->dev,
15913 "Cannot find Power Management capability, aborting\n");
15914 err = -EIO;
15915 goto err_out_free_res;
15918 err = pci_set_power_state(pdev, PCI_D0);
15919 if (err) {
15920 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15921 goto err_out_free_res;
15924 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15925 if (!dev) {
15926 err = -ENOMEM;
15927 goto err_out_power_down;
15930 SET_NETDEV_DEV(dev, &pdev->dev);
15932 tp = netdev_priv(dev);
15933 tp->pdev = pdev;
15934 tp->dev = dev;
15935 tp->pm_cap = pm_cap;
15936 tp->rx_mode = TG3_DEF_RX_MODE;
15937 tp->tx_mode = TG3_DEF_TX_MODE;
15939 if (tg3_debug > 0)
15940 tp->msg_enable = tg3_debug;
15941 else
15942 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15944 /* The word/byte swap controls here govern register access byte
15945 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15946 * setting below.
15948 tp->misc_host_ctrl =
15949 MISC_HOST_CTRL_MASK_PCI_INT |
15950 MISC_HOST_CTRL_WORD_SWAP |
15951 MISC_HOST_CTRL_INDIR_ACCESS |
15952 MISC_HOST_CTRL_PCISTATE_RW;
15954 /* The NONFRM (non-frame) byte/word swap controls take effect
15955 * on descriptor entries, anything which isn't packet data.
15957 * The StrongARM chips on the board (one for tx, one for rx)
15958 * are running in big-endian mode.
15960 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15961 GRC_MODE_WSWAP_NONFRM_DATA);
15962 #ifdef __BIG_ENDIAN
15963 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15964 #endif
15965 spin_lock_init(&tp->lock);
15966 spin_lock_init(&tp->indirect_lock);
15967 INIT_WORK(&tp->reset_task, tg3_reset_task);
15969 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15970 if (!tp->regs) {
15971 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15972 err = -ENOMEM;
15973 goto err_out_free_dev;
15976 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15977 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15978 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15979 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15980 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15981 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15982 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15983 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15984 tg3_flag_set(tp, ENABLE_APE);
15985 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15986 if (!tp->aperegs) {
15987 dev_err(&pdev->dev,
15988 "Cannot map APE registers, aborting\n");
15989 err = -ENOMEM;
15990 goto err_out_iounmap;
15994 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15995 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15997 dev->ethtool_ops = &tg3_ethtool_ops;
15998 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15999 dev->netdev_ops = &tg3_netdev_ops;
16000 dev->irq = pdev->irq;
16002 err = tg3_get_invariants(tp);
16003 if (err) {
16004 dev_err(&pdev->dev,
16005 "Problem fetching invariants of chip, aborting\n");
16006 goto err_out_apeunmap;
16009 /* The EPB bridge inside 5714, 5715, and 5780 and any
16010 * device behind the EPB cannot support DMA addresses > 40-bit.
16011 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16012 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16013 * do DMA address check in tg3_start_xmit().
16015 if (tg3_flag(tp, IS_5788))
16016 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16017 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16018 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16019 #ifdef CONFIG_HIGHMEM
16020 dma_mask = DMA_BIT_MASK(64);
16021 #endif
16022 } else
16023 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
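/* DMA_BIT_MASK(n) expands to ((1ULL << n) - 1) (all ones for n = 64),
 * so the masks chosen above are:
 *
 *	DMA_BIT_MASK(32) = 0x00000000ffffffff	5788: 32-bit only
 *	DMA_BIT_MASK(40) = 0x000000ffffffffff	EPB-limited parts
 *	DMA_BIT_MASK(64) = 0xffffffffffffffff	everything else
 *
 * With CONFIG_HIGHMEM the streaming mask is widened back to 64 bits
 * and tg3_start_xmit() checks and bounces buffers above the 40-bit
 * limit, per the comment above.
 */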
16025 /* Configure DMA attributes. */
16026 if (dma_mask > DMA_BIT_MASK(32)) {
16027 err = pci_set_dma_mask(pdev, dma_mask);
16028 if (!err) {
16029 features |= NETIF_F_HIGHDMA;
16030 err = pci_set_consistent_dma_mask(pdev,
16031 persist_dma_mask);
16032 if (err < 0) {
16033 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16034 "DMA for consistent allocations\n");
16035 goto err_out_apeunmap;
16039 if (err || dma_mask == DMA_BIT_MASK(32)) {
16040 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16041 if (err) {
16042 dev_err(&pdev->dev,
16043 "No usable DMA configuration, aborting\n");
16044 goto err_out_apeunmap;
16048 tg3_init_bufmgr_config(tp);
16050 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16052 /* 5700 B0 chips do not support checksumming correctly due
16053 * to hardware bugs.
16055 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16056 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16058 if (tg3_flag(tp, 5755_PLUS))
16059 features |= NETIF_F_IPV6_CSUM;
16062 /* TSO is on by default on chips that support hardware TSO.
16063 * Firmware TSO on older chips gives lower performance, so it
16064 * is off by default, but can be enabled using ethtool.
16066 if ((tg3_flag(tp, HW_TSO_1) ||
16067 tg3_flag(tp, HW_TSO_2) ||
16068 tg3_flag(tp, HW_TSO_3)) &&
16069 (features & NETIF_F_IP_CSUM))
16070 features |= NETIF_F_TSO;
16071 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16072 if (features & NETIF_F_IPV6_CSUM)
16073 features |= NETIF_F_TSO6;
16074 if (tg3_flag(tp, HW_TSO_3) ||
16075 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
16076 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16077 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
16078 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
16079 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
16080 features |= NETIF_F_TSO_ECN;
16083 dev->features |= features;
16084 dev->vlan_features |= features;
16087 * Add loopback capability only for a subset of devices that support
16088 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16089 * loopback for the remaining devices.
16091 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16092 !tg3_flag(tp, CPMU_PRESENT))
16093 /* Add the loopback capability */
16094 features |= NETIF_F_LOOPBACK;
16096 dev->hw_features |= features;
16098 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
16099 !tg3_flag(tp, TSO_CAPABLE) &&
16100 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16101 tg3_flag_set(tp, MAX_RXPEND_64);
16102 tp->rx_pending = 63;
	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shut down cleanly.
	 * The DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
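
	/* Set up one tg3_napi context per possible interrupt vector, each
	 * with its own interrupt, RX consumer, and TX producer mailboxes.
	 */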
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;
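
		/* Without MSI-X only vector 0 is used, so the remaining
		 * tg3_napi slots can be left untouched.
		 */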
		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS. If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts. Reuse the
		 * mailbox values for the next iteration. The values we set up
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
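
	/* Program default interrupt coalescing parameters; they can be
	 * changed later through ethtool -C.
	 */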
	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
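
	/* Save config space so tg3_io_slot_reset() can restore it after a
	 * PCI bus reset.
	 */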
	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);
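
	/* The non-zero second argument asks tg3_full_lock() to quiesce the
	 * IRQ handlers before taking the lock.
	 */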
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
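
/* Wire the suspend/resume callbacks into dev_pm_ops for system sleep. */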
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);
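
	/* A permanent failure cannot be recovered; otherwise disable the
	 * device and let the core attempt a slot reset (the default err
	 * value above).
	 */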
done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}
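
	/* Re-enable bus mastering and restore the config space saved at
	 * probe time; save it again in case another reset follows.
	 */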
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
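
/* Recovery hooks invoked by the PCI AER/EEH core. */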
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);