tg3: Add NVRAM support for 5762
drivers/net/ethernet/broadcom/tg3.c

/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
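
/* The flag helpers paste the short name onto the TG3_FLAG_ enum, so,
 * for example, tg3_flag(tp, ENABLE_APE) is just
 * test_bit(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags) on the device's
 * flag bitmap.
 */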

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			128
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"December 03, 2012"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
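
/* NEXT_TX works because TG3_TX_RING_SIZE is a power of two: masking
 * with (size - 1) wraps the index back to zero without a divide,
 * which is exactly the '% foo' -> '& (foo - 1)' rewrite described
 * in the comment above.
 */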

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
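
/* Indirect register access: instead of MMIO, the target register
 * offset is written to the TG3PCI_REG_BASE_ADDR window in PCI config
 * space and the data moves through TG3PCI_REG_DATA.  indirect_lock
 * serializes the two-step address/data sequence.
 */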
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
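
/* tg3_write32_tx_mbox() below issues the mailbox write twice when the
 * TXD_MBOX_HWBUG flag is set (working around a mailbox hardware bug on
 * affected chips) and reads the mailbox back when MBOX_WRITE_REORDER is
 * set so the posted write is flushed in the proper order.
 */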
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
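
/* Typical usage: tw32() for plain writes, tw32_f() when the write must
 * be flushed before continuing, and tw32_wait_f() when a settling delay
 * is also required, e.g.
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 */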

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
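
/* APE locking protocol: the driver sets its request bit in the
 * per-lock REQ register, then polls the GRANT register for up to
 * 1 millisecond.  If the grant never appears, the request is revoked
 * by writing the same bit back to the GRANT register.
 */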
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
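
/* Scratchpad reads are a driver/APE handshake: the request window
 * (offset and length) is written to the shared message buffer, a
 * SCRTCHPD_READ event is raised, and once the APE clears the pending
 * bit the data is pulled out of the message area one dword at a time.
 * Transfers longer than the buffer are chunked.
 */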
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
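
/* MII management frames are built in MAC_MI_COM: the PHY and register
 * addresses are shifted into their fields, a read or write command plus
 * MI_COM_START kicks off the transaction, and completion is detected by
 * polling MI_COM_BUSY (up to PHY_BUSY_LOOPS iterations of 10 usec).
 */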
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
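
/* Clause 45 registers are reached through the Clause 22 MMD access
 * registers: write the device address to MII_TG3_MMD_CTRL, the target
 * register to MII_TG3_MMD_ADDRESS, switch MII_TG3_MMD_CTRL to
 * no-post-increment data mode, then move the data through
 * MII_TG3_MMD_ADDRESS.
 */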
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
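
/* tg3_poll_fw() below relies on the pre-reset handshake: the driver
 * writes NIC_SRAM_FIRMWARE_MBOX_MAGIC1 into the firmware mailbox
 * before the reset, and the bootcode signals completion by writing
 * back the one's complement of that magic value.
 */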
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
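
/* tg3_resolve_flowctrl_1000X() below implements the standard IEEE
 * 802.3 pause resolution: symmetric pause advertised on both ends
 * enables flow control in both directions, while an asymmetric-pause
 * match enables it in a single direction only.
 */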
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
1994 static int tg3_phy_init(struct tg3 *tp)
1996 struct phy_device *phydev;
1998 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1999 return 0;
2001 /* Bring the PHY back to a known state. */
2002 tg3_bmcr_reset(tp);
2004 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2006 /* Attach the MAC to the PHY. */
2007 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
2008 phydev->dev_flags, phydev->interface);
2009 if (IS_ERR(phydev)) {
2010 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2011 return PTR_ERR(phydev);
2014 /* Mask with MAC supported features. */
2015 switch (phydev->interface) {
2016 case PHY_INTERFACE_MODE_GMII:
2017 case PHY_INTERFACE_MODE_RGMII:
2018 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2019 phydev->supported &= (PHY_GBIT_FEATURES |
2020 SUPPORTED_Pause |
2021 SUPPORTED_Asym_Pause);
2022 break;
2024 /* fallthru */
2025 case PHY_INTERFACE_MODE_MII:
2026 phydev->supported &= (PHY_BASIC_FEATURES |
2027 SUPPORTED_Pause |
2028 SUPPORTED_Asym_Pause);
2029 break;
2030 default:
2031 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2032 return -EINVAL;
2035 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2037 phydev->advertising = phydev->supported;
2039 return 0;
2042 static void tg3_phy_start(struct tg3 *tp)
2044 struct phy_device *phydev;
2046 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2047 return;
2049 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2051 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2052 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2053 phydev->speed = tp->link_config.speed;
2054 phydev->duplex = tp->link_config.duplex;
2055 phydev->autoneg = tp->link_config.autoneg;
2056 phydev->advertising = tp->link_config.advertising;
2059 phy_start(phydev);
2061 phy_start_aneg(phydev);
2064 static void tg3_phy_stop(struct tg3 *tp)
2066 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2067 return;
2069 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2072 static void tg3_phy_fini(struct tg3 *tp)
2074 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2075 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2076 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2080 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2082 int err;
2083 u32 val;
2085 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2086 return 0;
2088 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2089 /* Cannot do read-modify-write on 5401 */
2090 err = tg3_phy_auxctl_write(tp,
2091 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2092 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2093 0x4c20);
2094 goto done;
2097 err = tg3_phy_auxctl_read(tp,
2098 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2099 if (err)
2100 return err;
2102 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2103 err = tg3_phy_auxctl_write(tp,
2104 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2106 done:
2107 return err;
2110 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2112 u32 phytest;
2114 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2115 u32 phy;
2117 tg3_writephy(tp, MII_TG3_FET_TEST,
2118 phytest | MII_TG3_FET_SHADOW_EN);
2119 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2120 if (enable)
2121 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2122 else
2123 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2124 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2126 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
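/*
 * Editor's sketch, not part of tg3.c: the generic shape of the FET
 * shadow-register sequence used above -- expose the shadow bank, do a
 * read-modify-write, then always restore the test register.  The
 * mdio_read/mdio_write callbacks and fet_shadow_rmw() are hypothetical
 * stand-ins for tg3_readphy()/tg3_writephy(); the constants mirror
 * tg3.h.
 */
#include <stdint.h>

#define FET_TEST        0x1f    /* MII_TG3_FET_TEST */
#define FET_SHADOW_EN   0x0080  /* MII_TG3_FET_SHADOW_EN */

typedef int (*mdio_read_t)(void *ctx, int reg, uint32_t *val);
typedef int (*mdio_write_t)(void *ctx, int reg, uint32_t val);

static int fet_shadow_rmw(void *ctx, mdio_read_t rd, mdio_write_t wr,
                          int shadow_reg, uint32_t set, uint32_t clear)
{
        uint32_t test, val;
        int err;

        err = rd(ctx, FET_TEST, &test);
        if (err)
                return err;

        /* Expose the shadow bank ... */
        wr(ctx, FET_TEST, test | FET_SHADOW_EN);

        /* ... read-modify-write the shadowed register ... */
        if (!rd(ctx, shadow_reg, &val))
                wr(ctx, shadow_reg, (val & ~clear) | set);

        /* ... and restore the original test register regardless. */
        return wr(ctx, FET_TEST, test);
}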
2130 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2132 u32 reg;
2134 if (!tg3_flag(tp, 5705_PLUS) ||
2135 (tg3_flag(tp, 5717_PLUS) &&
2136 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2137 return;
2139 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2140 tg3_phy_fet_toggle_apd(tp, enable);
2141 return;
2144 reg = MII_TG3_MISC_SHDW_WREN |
2145 MII_TG3_MISC_SHDW_SCR5_SEL |
2146 MII_TG3_MISC_SHDW_SCR5_LPED |
2147 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2148 MII_TG3_MISC_SHDW_SCR5_SDTL |
2149 MII_TG3_MISC_SHDW_SCR5_C125OE;
2150 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2151 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2153 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2156 reg = MII_TG3_MISC_SHDW_WREN |
2157 MII_TG3_MISC_SHDW_APD_SEL |
2158 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2159 if (enable)
2160 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2162 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2165 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2167 u32 phy;
2169 if (!tg3_flag(tp, 5705_PLUS) ||
2170 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2171 return;
2173 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2174 u32 ephy;
2176 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2177 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2179 tg3_writephy(tp, MII_TG3_FET_TEST,
2180 ephy | MII_TG3_FET_SHADOW_EN);
2181 if (!tg3_readphy(tp, reg, &phy)) {
2182 if (enable)
2183 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2184 else
2185 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2186 tg3_writephy(tp, reg, phy);
2188 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2190 } else {
2191 int ret;
2193 ret = tg3_phy_auxctl_read(tp,
2194 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2195 if (!ret) {
2196 if (enable)
2197 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2198 else
2199 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2200 tg3_phy_auxctl_write(tp,
2201 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2206 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2208 int ret;
2209 u32 val;
2211 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2212 return;
2214 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2215 if (!ret)
2216 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2217 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2220 static void tg3_phy_apply_otp(struct tg3 *tp)
2222 u32 otp, phy;
2224 if (!tp->phy_otp)
2225 return;
2227 otp = tp->phy_otp;
2229 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2230 return;
2232 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2233 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2234 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2236 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2237 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2238 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2240 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2241 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2242 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2244 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2245 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2247 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2248 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2250 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2251 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2252 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2254 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2257 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2259 u32 val;
2261 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2262 return;
2264 tp->setlpicnt = 0;
2266 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2267 current_link_up == 1 &&
2268 tp->link_config.active_duplex == DUPLEX_FULL &&
2269 (tp->link_config.active_speed == SPEED_100 ||
2270 tp->link_config.active_speed == SPEED_1000)) {
2271 u32 eeectl;
2273 if (tp->link_config.active_speed == SPEED_1000)
2274 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2275 else
2276 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2278 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2280 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2281 TG3_CL45_D7_EEERES_STAT, &val);
2283 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2284 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2285 tp->setlpicnt = 2;
2288 if (!tp->setlpicnt) {
2289 if (current_link_up == 1 &&
2290 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2291 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2292 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2295 val = tr32(TG3_CPMU_EEE_MODE);
2296 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2300 static void tg3_phy_eee_enable(struct tg3 *tp)
2302 u32 val;
2304 if (tp->link_config.active_speed == SPEED_1000 &&
2305 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2306 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2307 tg3_flag(tp, 57765_CLASS)) &&
2308 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2309 val = MII_TG3_DSP_TAP26_ALNOKO |
2310 MII_TG3_DSP_TAP26_RMRXSTO;
2311 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2312 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2315 val = tr32(TG3_CPMU_EEE_MODE);
2316 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2319 static int tg3_wait_macro_done(struct tg3 *tp)
2321 int limit = 100;
2323 while (limit--) {
2324 u32 tmp32;
2326 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2327 if ((tmp32 & 0x1000) == 0)
2328 break;
2331 if (limit < 0)
2332 return -EBUSY;
2334 return 0;
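/*
 * Editor's sketch, not part of tg3.c: the bounded-poll idiom behind
 * tg3_wait_macro_done().  Note the post-decrement: when the loop runs
 * dry, limit ends at -1, so "limit < 0" distinguishes a timeout from a
 * break taken on the final pass.  This sketch simplifies by ignoring
 * read errors (the driver just polls again on a failed PHY read);
 * poll_until_clear() and the callback are hypothetical.
 */
#include <errno.h>
#include <stdbool.h>

static int poll_until_clear(bool (*bit_is_set)(void *ctx), void *ctx,
                            int limit)
{
        while (limit--) {
                if (!bit_is_set(ctx))
                        break;          /* condition met, limit >= 0 */
        }
        if (limit < 0)
                return -EBUSY;          /* polls exhausted */
        return 0;
}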
2337 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2339 static const u32 test_pat[4][6] = {
2340 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2341 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2342 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2343 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2345 int chan;
2347 for (chan = 0; chan < 4; chan++) {
2348 int i;
2350 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2351 (chan * 0x2000) | 0x0200);
2352 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2354 for (i = 0; i < 6; i++)
2355 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2356 test_pat[chan][i]);
2358 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2359 if (tg3_wait_macro_done(tp)) {
2360 *resetp = 1;
2361 return -EBUSY;
2364 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2365 (chan * 0x2000) | 0x0200);
2366 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2367 if (tg3_wait_macro_done(tp)) {
2368 *resetp = 1;
2369 return -EBUSY;
2372 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2373 if (tg3_wait_macro_done(tp)) {
2374 *resetp = 1;
2375 return -EBUSY;
2378 for (i = 0; i < 6; i += 2) {
2379 u32 low, high;
2381 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2382 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2383 tg3_wait_macro_done(tp)) {
2384 *resetp = 1;
2385 return -EBUSY;
2387 low &= 0x7fff;
2388 high &= 0x000f;
2389 if (low != test_pat[chan][i] ||
2390 high != test_pat[chan][i+1]) {
2391 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2392 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2393 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2395 return -EBUSY;
2400 return 0;
2403 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2405 int chan;
2407 for (chan = 0; chan < 4; chan++) {
2408 int i;
2410 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2411 (chan * 0x2000) | 0x0200);
2412 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2413 for (i = 0; i < 6; i++)
2414 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2415 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2416 if (tg3_wait_macro_done(tp))
2417 return -EBUSY;
2420 return 0;
2423 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2425 u32 reg32, phy9_orig;
2426 int retries, do_phy_reset, err;
2428 retries = 10;
2429 do_phy_reset = 1;
2430 do {
2431 if (do_phy_reset) {
2432 err = tg3_bmcr_reset(tp);
2433 if (err)
2434 return err;
2435 do_phy_reset = 0;
2438 /* Disable transmitter and interrupt. */
2439 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2440 continue;
2442 reg32 |= 0x3000;
2443 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2445 /* Set full-duplex, 1000 Mbps. */
2446 tg3_writephy(tp, MII_BMCR,
2447 BMCR_FULLDPLX | BMCR_SPEED1000);
2449 /* Set to master mode. */
2450 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2451 continue;
2453 tg3_writephy(tp, MII_CTRL1000,
2454 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2456 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2457 if (err)
2458 return err;
2460 /* Block the PHY control access. */
2461 tg3_phydsp_write(tp, 0x8005, 0x0800);
2463 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2464 if (!err)
2465 break;
2466 } while (--retries);
2468 err = tg3_phy_reset_chanpat(tp);
2469 if (err)
2470 return err;
2472 tg3_phydsp_write(tp, 0x8005, 0x0000);
2474 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2475 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2477 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2479 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2481 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2482 reg32 &= ~0x3000;
2483 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2484 } else if (!err)
2485 err = -EBUSY;
2487 return err;
2490 static void tg3_carrier_on(struct tg3 *tp)
2492 netif_carrier_on(tp->dev);
2493 tp->link_up = true;
2496 static void tg3_carrier_off(struct tg3 *tp)
2498 netif_carrier_off(tp->dev);
2499 tp->link_up = false;
2502 /* This will unconditionally reset the tigon3 PHY and bring it
2503 * back to a known good state.
2505 static int tg3_phy_reset(struct tg3 *tp)
2507 u32 val, cpmuctrl;
2508 int err;
2510 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2511 val = tr32(GRC_MISC_CFG);
2512 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2513 udelay(40);
2515 err = tg3_readphy(tp, MII_BMSR, &val);
2516 err |= tg3_readphy(tp, MII_BMSR, &val);
2517 if (err != 0)
2518 return -EBUSY;
2520 if (netif_running(tp->dev) && tp->link_up) {
2521 tg3_carrier_off(tp);
2522 tg3_link_report(tp);
2525 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2526 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2527 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2528 err = tg3_phy_reset_5703_4_5(tp);
2529 if (err)
2530 return err;
2531 goto out;
2534 cpmuctrl = 0;
2535 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2536 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2537 cpmuctrl = tr32(TG3_CPMU_CTRL);
2538 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2539 tw32(TG3_CPMU_CTRL,
2540 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2543 err = tg3_bmcr_reset(tp);
2544 if (err)
2545 return err;
2547 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2548 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2549 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2551 tw32(TG3_CPMU_CTRL, cpmuctrl);
2554 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2555 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2556 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2557 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2558 CPMU_LSPD_1000MB_MACCLK_12_5) {
2559 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2560 udelay(40);
2561 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2565 if (tg3_flag(tp, 5717_PLUS) &&
2566 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2567 return 0;
2569 tg3_phy_apply_otp(tp);
2571 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2572 tg3_phy_toggle_apd(tp, true);
2573 else
2574 tg3_phy_toggle_apd(tp, false);
2576 out:
2577 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2578 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2579 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2580 tg3_phydsp_write(tp, 0x000a, 0x0323);
2581 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2584 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2585 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2586 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2589 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2590 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2591 tg3_phydsp_write(tp, 0x000a, 0x310b);
2592 tg3_phydsp_write(tp, 0x201f, 0x9506);
2593 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2594 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2596 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2597 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2598 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2599 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2600 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2601 tg3_writephy(tp, MII_TG3_TEST1,
2602 MII_TG3_TEST1_TRIM_EN | 0x4);
2603 } else
2604 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2606 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2610 /* Set Extended packet length bit (bit 14) on all chips that
2611 * support jumbo frames. */
2612 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2613 /* Cannot do read-modify-write on 5401 */
2614 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2615 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2616 /* Set bit 14 with read-modify-write to preserve other bits */
2617 err = tg3_phy_auxctl_read(tp,
2618 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2619 if (!err)
2620 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2621 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2624 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2625 * jumbo frames transmission.
2627 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2628 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2629 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2630 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2633 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2634 /* adjust output voltage */
2635 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2638 if (tp->pci_chip_rev_id == CHIPREV_ID_5762_A0)
2639 tg3_phydsp_write(tp, 0xffb, 0x4000);
2641 tg3_phy_toggle_automdix(tp, 1);
2642 tg3_phy_set_wirespeed(tp);
2643 return 0;
2646 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2647 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2648 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2649 TG3_GPIO_MSG_NEED_VAUX)
2650 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2651 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2652 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2653 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2654 (TG3_GPIO_MSG_DRVR_PRES << 12))
2656 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2657 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2658 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2659 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2660 (TG3_GPIO_MSG_NEED_VAUX << 12))
2662 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2664 u32 status, shift;
2666 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2667 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2668 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2669 else
2670 status = tr32(TG3_CPMU_DRV_STATUS);
2672 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2673 status &= ~(TG3_GPIO_MSG_MASK << shift);
2674 status |= (newstat << shift);
2676 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2677 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2678 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2679 else
2680 tw32(TG3_CPMU_DRV_STATUS, status);
2682 return status >> TG3_APE_GPIO_MSG_SHIFT;
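/*
 * Editor's sketch, not part of tg3.c: the status word above packs one
 * small message (DRVR_PRES | NEED_VAUX) per PCI function, 4 bits
 * apart, starting at a base shift.  set_fn_status() is a hypothetical
 * illustration of the mask-then-insert arithmetic.
 */
#include <assert.h>
#include <stdint.h>

#define GPIO_MSG_MASK 0x3u      /* DRVR_PRES | NEED_VAUX */

static uint32_t set_fn_status(uint32_t status, unsigned int pci_fn,
                              unsigned int base_shift, uint32_t newstat)
{
        unsigned int shift = base_shift + 4 * pci_fn;

        status &= ~(GPIO_MSG_MASK << shift);    /* clear our slot */
        status |= newstat << shift;             /* insert new message */
        return status;
}

int main(void)
{
        /* Function 2 claims "driver present" without disturbing fn 0. */
        uint32_t s = set_fn_status(0x1, 2, 0, 0x1);

        assert(s == 0x101);
        return 0;
}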
2685 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2687 if (!tg3_flag(tp, IS_NIC))
2688 return 0;
2690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2691 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2692 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2693 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2694 return -EIO;
2696 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2698 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2699 TG3_GRC_LCLCTL_PWRSW_DELAY);
2701 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2702 } else {
2703 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2704 TG3_GRC_LCLCTL_PWRSW_DELAY);
2707 return 0;
2710 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2712 u32 grc_local_ctrl;
2714 if (!tg3_flag(tp, IS_NIC) ||
2715 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2716 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2717 return;
2719 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2721 tw32_wait_f(GRC_LOCAL_CTRL,
2722 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2723 TG3_GRC_LCLCTL_PWRSW_DELAY);
2725 tw32_wait_f(GRC_LOCAL_CTRL,
2726 grc_local_ctrl,
2727 TG3_GRC_LCLCTL_PWRSW_DELAY);
2729 tw32_wait_f(GRC_LOCAL_CTRL,
2730 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2731 TG3_GRC_LCLCTL_PWRSW_DELAY);
2734 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2736 if (!tg3_flag(tp, IS_NIC))
2737 return;
2739 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2740 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2741 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2742 (GRC_LCLCTRL_GPIO_OE0 |
2743 GRC_LCLCTRL_GPIO_OE1 |
2744 GRC_LCLCTRL_GPIO_OE2 |
2745 GRC_LCLCTRL_GPIO_OUTPUT0 |
2746 GRC_LCLCTRL_GPIO_OUTPUT1),
2747 TG3_GRC_LCLCTL_PWRSW_DELAY);
2748 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2749 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2750 /* The 5761 non-E device swaps GPIO 0 and GPIO 2. */
2751 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2752 GRC_LCLCTRL_GPIO_OE1 |
2753 GRC_LCLCTRL_GPIO_OE2 |
2754 GRC_LCLCTRL_GPIO_OUTPUT0 |
2755 GRC_LCLCTRL_GPIO_OUTPUT1 |
2756 tp->grc_local_ctrl;
2757 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2758 TG3_GRC_LCLCTL_PWRSW_DELAY);
2760 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2761 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2762 TG3_GRC_LCLCTL_PWRSW_DELAY);
2764 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2765 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2766 TG3_GRC_LCLCTL_PWRSW_DELAY);
2767 } else {
2768 u32 no_gpio2;
2769 u32 grc_local_ctrl = 0;
2771 /* Workaround to avoid drawing too much current. */
2772 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2773 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2774 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2775 grc_local_ctrl,
2776 TG3_GRC_LCLCTL_PWRSW_DELAY);
2779 /* On 5753 and variants, GPIO2 cannot be used. */
2780 no_gpio2 = tp->nic_sram_data_cfg &
2781 NIC_SRAM_DATA_CFG_NO_GPIO2;
2783 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2784 GRC_LCLCTRL_GPIO_OE1 |
2785 GRC_LCLCTRL_GPIO_OE2 |
2786 GRC_LCLCTRL_GPIO_OUTPUT1 |
2787 GRC_LCLCTRL_GPIO_OUTPUT2;
2788 if (no_gpio2) {
2789 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2790 GRC_LCLCTRL_GPIO_OUTPUT2);
2792 tw32_wait_f(GRC_LOCAL_CTRL,
2793 tp->grc_local_ctrl | grc_local_ctrl,
2794 TG3_GRC_LCLCTL_PWRSW_DELAY);
2796 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2798 tw32_wait_f(GRC_LOCAL_CTRL,
2799 tp->grc_local_ctrl | grc_local_ctrl,
2800 TG3_GRC_LCLCTL_PWRSW_DELAY);
2802 if (!no_gpio2) {
2803 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2804 tw32_wait_f(GRC_LOCAL_CTRL,
2805 tp->grc_local_ctrl | grc_local_ctrl,
2806 TG3_GRC_LCLCTL_PWRSW_DELAY);
2811 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2813 u32 msg = 0;
2815 /* Serialize power state transitions */
2816 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2817 return;
2819 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2820 msg = TG3_GPIO_MSG_NEED_VAUX;
2822 msg = tg3_set_function_status(tp, msg);
2824 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2825 goto done;
2827 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2828 tg3_pwrsrc_switch_to_vaux(tp);
2829 else
2830 tg3_pwrsrc_die_with_vmain(tp);
2832 done:
2833 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2836 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2838 bool need_vaux = false;
2840 /* GPIO handling is completely different on 57765-class devices. */
2841 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2842 return;
2844 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2845 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2846 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2847 tg3_frob_aux_power_5717(tp, include_wol ?
2848 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2849 return;
2852 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2853 struct net_device *dev_peer;
2855 dev_peer = pci_get_drvdata(tp->pdev_peer);
2857 /* remove_one() may have been run on the peer. */
2858 if (dev_peer) {
2859 struct tg3 *tp_peer = netdev_priv(dev_peer);
2861 if (tg3_flag(tp_peer, INIT_COMPLETE))
2862 return;
2864 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2865 tg3_flag(tp_peer, ENABLE_ASF))
2866 need_vaux = true;
2870 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2871 tg3_flag(tp, ENABLE_ASF))
2872 need_vaux = true;
2874 if (need_vaux)
2875 tg3_pwrsrc_switch_to_vaux(tp);
2876 else
2877 tg3_pwrsrc_die_with_vmain(tp);
2880 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2882 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2883 return 1;
2884 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2885 if (speed != SPEED_10)
2886 return 1;
2887 } else if (speed == SPEED_10)
2888 return 1;
2890 return 0;
2893 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2895 u32 val;
2897 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2898 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2899 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2900 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2902 sg_dig_ctrl |=
2903 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2904 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2905 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2907 return;
2910 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2911 tg3_bmcr_reset(tp);
2912 val = tr32(GRC_MISC_CFG);
2913 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2914 udelay(40);
2915 return;
2916 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2917 u32 phytest;
2918 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2919 u32 phy;
2921 tg3_writephy(tp, MII_ADVERTISE, 0);
2922 tg3_writephy(tp, MII_BMCR,
2923 BMCR_ANENABLE | BMCR_ANRESTART);
2925 tg3_writephy(tp, MII_TG3_FET_TEST,
2926 phytest | MII_TG3_FET_SHADOW_EN);
2927 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2928 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2929 tg3_writephy(tp,
2930 MII_TG3_FET_SHDW_AUXMODE4,
2931 phy);
2933 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2935 return;
2936 } else if (do_low_power) {
2937 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2938 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2940 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2941 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2942 MII_TG3_AUXCTL_PCTL_VREG_11V;
2943 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2946 /* The PHY should not be powered down on some chips because
2947 * of bugs.
2949 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2950 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2951 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2952 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2953 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2954 !tp->pci_fn))
2955 return;
2957 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2958 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2959 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2960 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2961 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2962 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2965 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2968 /* tp->lock is held. */
2969 static int tg3_nvram_lock(struct tg3 *tp)
2971 if (tg3_flag(tp, NVRAM)) {
2972 int i;
2974 if (tp->nvram_lock_cnt == 0) {
2975 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2976 for (i = 0; i < 8000; i++) {
2977 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2978 break;
2979 udelay(20);
2981 if (i == 8000) {
2982 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2983 return -ENODEV;
2986 tp->nvram_lock_cnt++;
2988 return 0;
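/*
 * Editor's sketch, not part of tg3.c: the NVRAM lock above is a
 * hardware semaphore (SWARB request/grant bits) wrapped in a nesting
 * count, so re-entrant callers under tp->lock don't re-request the
 * arbiter.  The rd/wr callbacks and the simplified write semantics
 * here are hypothetical stand-ins for tr32()/tw32() and the
 * SWARB_REQ_SET1/SWARB_REQ_CLR1 bits.
 */
#include <errno.h>
#include <stdint.h>

#define SWARB_REQ  0x1u         /* request bit for this port */
#define SWARB_GNT  0x2u         /* grant bit for this port */

struct nvram_arb {
        uint32_t (*rd)(void *hw);
        void (*wr)(void *hw, uint32_t val);
        void *hw;
        int lock_cnt;
};

static int nvram_arb_lock(struct nvram_arb *a)
{
        if (a->lock_cnt == 0) {
                int i;

                a->wr(a->hw, SWARB_REQ);
                for (i = 0; i < 8000; i++) {
                        if (a->rd(a->hw) & SWARB_GNT)
                                break;
                }
                if (i == 8000) {
                        a->wr(a->hw, 0);        /* withdraw the request */
                        return -ENODEV;
                }
        }
        a->lock_cnt++;                  /* nested acquire is free */
        return 0;
}

static void nvram_arb_unlock(struct nvram_arb *a)
{
        if (a->lock_cnt > 0 && --a->lock_cnt == 0)
                a->wr(a->hw, 0);        /* release at outermost unlock only */
}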
2991 /* tp->lock is held. */
2992 static void tg3_nvram_unlock(struct tg3 *tp)
2994 if (tg3_flag(tp, NVRAM)) {
2995 if (tp->nvram_lock_cnt > 0)
2996 tp->nvram_lock_cnt--;
2997 if (tp->nvram_lock_cnt == 0)
2998 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3002 /* tp->lock is held. */
3003 static void tg3_enable_nvram_access(struct tg3 *tp)
3005 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3006 u32 nvaccess = tr32(NVRAM_ACCESS);
3008 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3012 /* tp->lock is held. */
3013 static void tg3_disable_nvram_access(struct tg3 *tp)
3015 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3016 u32 nvaccess = tr32(NVRAM_ACCESS);
3018 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3022 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3023 u32 offset, u32 *val)
3025 u32 tmp;
3026 int i;
3028 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3029 return -EINVAL;
3031 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3032 EEPROM_ADDR_DEVID_MASK |
3033 EEPROM_ADDR_READ);
3034 tw32(GRC_EEPROM_ADDR,
3035 tmp |
3036 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3037 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3038 EEPROM_ADDR_ADDR_MASK) |
3039 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3041 for (i = 0; i < 1000; i++) {
3042 tmp = tr32(GRC_EEPROM_ADDR);
3044 if (tmp & EEPROM_ADDR_COMPLETE)
3045 break;
3046 msleep(1);
3048 if (!(tmp & EEPROM_ADDR_COMPLETE))
3049 return -EBUSY;
3051 tmp = tr32(GRC_EEPROM_DATA);
3054 * The data will always be opposite the native endian
3055 * format. Perform a blind byteswap to compensate.
3057 *val = swab32(tmp);
3059 return 0;
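/*
 * Editor's sketch, not part of tg3.c: the "blind byteswap" above.  The
 * EEPROM data register always returns the 32-bit word opposite to
 * native endianness, so an unconditional swap -- not a conditional
 * cpu_to_le32()/cpu_to_be32() -- is the right fix on both BE and LE
 * hosts.  swab32() here is a local re-implementation of the kernel
 * helper.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
        return ((x & 0x000000ffu) << 24) |
               ((x & 0x0000ff00u) <<  8) |
               ((x & 0x00ff0000u) >>  8) |
               ((x & 0xff000000u) >> 24);
}

int main(void)
{
        assert(swab32(0x12345678u) == 0x78563412u);
        assert(swab32(swab32(0xdeadbeefu)) == 0xdeadbeefu); /* involution */
        return 0;
}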
3062 #define NVRAM_CMD_TIMEOUT 10000
3064 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3066 int i;
3068 tw32(NVRAM_CMD, nvram_cmd);
3069 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3070 udelay(10);
3071 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3072 udelay(10);
3073 break;
3077 if (i == NVRAM_CMD_TIMEOUT)
3078 return -EBUSY;
3080 return 0;
3083 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3085 if (tg3_flag(tp, NVRAM) &&
3086 tg3_flag(tp, NVRAM_BUFFERED) &&
3087 tg3_flag(tp, FLASH) &&
3088 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3089 (tp->nvram_jedecnum == JEDEC_ATMEL))
3091 addr = ((addr / tp->nvram_pagesize) <<
3092 ATMEL_AT45DB0X1B_PAGE_POS) +
3093 (addr % tp->nvram_pagesize);
3095 return addr;
3098 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3100 if (tg3_flag(tp, NVRAM) &&
3101 tg3_flag(tp, NVRAM_BUFFERED) &&
3102 tg3_flag(tp, FLASH) &&
3103 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3104 (tp->nvram_jedecnum == JEDEC_ATMEL))
3106 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3107 tp->nvram_pagesize) +
3108 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3110 return addr;
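/*
 * Editor's sketch, not part of tg3.c: the AT45DB-style address
 * translation in the two helpers above.  That flash family uses
 * 264-byte pages addressed as page:offset with the page number
 * starting at bit 9, so a linear byte address must be split by
 * div/mod rather than by masking.  PAGE_POS and PAGESIZE mirror
 * ATMEL_AT45DB0X1B_PAGE_POS and the 264-byte page size tg3 uses; the
 * function names are hypothetical.
 */
#include <assert.h>
#include <stdint.h>

#define PAGE_POS  9u    /* page number starts at bit 9 */
#define PAGESIZE  264u  /* bytes per page (not a power of two!) */

static uint32_t linear_to_phys(uint32_t addr)
{
        return ((addr / PAGESIZE) << PAGE_POS) + (addr % PAGESIZE);
}

static uint32_t phys_to_linear(uint32_t addr)
{
        return (addr >> PAGE_POS) * PAGESIZE +
               (addr & ((1u << PAGE_POS) - 1));
}

int main(void)
{
        /* Byte 1000 lives in page 3 (3 * 264 = 792), offset 208. */
        assert(linear_to_phys(1000) == ((3u << PAGE_POS) | 208u));
        assert(phys_to_linear(linear_to_phys(1000)) == 1000);
        return 0;
}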
3113 /* NOTE: Data read in from NVRAM is byteswapped according to
3114 * the byteswapping settings for all other register accesses.
3115 * tg3 devices are BE devices, so on a BE machine, the data
3116 * returned will be exactly as it is seen in NVRAM. On a LE
3117 * machine, the 32-bit value will be byteswapped.
3119 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3121 int ret;
3123 if (!tg3_flag(tp, NVRAM))
3124 return tg3_nvram_read_using_eeprom(tp, offset, val);
3126 offset = tg3_nvram_phys_addr(tp, offset);
3128 if (offset > NVRAM_ADDR_MSK)
3129 return -EINVAL;
3131 ret = tg3_nvram_lock(tp);
3132 if (ret)
3133 return ret;
3135 tg3_enable_nvram_access(tp);
3137 tw32(NVRAM_ADDR, offset);
3138 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3139 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3141 if (ret == 0)
3142 *val = tr32(NVRAM_RDDATA);
3144 tg3_disable_nvram_access(tp);
3146 tg3_nvram_unlock(tp);
3148 return ret;
3151 /* Ensures NVRAM data is in bytestream format. */
3152 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3154 u32 v;
3155 int res = tg3_nvram_read(tp, offset, &v);
3156 if (!res)
3157 *val = cpu_to_be32(v);
3158 return res;
3161 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3162 u32 offset, u32 len, u8 *buf)
3164 int i, j, rc = 0;
3165 u32 val;
3167 for (i = 0; i < len; i += 4) {
3168 u32 addr;
3169 __be32 data;
3171 addr = offset + i;
3173 memcpy(&data, buf + i, 4);
3176 * The SEEPROM interface expects the data to always be opposite
3177 * the native endian format. We accomplish this by reversing
3178 * all the operations that would have been performed on the
3179 * data from a call to tg3_nvram_read_be32().
3181 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3183 val = tr32(GRC_EEPROM_ADDR);
3184 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3186 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3187 EEPROM_ADDR_READ);
3188 tw32(GRC_EEPROM_ADDR, val |
3189 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3190 (addr & EEPROM_ADDR_ADDR_MASK) |
3191 EEPROM_ADDR_START |
3192 EEPROM_ADDR_WRITE);
3194 for (j = 0; j < 1000; j++) {
3195 val = tr32(GRC_EEPROM_ADDR);
3197 if (val & EEPROM_ADDR_COMPLETE)
3198 break;
3199 msleep(1);
3201 if (!(val & EEPROM_ADDR_COMPLETE)) {
3202 rc = -EBUSY;
3203 break;
3207 return rc;
3210 /* offset and length are dword aligned */
3211 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3212 u8 *buf)
3214 int ret = 0;
3215 u32 pagesize = tp->nvram_pagesize;
3216 u32 pagemask = pagesize - 1;
3217 u32 nvram_cmd;
3218 u8 *tmp;
3220 tmp = kmalloc(pagesize, GFP_KERNEL);
3221 if (tmp == NULL)
3222 return -ENOMEM;
3224 while (len) {
3225 int j;
3226 u32 phy_addr, page_off, size;
3228 phy_addr = offset & ~pagemask;
3230 for (j = 0; j < pagesize; j += 4) {
3231 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3232 (__be32 *) (tmp + j));
3233 if (ret)
3234 break;
3236 if (ret)
3237 break;
3239 page_off = offset & pagemask;
3240 size = pagesize;
3241 if (len < size)
3242 size = len;
3244 len -= size;
3246 memcpy(tmp + page_off, buf, size);
3248 offset = offset + (pagesize - page_off);
3250 tg3_enable_nvram_access(tp);
3253 * Before we can erase the flash page, we need
3254 * to issue a special "write enable" command.
3256 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3258 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3259 break;
3261 /* Erase the target page */
3262 tw32(NVRAM_ADDR, phy_addr);
3264 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3265 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3267 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3268 break;
3270 /* Issue another write enable to start the write. */
3271 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3273 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3274 break;
3276 for (j = 0; j < pagesize; j += 4) {
3277 __be32 data;
3279 data = *((__be32 *) (tmp + j));
3281 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3283 tw32(NVRAM_ADDR, phy_addr + j);
3285 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3286 NVRAM_CMD_WR;
3288 if (j == 0)
3289 nvram_cmd |= NVRAM_CMD_FIRST;
3290 else if (j == (pagesize - 4))
3291 nvram_cmd |= NVRAM_CMD_LAST;
3293 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3294 if (ret)
3295 break;
3297 if (ret)
3298 break;
3301 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3302 tg3_nvram_exec_cmd(tp, nvram_cmd);
3304 kfree(tmp);
3306 return ret;
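/*
 * Editor's sketch, not part of tg3.c: the page-split arithmetic that
 * drives the read/merge/erase/rewrite loop above.  Each iteration
 * rounds the offset down to a page boundary, merges caller data at the
 * in-page offset, and advances to the next page boundary.  This sketch
 * caps each chunk at the space left in the page, which is what the
 * advance step assumes; split_pages() is hypothetical and just prints
 * the plan.
 */
#include <stdint.h>
#include <stdio.h>

static void split_pages(uint32_t offset, uint32_t len, uint32_t pagesize)
{
        uint32_t pagemask = pagesize - 1;  /* pagesize is a power of two here */

        while (len) {
                uint32_t phy_addr = offset & ~pagemask;  /* page base    */
                uint32_t page_off = offset & pagemask;   /* merge point  */
                uint32_t size = pagesize - page_off;     /* room in page */

                if (len < size)
                        size = len;

                printf("page@0x%x: merge %u bytes at +%u\n",
                       (unsigned)phy_addr, (unsigned)size,
                       (unsigned)page_off);

                len -= size;
                offset = phy_addr + pagesize;   /* next page boundary */
        }
}

int main(void)
{
        /* A 300-byte write at offset 200 straddles two 256-byte pages. */
        split_pages(200, 300, 256);
        return 0;
}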
3309 /* offset and length are dword aligned */
3310 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3311 u8 *buf)
3313 int i, ret = 0;
3315 for (i = 0; i < len; i += 4, offset += 4) {
3316 u32 page_off, phy_addr, nvram_cmd;
3317 __be32 data;
3319 memcpy(&data, buf + i, 4);
3320 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3322 page_off = offset % tp->nvram_pagesize;
3324 phy_addr = tg3_nvram_phys_addr(tp, offset);
3326 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3328 if (page_off == 0 || i == 0)
3329 nvram_cmd |= NVRAM_CMD_FIRST;
3330 if (page_off == (tp->nvram_pagesize - 4))
3331 nvram_cmd |= NVRAM_CMD_LAST;
3333 if (i == (len - 4))
3334 nvram_cmd |= NVRAM_CMD_LAST;
3336 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3337 !tg3_flag(tp, FLASH) ||
3338 !tg3_flag(tp, 57765_PLUS))
3339 tw32(NVRAM_ADDR, phy_addr);
3341 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3342 !tg3_flag(tp, 5755_PLUS) &&
3343 (tp->nvram_jedecnum == JEDEC_ST) &&
3344 (nvram_cmd & NVRAM_CMD_FIRST)) {
3345 u32 cmd;
3347 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3348 ret = tg3_nvram_exec_cmd(tp, cmd);
3349 if (ret)
3350 break;
3352 if (!tg3_flag(tp, FLASH)) {
3353 /* We always do complete word writes to the EEPROM. */
3354 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3357 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3358 if (ret)
3359 break;
3361 return ret;
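/*
 * Editor's sketch, not part of tg3.c: the FIRST/LAST framing logic of
 * the buffered write loop above.  Every dword is its own command, but
 * the controller wants FIRST at the start of a burst (first dword
 * overall, or first dword of a page) and LAST at the end of one (last
 * dword of a page, or last dword overall).  cmd_flags_for() is
 * hypothetical.
 */
#include <assert.h>
#include <stdint.h>

#define CMD_FIRST 0x1u
#define CMD_LAST  0x2u

static uint32_t cmd_flags_for(uint32_t i, uint32_t len,
                              uint32_t offset, uint32_t pagesize)
{
        uint32_t page_off = offset % pagesize;
        uint32_t flags = 0;

        if (page_off == 0 || i == 0)
                flags |= CMD_FIRST;
        if (page_off == pagesize - 4 || i == len - 4)
                flags |= CMD_LAST;
        return flags;
}

int main(void)
{
        /* 12-byte write at offset 4 into 16-byte pages. */
        assert(cmd_flags_for(0, 12, 4, 16) == CMD_FIRST);  /* first dword  */
        assert(cmd_flags_for(4, 12, 8, 16) == 0);          /* middle dword */
        assert(cmd_flags_for(8, 12, 12, 16) == CMD_LAST);  /* page end and
                                                              final dword  */
        return 0;
}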
3364 /* offset and length are dword aligned */
3365 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3367 int ret;
3369 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3370 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3371 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3372 udelay(40);
3375 if (!tg3_flag(tp, NVRAM)) {
3376 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3377 } else {
3378 u32 grc_mode;
3380 ret = tg3_nvram_lock(tp);
3381 if (ret)
3382 return ret;
3384 tg3_enable_nvram_access(tp);
3385 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3386 tw32(NVRAM_WRITE1, 0x406);
3388 grc_mode = tr32(GRC_MODE);
3389 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3391 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3392 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3393 buf);
3394 } else {
3395 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3396 buf);
3399 grc_mode = tr32(GRC_MODE);
3400 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3402 tg3_disable_nvram_access(tp);
3403 tg3_nvram_unlock(tp);
3406 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3407 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3408 udelay(40);
3411 return ret;
3414 #define RX_CPU_SCRATCH_BASE 0x30000
3415 #define RX_CPU_SCRATCH_SIZE 0x04000
3416 #define TX_CPU_SCRATCH_BASE 0x34000
3417 #define TX_CPU_SCRATCH_SIZE 0x04000
3419 /* tp->lock is held. */
3420 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3422 int i;
3424 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3426 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3427 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3429 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3430 return 0;
3432 if (offset == RX_CPU_BASE) {
3433 for (i = 0; i < 10000; i++) {
3434 tw32(offset + CPU_STATE, 0xffffffff);
3435 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3436 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3437 break;
3440 tw32(offset + CPU_STATE, 0xffffffff);
3441 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3442 udelay(10);
3443 } else {
3444 for (i = 0; i < 10000; i++) {
3445 tw32(offset + CPU_STATE, 0xffffffff);
3446 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3447 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3448 break;
3452 if (i >= 10000) {
3453 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3454 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3455 return -ENODEV;
3458 /* Clear firmware's nvram arbitration. */
3459 if (tg3_flag(tp, NVRAM))
3460 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3461 return 0;
3464 struct fw_info {
3465 unsigned int fw_base;
3466 unsigned int fw_len;
3467 const __be32 *fw_data;
3470 /* tp->lock is held. */
3471 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3472 u32 cpu_scratch_base, int cpu_scratch_size,
3473 struct fw_info *info)
3475 int err, lock_err, i;
3476 void (*write_op)(struct tg3 *, u32, u32);
3478 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3479 netdev_err(tp->dev,
3480 "%s: Trying to load TX cpu firmware which is 5705\n",
3481 __func__);
3482 return -EINVAL;
3485 if (tg3_flag(tp, 5705_PLUS))
3486 write_op = tg3_write_mem;
3487 else
3488 write_op = tg3_write_indirect_reg32;
3490 /* It is possible that bootcode is still loading at this point.
3491 * Get the nvram lock first before halting the cpu.
3493 lock_err = tg3_nvram_lock(tp);
3494 err = tg3_halt_cpu(tp, cpu_base);
3495 if (!lock_err)
3496 tg3_nvram_unlock(tp);
3497 if (err)
3498 goto out;
3500 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3501 write_op(tp, cpu_scratch_base + i, 0);
3502 tw32(cpu_base + CPU_STATE, 0xffffffff);
3503 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3504 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3505 write_op(tp, (cpu_scratch_base +
3506 (info->fw_base & 0xffff) +
3507 (i * sizeof(u32))),
3508 be32_to_cpu(info->fw_data[i]));
3510 err = 0;
3512 out:
3513 return err;
3516 /* tp->lock is held. */
3517 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3519 struct fw_info info;
3520 const __be32 *fw_data;
3521 int err, i;
3523 fw_data = (void *)tp->fw->data;
3525 /* Firmware blob starts with version numbers, followed by
3526 * start address and length. We set the complete length here:
3527 * length = end_address_of_bss - start_address_of_text.
3528 * The remainder is the blob to be loaded contiguously
3529 * from the start address. */
3531 info.fw_base = be32_to_cpu(fw_data[1]);
3532 info.fw_len = tp->fw->size - 12;
3533 info.fw_data = &fw_data[3];
3535 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3536 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3537 &info);
3538 if (err)
3539 return err;
3541 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3542 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3543 &info);
3544 if (err)
3545 return err;
3547 /* Now startup only the RX cpu. */
3548 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3549 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3551 for (i = 0; i < 5; i++) {
3552 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3553 break;
3554 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3555 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3556 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3557 udelay(1000);
3559 if (i >= 5) {
3560 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3561 "should be %08x\n", __func__,
3562 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3563 return -ENODEV;
3565 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3566 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3568 return 0;
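/*
 * Editor's sketch, not part of tg3.c: the firmware blob layout the
 * loaders above assume.  The image is a stream of big-endian 32-bit
 * words: word 0 is a version stamp, word 1 the load (start) address,
 * word 2 a length field (which the driver recomputes from the file
 * size), and everything from word 3 on is the payload loaded
 * contiguously at the start address.  struct fw_image and parse_fw()
 * are hypothetical.
 */
#include <arpa/inet.h>  /* ntohl(): be32 -> host, like be32_to_cpu() */
#include <stddef.h>
#include <stdint.h>

struct fw_image {
        uint32_t base;          /* load address for the payload */
        size_t len;             /* payload length in bytes */
        const uint32_t *data;   /* big-endian payload words */
};

static int parse_fw(const uint32_t *blob, size_t size_bytes,
                    struct fw_image *img)
{
        if (size_bytes < 12)            /* need the 3-word header */
                return -1;

        img->base = ntohl(blob[1]);     /* like be32_to_cpu(fw_data[1]) */
        img->len = size_bytes - 12;     /* file size minus the header */
        img->data = &blob[3];           /* payload starts at word 3 */
        return 0;
}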
3571 /* tp->lock is held. */
3572 static int tg3_load_tso_firmware(struct tg3 *tp)
3574 struct fw_info info;
3575 const __be32 *fw_data;
3576 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3577 int err, i;
3579 if (tg3_flag(tp, HW_TSO_1) ||
3580 tg3_flag(tp, HW_TSO_2) ||
3581 tg3_flag(tp, HW_TSO_3))
3582 return 0;
3584 fw_data = (void *)tp->fw->data;
3586 /* Firmware blob starts with version numbers, followed by
3587 * start address and length. We set the complete length here:
3588 * length = end_address_of_bss - start_address_of_text.
3589 * The remainder is the blob to be loaded contiguously
3590 * from the start address. */
3592 info.fw_base = be32_to_cpu(fw_data[1]);
3593 cpu_scratch_size = tp->fw_len;
3594 info.fw_len = tp->fw->size - 12;
3595 info.fw_data = &fw_data[3];
3597 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3598 cpu_base = RX_CPU_BASE;
3599 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3600 } else {
3601 cpu_base = TX_CPU_BASE;
3602 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3603 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3606 err = tg3_load_firmware_cpu(tp, cpu_base,
3607 cpu_scratch_base, cpu_scratch_size,
3608 &info);
3609 if (err)
3610 return err;
3612 /* Now startup the cpu. */
3613 tw32(cpu_base + CPU_STATE, 0xffffffff);
3614 tw32_f(cpu_base + CPU_PC, info.fw_base);
3616 for (i = 0; i < 5; i++) {
3617 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3618 break;
3619 tw32(cpu_base + CPU_STATE, 0xffffffff);
3620 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3621 tw32_f(cpu_base + CPU_PC, info.fw_base);
3622 udelay(1000);
3624 if (i >= 5) {
3625 netdev_err(tp->dev,
3626 "%s fails to set CPU PC, is %08x should be %08x\n",
3627 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3628 return -ENODEV;
3630 tw32(cpu_base + CPU_STATE, 0xffffffff);
3631 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3632 return 0;
3636 /* tp->lock is held. */
3637 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3639 u32 addr_high, addr_low;
3640 int i;
3642 addr_high = ((tp->dev->dev_addr[0] << 8) |
3643 tp->dev->dev_addr[1]);
3644 addr_low = ((tp->dev->dev_addr[2] << 24) |
3645 (tp->dev->dev_addr[3] << 16) |
3646 (tp->dev->dev_addr[4] << 8) |
3647 (tp->dev->dev_addr[5] << 0));
3648 for (i = 0; i < 4; i++) {
3649 if (i == 1 && skip_mac_1)
3650 continue;
3651 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3652 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3655 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3656 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3657 for (i = 0; i < 12; i++) {
3658 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3659 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3663 addr_high = (tp->dev->dev_addr[0] +
3664 tp->dev->dev_addr[1] +
3665 tp->dev->dev_addr[2] +
3666 tp->dev->dev_addr[3] +
3667 tp->dev->dev_addr[4] +
3668 tp->dev->dev_addr[5]) &
3669 TX_BACKOFF_SEED_MASK;
3670 tw32(MAC_TX_BACKOFF_SEED, addr_high);
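/*
 * Editor's sketch, not part of tg3.c: how the six MAC address octets
 * above fold into the two hardware registers and the TX backoff seed.
 * mac_regs_from_addr() is hypothetical; the packing (two octets in the
 * high word, four in the low word, octet 0 most significant) mirrors
 * the loop above, and the 0x3ff mask mirrors TX_BACKOFF_SEED_MASK.
 */
#include <assert.h>
#include <stdint.h>

static void mac_regs_from_addr(const uint8_t mac[6],
                               uint32_t *hi, uint32_t *lo, uint32_t *seed)
{
        *hi = ((uint32_t)mac[0] << 8) | mac[1];
        *lo = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
              ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];
        /* Backoff seed: sum of all octets, masked to the seed field. */
        *seed = (mac[0] + mac[1] + mac[2] + mac[3] + mac[4] + mac[5]) & 0x3ff;
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
        uint32_t hi, lo, seed;

        mac_regs_from_addr(mac, &hi, &lo, &seed);
        assert(hi == 0x0010);
        assert(lo == 0x18aabbcc);
        return 0;
}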
3673 static void tg3_enable_register_access(struct tg3 *tp)
3676 * Make sure register accesses (indirect or otherwise) will function
3677 * correctly.
3679 pci_write_config_dword(tp->pdev,
3680 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3683 static int tg3_power_up(struct tg3 *tp)
3685 int err;
3687 tg3_enable_register_access(tp);
3689 err = pci_set_power_state(tp->pdev, PCI_D0);
3690 if (!err) {
3691 /* Switch out of Vaux if it is a NIC */
3692 tg3_pwrsrc_switch_to_vmain(tp);
3693 } else {
3694 netdev_err(tp->dev, "Transition to D0 failed\n");
3697 return err;
3700 static int tg3_setup_phy(struct tg3 *, int);
3702 static int tg3_power_down_prepare(struct tg3 *tp)
3704 u32 misc_host_ctrl;
3705 bool device_should_wake, do_low_power;
3707 tg3_enable_register_access(tp);
3709 /* Restore the CLKREQ setting. */
3710 if (tg3_flag(tp, CLKREQ_BUG))
3711 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3712 PCI_EXP_LNKCTL_CLKREQ_EN);
3714 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3715 tw32(TG3PCI_MISC_HOST_CTRL,
3716 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3718 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3719 tg3_flag(tp, WOL_ENABLE);
3721 if (tg3_flag(tp, USE_PHYLIB)) {
3722 do_low_power = false;
3723 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3724 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3725 struct phy_device *phydev;
3726 u32 phyid, advertising;
3728 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3730 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3732 tp->link_config.speed = phydev->speed;
3733 tp->link_config.duplex = phydev->duplex;
3734 tp->link_config.autoneg = phydev->autoneg;
3735 tp->link_config.advertising = phydev->advertising;
3737 advertising = ADVERTISED_TP |
3738 ADVERTISED_Pause |
3739 ADVERTISED_Autoneg |
3740 ADVERTISED_10baseT_Half;
3742 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3743 if (tg3_flag(tp, WOL_SPEED_100MB))
3744 advertising |=
3745 ADVERTISED_100baseT_Half |
3746 ADVERTISED_100baseT_Full |
3747 ADVERTISED_10baseT_Full;
3748 else
3749 advertising |= ADVERTISED_10baseT_Full;
3752 phydev->advertising = advertising;
3754 phy_start_aneg(phydev);
3756 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3757 if (phyid != PHY_ID_BCMAC131) {
3758 phyid &= PHY_BCM_OUI_MASK;
3759 if (phyid == PHY_BCM_OUI_1 ||
3760 phyid == PHY_BCM_OUI_2 ||
3761 phyid == PHY_BCM_OUI_3)
3762 do_low_power = true;
3765 } else {
3766 do_low_power = true;
3768 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3769 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3771 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3772 tg3_setup_phy(tp, 0);
3775 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3776 u32 val;
3778 val = tr32(GRC_VCPU_EXT_CTRL);
3779 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3780 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3781 int i;
3782 u32 val;
3784 for (i = 0; i < 200; i++) {
3785 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3786 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3787 break;
3788 msleep(1);
3791 if (tg3_flag(tp, WOL_CAP))
3792 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3793 WOL_DRV_STATE_SHUTDOWN |
3794 WOL_DRV_WOL |
3795 WOL_SET_MAGIC_PKT);
3797 if (device_should_wake) {
3798 u32 mac_mode;
3800 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3801 if (do_low_power &&
3802 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3803 tg3_phy_auxctl_write(tp,
3804 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3805 MII_TG3_AUXCTL_PCTL_WOL_EN |
3806 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3807 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3808 udelay(40);
3811 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3812 mac_mode = MAC_MODE_PORT_MODE_GMII;
3813 else
3814 mac_mode = MAC_MODE_PORT_MODE_MII;
3816 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3817 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3818 ASIC_REV_5700) {
3819 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3820 SPEED_100 : SPEED_10;
3821 if (tg3_5700_link_polarity(tp, speed))
3822 mac_mode |= MAC_MODE_LINK_POLARITY;
3823 else
3824 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3826 } else {
3827 mac_mode = MAC_MODE_PORT_MODE_TBI;
3830 if (!tg3_flag(tp, 5750_PLUS))
3831 tw32(MAC_LED_CTRL, tp->led_ctrl);
3833 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3834 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3835 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3836 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3838 if (tg3_flag(tp, ENABLE_APE))
3839 mac_mode |= MAC_MODE_APE_TX_EN |
3840 MAC_MODE_APE_RX_EN |
3841 MAC_MODE_TDE_ENABLE;
3843 tw32_f(MAC_MODE, mac_mode);
3844 udelay(100);
3846 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3847 udelay(10);
3850 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3851 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3852 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3853 u32 base_val;
3855 base_val = tp->pci_clock_ctrl;
3856 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3857 CLOCK_CTRL_TXCLK_DISABLE);
3859 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3860 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3861 } else if (tg3_flag(tp, 5780_CLASS) ||
3862 tg3_flag(tp, CPMU_PRESENT) ||
3863 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3864 /* do nothing */
3865 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3866 u32 newbits1, newbits2;
3868 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3869 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3870 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3871 CLOCK_CTRL_TXCLK_DISABLE |
3872 CLOCK_CTRL_ALTCLK);
3873 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3874 } else if (tg3_flag(tp, 5705_PLUS)) {
3875 newbits1 = CLOCK_CTRL_625_CORE;
3876 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3877 } else {
3878 newbits1 = CLOCK_CTRL_ALTCLK;
3879 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3882 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3883 40);
3885 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3886 40);
3888 if (!tg3_flag(tp, 5705_PLUS)) {
3889 u32 newbits3;
3891 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3892 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3893 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3894 CLOCK_CTRL_TXCLK_DISABLE |
3895 CLOCK_CTRL_44MHZ_CORE);
3896 } else {
3897 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3900 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3901 tp->pci_clock_ctrl | newbits3, 40);
3905 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3906 tg3_power_down_phy(tp, do_low_power);
3908 tg3_frob_aux_power(tp, true);
3910 /* Workaround for unstable PLL clock */
3911 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3912 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3913 u32 val = tr32(0x7d00);
3915 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3916 tw32(0x7d00, val);
3917 if (!tg3_flag(tp, ENABLE_ASF)) {
3918 int err;
3920 err = tg3_nvram_lock(tp);
3921 tg3_halt_cpu(tp, RX_CPU_BASE);
3922 if (!err)
3923 tg3_nvram_unlock(tp);
3927 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3929 return 0;
3932 static void tg3_power_down(struct tg3 *tp)
3934 tg3_power_down_prepare(tp);
3936 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3937 pci_set_power_state(tp->pdev, PCI_D3hot);
3940 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3942 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3943 case MII_TG3_AUX_STAT_10HALF:
3944 *speed = SPEED_10;
3945 *duplex = DUPLEX_HALF;
3946 break;
3948 case MII_TG3_AUX_STAT_10FULL:
3949 *speed = SPEED_10;
3950 *duplex = DUPLEX_FULL;
3951 break;
3953 case MII_TG3_AUX_STAT_100HALF:
3954 *speed = SPEED_100;
3955 *duplex = DUPLEX_HALF;
3956 break;
3958 case MII_TG3_AUX_STAT_100FULL:
3959 *speed = SPEED_100;
3960 *duplex = DUPLEX_FULL;
3961 break;
3963 case MII_TG3_AUX_STAT_1000HALF:
3964 *speed = SPEED_1000;
3965 *duplex = DUPLEX_HALF;
3966 break;
3968 case MII_TG3_AUX_STAT_1000FULL:
3969 *speed = SPEED_1000;
3970 *duplex = DUPLEX_FULL;
3971 break;
3973 default:
3974 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3975 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3976 SPEED_10;
3977 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3978 DUPLEX_HALF;
3979 break;
3981 *speed = SPEED_UNKNOWN;
3982 *duplex = DUPLEX_UNKNOWN;
3983 break;
3987 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3989 int err = 0;
3990 u32 val, new_adv;
3992 new_adv = ADVERTISE_CSMA;
3993 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3994 new_adv |= mii_advertise_flowctrl(flowctrl);
3996 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3997 if (err)
3998 goto done;
4000 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4001 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4003 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4004 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
4005 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4007 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4008 if (err)
4009 goto done;
4012 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4013 goto done;
4015 tw32(TG3_CPMU_EEE_MODE,
4016 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4018 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
4019 if (!err) {
4020 u32 err2;
4022 val = 0;
4023 /* Advertise 100-BaseTX EEE ability */
4024 if (advertise & ADVERTISED_100baseT_Full)
4025 val |= MDIO_AN_EEE_ADV_100TX;
4026 /* Advertise 1000-BaseT EEE ability */
4027 if (advertise & ADVERTISED_1000baseT_Full)
4028 val |= MDIO_AN_EEE_ADV_1000T;
4029 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4030 if (err)
4031 val = 0;
4033 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
4034 case ASIC_REV_5717:
4035 case ASIC_REV_57765:
4036 case ASIC_REV_57766:
4037 case ASIC_REV_5719:
4038 /* If we advertised any EEE abilities above... */
4039 if (val)
4040 val = MII_TG3_DSP_TAP26_ALNOKO |
4041 MII_TG3_DSP_TAP26_RMRXSTO |
4042 MII_TG3_DSP_TAP26_OPCSINPT;
4043 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4044 /* Fall through */
4045 case ASIC_REV_5720:
4046 case ASIC_REV_5762:
4047 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4048 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4049 MII_TG3_DSP_CH34TP2_HIBW01);
4052 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
4053 if (!err)
4054 err = err2;
4057 done:
4058 return err;
4061 static void tg3_phy_copper_begin(struct tg3 *tp)
4063 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4064 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4065 u32 adv, fc;
4067 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4068 adv = ADVERTISED_10baseT_Half |
4069 ADVERTISED_10baseT_Full;
4070 if (tg3_flag(tp, WOL_SPEED_100MB))
4071 adv |= ADVERTISED_100baseT_Half |
4072 ADVERTISED_100baseT_Full;
4074 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4075 } else {
4076 adv = tp->link_config.advertising;
4077 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4078 adv &= ~(ADVERTISED_1000baseT_Half |
4079 ADVERTISED_1000baseT_Full);
4081 fc = tp->link_config.flowctrl;
4084 tg3_phy_autoneg_cfg(tp, adv, fc);
4086 tg3_writephy(tp, MII_BMCR,
4087 BMCR_ANENABLE | BMCR_ANRESTART);
4088 } else {
4089 int i;
4090 u32 bmcr, orig_bmcr;
4092 tp->link_config.active_speed = tp->link_config.speed;
4093 tp->link_config.active_duplex = tp->link_config.duplex;
4095 bmcr = 0;
4096 switch (tp->link_config.speed) {
4097 default:
4098 case SPEED_10:
4099 break;
4101 case SPEED_100:
4102 bmcr |= BMCR_SPEED100;
4103 break;
4105 case SPEED_1000:
4106 bmcr |= BMCR_SPEED1000;
4107 break;
4110 if (tp->link_config.duplex == DUPLEX_FULL)
4111 bmcr |= BMCR_FULLDPLX;
4113 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4114 (bmcr != orig_bmcr)) {
4115 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4116 for (i = 0; i < 1500; i++) {
4117 u32 tmp;
4119 udelay(10);
4120 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4121 tg3_readphy(tp, MII_BMSR, &tmp))
4122 continue;
4123 if (!(tmp & BMSR_LSTATUS)) {
4124 udelay(40);
4125 break;
4128 tg3_writephy(tp, MII_BMCR, bmcr);
4129 udelay(40);
4134 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4136 int err;
4138 /* Turn off tap power management. */
4139 /* Set Extended packet length bit */
4140 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4142 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4143 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4144 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4145 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4146 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4148 udelay(40);
4150 return err;
4153 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4155 u32 advmsk, tgtadv, advertising;
4157 advertising = tp->link_config.advertising;
4158 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4160 advmsk = ADVERTISE_ALL;
4161 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4162 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4163 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4166 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4167 return false;
4169 if ((*lcladv & advmsk) != tgtadv)
4170 return false;
4172 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4173 u32 tg3_ctrl;
4175 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4177 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4178 return false;
4180 if (tgtadv &&
4181 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4182 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4183 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4184 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4185 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4186 } else {
4187 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4190 if (tg3_ctrl != tgtadv)
4191 return false;
4194 return true;
4197 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4199 u32 lpeth = 0;
4201 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4202 u32 val;
4204 if (tg3_readphy(tp, MII_STAT1000, &val))
4205 return false;
4207 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4210 if (tg3_readphy(tp, MII_LPA, rmtadv))
4211 return false;
4213 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4214 tp->link_config.rmt_adv = lpeth;
4216 return true;
4219 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4221 if (curr_link_up != tp->link_up) {
4222 if (curr_link_up) {
4223 tg3_carrier_on(tp);
4224 } else {
4225 tg3_carrier_off(tp);
4226 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4227 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4230 tg3_link_report(tp);
4231 return true;
4234 return false;
4237 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4239 int current_link_up;
4240 u32 bmsr, val;
4241 u32 lcl_adv, rmt_adv;
4242 u16 current_speed;
4243 u8 current_duplex;
4244 int i, err;
4246 tw32(MAC_EVENT, 0);
4248 tw32_f(MAC_STATUS,
4249 (MAC_STATUS_SYNC_CHANGED |
4250 MAC_STATUS_CFG_CHANGED |
4251 MAC_STATUS_MI_COMPLETION |
4252 MAC_STATUS_LNKSTATE_CHANGED));
4253 udelay(40);
4255 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4256 tw32_f(MAC_MI_MODE,
4257 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4258 udelay(80);
4261 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4263 /* Some third-party PHYs need to be reset on link going
4264 * down.
4266 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4267 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4268 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4269 tp->link_up) {
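/* BMSR link status is latched-low; read it back to back so the
 * second read reflects the current state instead of a stale
 * loss-of-link indication.
 */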
4270 tg3_readphy(tp, MII_BMSR, &bmsr);
4271 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4272 !(bmsr & BMSR_LSTATUS))
4273 force_reset = 1;
4275 if (force_reset)
4276 tg3_phy_reset(tp);
4278 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4279 tg3_readphy(tp, MII_BMSR, &bmsr);
4280 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4281 !tg3_flag(tp, INIT_COMPLETE))
4282 bmsr = 0;
4284 if (!(bmsr & BMSR_LSTATUS)) {
4285 err = tg3_init_5401phy_dsp(tp);
4286 if (err)
4287 return err;
4289 tg3_readphy(tp, MII_BMSR, &bmsr);
4290 for (i = 0; i < 1000; i++) {
4291 udelay(10);
4292 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4293 (bmsr & BMSR_LSTATUS)) {
4294 udelay(40);
4295 break;
4299 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4300 TG3_PHY_REV_BCM5401_B0 &&
4301 !(bmsr & BMSR_LSTATUS) &&
4302 tp->link_config.active_speed == SPEED_1000) {
4303 err = tg3_phy_reset(tp);
4304 if (!err)
4305 err = tg3_init_5401phy_dsp(tp);
4306 if (err)
4307 return err;
4310 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4311 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4312 /* 5701 {A0,B0} CRC bug workaround */
4313 tg3_writephy(tp, 0x15, 0x0a75);
4314 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4315 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4316 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4319 /* Clear pending interrupts... */
4320 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4321 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4323 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4324 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4325 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4326 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4328 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4329 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4330 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4331 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4332 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4333 else
4334 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4337 current_link_up = 0;
4338 current_speed = SPEED_UNKNOWN;
4339 current_duplex = DUPLEX_UNKNOWN;
4340 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4341 tp->link_config.rmt_adv = 0;
4343 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4344 err = tg3_phy_auxctl_read(tp,
4345 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4346 &val);
4347 if (!err && !(val & (1 << 10))) {
4348 tg3_phy_auxctl_write(tp,
4349 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4350 val | (1 << 10));
4351 goto relink;
4355 bmsr = 0;
4356 for (i = 0; i < 100; i++) {
4357 tg3_readphy(tp, MII_BMSR, &bmsr);
4358 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4359 (bmsr & BMSR_LSTATUS))
4360 break;
4361 udelay(40);
4364 if (bmsr & BMSR_LSTATUS) {
4365 u32 aux_stat, bmcr;
4367 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4368 for (i = 0; i < 2000; i++) {
4369 udelay(10);
4370 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4371 aux_stat)
4372 break;
4375 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4376 &current_speed,
4377 &current_duplex);
4379 bmcr = 0;
4380 for (i = 0; i < 200; i++) {
4381 tg3_readphy(tp, MII_BMCR, &bmcr);
4382 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4383 continue;
4384 if (bmcr && bmcr != 0x7fff)
4385 break;
4386 udelay(10);
4389 lcl_adv = 0;
4390 rmt_adv = 0;
4392 tp->link_config.active_speed = current_speed;
4393 tp->link_config.active_duplex = current_duplex;
4395 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4396 if ((bmcr & BMCR_ANENABLE) &&
4397 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4398 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4399 current_link_up = 1;
4400 } else {
4401 if (!(bmcr & BMCR_ANENABLE) &&
4402 tp->link_config.speed == current_speed &&
4403 tp->link_config.duplex == current_duplex &&
4404 tp->link_config.flowctrl ==
4405 tp->link_config.active_flowctrl) {
4406 current_link_up = 1;
4410 if (current_link_up == 1 &&
4411 tp->link_config.active_duplex == DUPLEX_FULL) {
4412 u32 reg, bit;
4414 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4415 reg = MII_TG3_FET_GEN_STAT;
4416 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4417 } else {
4418 reg = MII_TG3_EXT_STAT;
4419 bit = MII_TG3_EXT_STAT_MDIX;
4422 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4423 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4425 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4429 relink:
4430 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4431 tg3_phy_copper_begin(tp);
4433 tg3_readphy(tp, MII_BMSR, &bmsr);
4434 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4435 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4436 current_link_up = 1;
4439 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4440 if (current_link_up == 1) {
4441 if (tp->link_config.active_speed == SPEED_100 ||
4442 tp->link_config.active_speed == SPEED_10)
4443 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4444 else
4445 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4446 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4447 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4448 else
4449 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4451 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4452 if (tp->link_config.active_duplex == DUPLEX_HALF)
4453 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4455 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4456 if (current_link_up == 1 &&
4457 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4458 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4459 else
4460 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4463 /* ??? Without this setting Netgear GA302T PHY does not
4464 * ??? send/receive packets...
4466 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4467 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4468 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4469 tw32_f(MAC_MI_MODE, tp->mi_mode);
4470 udelay(80);
4473 tw32_f(MAC_MODE, tp->mac_mode);
4474 udelay(40);
4476 tg3_phy_eee_adjust(tp, current_link_up);
4478 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4479 /* Polled via timer. */
4480 tw32_f(MAC_EVENT, 0);
4481 } else {
4482 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4484 udelay(40);
4486 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4487 current_link_up == 1 &&
4488 tp->link_config.active_speed == SPEED_1000 &&
4489 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4490 udelay(120);
4491 tw32_f(MAC_STATUS,
4492 (MAC_STATUS_SYNC_CHANGED |
4493 MAC_STATUS_CFG_CHANGED));
4494 udelay(40);
4495 tg3_write_mem(tp,
4496 NIC_SRAM_FIRMWARE_MBOX,
4497 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4500 /* Prevent send BD corruption. */
4501 if (tg3_flag(tp, CLKREQ_BUG)) {
4502 if (tp->link_config.active_speed == SPEED_100 ||
4503 tp->link_config.active_speed == SPEED_10)
4504 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4505 PCI_EXP_LNKCTL_CLKREQ_EN);
4506 else
4507 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4508 PCI_EXP_LNKCTL_CLKREQ_EN);
4511 tg3_test_and_report_link_chg(tp, current_link_up);
4513 return 0;
4516 struct tg3_fiber_aneginfo {
4517 int state;
4518 #define ANEG_STATE_UNKNOWN 0
4519 #define ANEG_STATE_AN_ENABLE 1
4520 #define ANEG_STATE_RESTART_INIT 2
4521 #define ANEG_STATE_RESTART 3
4522 #define ANEG_STATE_DISABLE_LINK_OK 4
4523 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4524 #define ANEG_STATE_ABILITY_DETECT 6
4525 #define ANEG_STATE_ACK_DETECT_INIT 7
4526 #define ANEG_STATE_ACK_DETECT 8
4527 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4528 #define ANEG_STATE_COMPLETE_ACK 10
4529 #define ANEG_STATE_IDLE_DETECT_INIT 11
4530 #define ANEG_STATE_IDLE_DETECT 12
4531 #define ANEG_STATE_LINK_OK 13
4532 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4533 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4535 u32 flags;
4536 #define MR_AN_ENABLE 0x00000001
4537 #define MR_RESTART_AN 0x00000002
4538 #define MR_AN_COMPLETE 0x00000004
4539 #define MR_PAGE_RX 0x00000008
4540 #define MR_NP_LOADED 0x00000010
4541 #define MR_TOGGLE_TX 0x00000020
4542 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4543 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4544 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4545 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4546 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4547 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4548 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4549 #define MR_TOGGLE_RX 0x00002000
4550 #define MR_NP_RX 0x00004000
4552 #define MR_LINK_OK 0x80000000
4554 unsigned long link_time, cur_time;
4556 u32 ability_match_cfg;
4557 int ability_match_count;
4559 char ability_match, idle_match, ack_match;
4561 u32 txconfig, rxconfig;
4562 #define ANEG_CFG_NP 0x00000080
4563 #define ANEG_CFG_ACK 0x00000040
4564 #define ANEG_CFG_RF2 0x00000020
4565 #define ANEG_CFG_RF1 0x00000010
4566 #define ANEG_CFG_PS2 0x00000001
4567 #define ANEG_CFG_PS1 0x00008000
4568 #define ANEG_CFG_HD 0x00004000
4569 #define ANEG_CFG_FD 0x00002000
4570 #define ANEG_CFG_INVAL 0x00001f06
4573 #define ANEG_OK 0
4574 #define ANEG_DONE 1
4575 #define ANEG_TIMER_ENAB 2
4576 #define ANEG_FAILED -1
4578 #define ANEG_STATE_SETTLE_TIME 10000
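/* Software 1000BASE-X autonegotiation state machine.  The states and
 * MR_* flags appear to mirror the arbitration state diagram of IEEE
 * 802.3 clause 37; fiber_autoneg() below steps the machine to
 * completion.
 */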
4580 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4581 struct tg3_fiber_aneginfo *ap)
4583 u16 flowctrl;
4584 unsigned long delta;
4585 u32 rx_cfg_reg;
4586 int ret;
4588 if (ap->state == ANEG_STATE_UNKNOWN) {
4589 ap->rxconfig = 0;
4590 ap->link_time = 0;
4591 ap->cur_time = 0;
4592 ap->ability_match_cfg = 0;
4593 ap->ability_match_count = 0;
4594 ap->ability_match = 0;
4595 ap->idle_match = 0;
4596 ap->ack_match = 0;
4598 ap->cur_time++;
4600 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4601 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4603 if (rx_cfg_reg != ap->ability_match_cfg) {
4604 ap->ability_match_cfg = rx_cfg_reg;
4605 ap->ability_match = 0;
4606 ap->ability_match_count = 0;
4607 } else {
4608 if (++ap->ability_match_count > 1) {
4609 ap->ability_match = 1;
4610 ap->ability_match_cfg = rx_cfg_reg;
4613 if (rx_cfg_reg & ANEG_CFG_ACK)
4614 ap->ack_match = 1;
4615 else
4616 ap->ack_match = 0;
4618 ap->idle_match = 0;
4619 } else {
4620 ap->idle_match = 1;
4621 ap->ability_match_cfg = 0;
4622 ap->ability_match_count = 0;
4623 ap->ability_match = 0;
4624 ap->ack_match = 0;
4626 rx_cfg_reg = 0;
4629 ap->rxconfig = rx_cfg_reg;
4630 ret = ANEG_OK;
4632 switch (ap->state) {
4633 case ANEG_STATE_UNKNOWN:
4634 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4635 ap->state = ANEG_STATE_AN_ENABLE;
4637 /* fallthru */
4638 case ANEG_STATE_AN_ENABLE:
4639 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4640 if (ap->flags & MR_AN_ENABLE) {
4641 ap->link_time = 0;
4642 ap->cur_time = 0;
4643 ap->ability_match_cfg = 0;
4644 ap->ability_match_count = 0;
4645 ap->ability_match = 0;
4646 ap->idle_match = 0;
4647 ap->ack_match = 0;
4649 ap->state = ANEG_STATE_RESTART_INIT;
4650 } else {
4651 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4653 break;
4655 case ANEG_STATE_RESTART_INIT:
4656 ap->link_time = ap->cur_time;
4657 ap->flags &= ~(MR_NP_LOADED);
4658 ap->txconfig = 0;
4659 tw32(MAC_TX_AUTO_NEG, 0);
4660 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4661 tw32_f(MAC_MODE, tp->mac_mode);
4662 udelay(40);
4664 ret = ANEG_TIMER_ENAB;
4665 ap->state = ANEG_STATE_RESTART;
4667 /* fallthru */
4668 case ANEG_STATE_RESTART:
4669 delta = ap->cur_time - ap->link_time;
4670 if (delta > ANEG_STATE_SETTLE_TIME)
4671 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4672 else
4673 ret = ANEG_TIMER_ENAB;
4674 break;
4676 case ANEG_STATE_DISABLE_LINK_OK:
4677 ret = ANEG_DONE;
4678 break;
4680 case ANEG_STATE_ABILITY_DETECT_INIT:
4681 ap->flags &= ~(MR_TOGGLE_TX);
4682 ap->txconfig = ANEG_CFG_FD;
4683 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4684 if (flowctrl & ADVERTISE_1000XPAUSE)
4685 ap->txconfig |= ANEG_CFG_PS1;
4686 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4687 ap->txconfig |= ANEG_CFG_PS2;
4688 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4689 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4690 tw32_f(MAC_MODE, tp->mac_mode);
4691 udelay(40);
4693 ap->state = ANEG_STATE_ABILITY_DETECT;
4694 break;
4696 case ANEG_STATE_ABILITY_DETECT:
4697 if (ap->ability_match != 0 && ap->rxconfig != 0)
4698 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4699 break;
4701 case ANEG_STATE_ACK_DETECT_INIT:
4702 ap->txconfig |= ANEG_CFG_ACK;
4703 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4704 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4705 tw32_f(MAC_MODE, tp->mac_mode);
4706 udelay(40);
4708 ap->state = ANEG_STATE_ACK_DETECT;
4710 /* fallthru */
4711 case ANEG_STATE_ACK_DETECT:
4712 if (ap->ack_match != 0) {
4713 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4714 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4715 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4716 } else {
4717 ap->state = ANEG_STATE_AN_ENABLE;
4719 } else if (ap->ability_match != 0 &&
4720 ap->rxconfig == 0) {
4721 ap->state = ANEG_STATE_AN_ENABLE;
4723 break;
4725 case ANEG_STATE_COMPLETE_ACK_INIT:
4726 if (ap->rxconfig & ANEG_CFG_INVAL) {
4727 ret = ANEG_FAILED;
4728 break;
4730 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4731 MR_LP_ADV_HALF_DUPLEX |
4732 MR_LP_ADV_SYM_PAUSE |
4733 MR_LP_ADV_ASYM_PAUSE |
4734 MR_LP_ADV_REMOTE_FAULT1 |
4735 MR_LP_ADV_REMOTE_FAULT2 |
4736 MR_LP_ADV_NEXT_PAGE |
4737 MR_TOGGLE_RX |
4738 MR_NP_RX);
4739 if (ap->rxconfig & ANEG_CFG_FD)
4740 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4741 if (ap->rxconfig & ANEG_CFG_HD)
4742 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4743 if (ap->rxconfig & ANEG_CFG_PS1)
4744 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4745 if (ap->rxconfig & ANEG_CFG_PS2)
4746 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4747 if (ap->rxconfig & ANEG_CFG_RF1)
4748 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4749 if (ap->rxconfig & ANEG_CFG_RF2)
4750 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4751 if (ap->rxconfig & ANEG_CFG_NP)
4752 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4754 ap->link_time = ap->cur_time;
4756 ap->flags ^= (MR_TOGGLE_TX);
4757 if (ap->rxconfig & 0x0008)
4758 ap->flags |= MR_TOGGLE_RX;
4759 if (ap->rxconfig & ANEG_CFG_NP)
4760 ap->flags |= MR_NP_RX;
4761 ap->flags |= MR_PAGE_RX;
4763 ap->state = ANEG_STATE_COMPLETE_ACK;
4764 ret = ANEG_TIMER_ENAB;
4765 break;
4767 case ANEG_STATE_COMPLETE_ACK:
4768 if (ap->ability_match != 0 &&
4769 ap->rxconfig == 0) {
4770 ap->state = ANEG_STATE_AN_ENABLE;
4771 break;
4773 delta = ap->cur_time - ap->link_time;
4774 if (delta > ANEG_STATE_SETTLE_TIME) {
4775 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4776 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4777 } else {
4778 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4779 !(ap->flags & MR_NP_RX)) {
4780 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4781 } else {
4782 ret = ANEG_FAILED;
4786 break;
4788 case ANEG_STATE_IDLE_DETECT_INIT:
4789 ap->link_time = ap->cur_time;
4790 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4791 tw32_f(MAC_MODE, tp->mac_mode);
4792 udelay(40);
4794 ap->state = ANEG_STATE_IDLE_DETECT;
4795 ret = ANEG_TIMER_ENAB;
4796 break;
4798 case ANEG_STATE_IDLE_DETECT:
4799 if (ap->ability_match != 0 &&
4800 ap->rxconfig == 0) {
4801 ap->state = ANEG_STATE_AN_ENABLE;
4802 break;
4804 delta = ap->cur_time - ap->link_time;
4805 if (delta > ANEG_STATE_SETTLE_TIME) {
4806 /* XXX another gem from the Broadcom driver :( */
4807 ap->state = ANEG_STATE_LINK_OK;
4809 break;
4811 case ANEG_STATE_LINK_OK:
4812 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4813 ret = ANEG_DONE;
4814 break;
4816 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4817 /* ??? unimplemented */
4818 break;
4820 case ANEG_STATE_NEXT_PAGE_WAIT:
4821 /* ??? unimplemented */
4822 break;
4824 default:
4825 ret = ANEG_FAILED;
4826 break;
4829 return ret;
4832 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4834 int res = 0;
4835 struct tg3_fiber_aneginfo aninfo;
4836 int status = ANEG_FAILED;
4837 unsigned int tick;
4838 u32 tmp;
4840 tw32_f(MAC_TX_AUTO_NEG, 0);
4842 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4843 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4844 udelay(40);
4846 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4847 udelay(40);
4849 memset(&aninfo, 0, sizeof(aninfo));
4850 aninfo.flags |= MR_AN_ENABLE;
4851 aninfo.state = ANEG_STATE_UNKNOWN;
4852 aninfo.cur_time = 0;
4853 tick = 0;
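/* Bound the busy-wait: 195000 iterations with a 1 us delay gives
 * software autoneg roughly 195 ms to complete or fail.
 */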
4854 while (++tick < 195000) {
4855 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4856 if (status == ANEG_DONE || status == ANEG_FAILED)
4857 break;
4859 udelay(1);
4862 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4863 tw32_f(MAC_MODE, tp->mac_mode);
4864 udelay(40);
4866 *txflags = aninfo.txconfig;
4867 *rxflags = aninfo.flags;
4869 if (status == ANEG_DONE &&
4870 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4871 MR_LP_ADV_FULL_DUPLEX)))
4872 res = 1;
4874 return res;
4877 static void tg3_init_bcm8002(struct tg3 *tp)
4879 u32 mac_status = tr32(MAC_STATUS);
4880 int i;
4882 /* Reset when initializing for the first time or when we have a link. */
4883 if (tg3_flag(tp, INIT_COMPLETE) &&
4884 !(mac_status & MAC_STATUS_PCS_SYNCED))
4885 return;
4887 /* Set PLL lock range. */
4888 tg3_writephy(tp, 0x16, 0x8007);
4890 /* SW reset */
4891 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4893 /* Wait for reset to complete. */
4894 /* XXX schedule_timeout() ... */
4895 for (i = 0; i < 500; i++)
4896 udelay(10);
4898 /* Config mode; select PMA/Ch 1 regs. */
4899 tg3_writephy(tp, 0x10, 0x8411);
4901 /* Enable auto-lock and comdet, select txclk for tx. */
4902 tg3_writephy(tp, 0x11, 0x0a10);
4904 tg3_writephy(tp, 0x18, 0x00a0);
4905 tg3_writephy(tp, 0x16, 0x41ff);
4907 /* Assert and deassert POR. */
4908 tg3_writephy(tp, 0x13, 0x0400);
4909 udelay(40);
4910 tg3_writephy(tp, 0x13, 0x0000);
4912 tg3_writephy(tp, 0x11, 0x0a50);
4913 udelay(40);
4914 tg3_writephy(tp, 0x11, 0x0a10);
4916 /* Wait for signal to stabilize */
4917 /* XXX schedule_timeout() ... */
4918 for (i = 0; i < 15000; i++)
4919 udelay(10);
4921 /* Deselect the channel register so we can read the PHYID
4922 * later.
4924 tg3_writephy(tp, 0x10, 0x8011);
4927 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4929 u16 flowctrl;
4930 u32 sg_dig_ctrl, sg_dig_status;
4931 u32 serdes_cfg, expected_sg_dig_ctrl;
4932 int workaround, port_a;
4933 int current_link_up;
4935 serdes_cfg = 0;
4936 expected_sg_dig_ctrl = 0;
4937 workaround = 0;
4938 port_a = 1;
4939 current_link_up = 0;
4941 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4942 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4943 workaround = 1;
4944 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4945 port_a = 0;
4947 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4948 /* preserve bits 20-23 for voltage regulator */
4949 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4952 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4954 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4955 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4956 if (workaround) {
4957 u32 val = serdes_cfg;
4959 if (port_a)
4960 val |= 0xc010000;
4961 else
4962 val |= 0x4010000;
4963 tw32_f(MAC_SERDES_CFG, val);
4966 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4968 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4969 tg3_setup_flow_control(tp, 0, 0);
4970 current_link_up = 1;
4972 goto out;
4975 /* Want auto-negotiation. */
4976 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4978 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4979 if (flowctrl & ADVERTISE_1000XPAUSE)
4980 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4981 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4982 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4984 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4985 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4986 tp->serdes_counter &&
4987 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4988 MAC_STATUS_RCVD_CFG)) ==
4989 MAC_STATUS_PCS_SYNCED)) {
4990 tp->serdes_counter--;
4991 current_link_up = 1;
4992 goto out;
4994 restart_autoneg:
4995 if (workaround)
4996 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4997 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4998 udelay(5);
4999 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5001 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5002 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5003 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5004 MAC_STATUS_SIGNAL_DET)) {
5005 sg_dig_status = tr32(SG_DIG_STATUS);
5006 mac_status = tr32(MAC_STATUS);
5008 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5009 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5010 u32 local_adv = 0, remote_adv = 0;
5012 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5013 local_adv |= ADVERTISE_1000XPAUSE;
5014 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5015 local_adv |= ADVERTISE_1000XPSE_ASYM;
5017 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5018 remote_adv |= LPA_1000XPAUSE;
5019 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5020 remote_adv |= LPA_1000XPAUSE_ASYM;
5022 tp->link_config.rmt_adv =
5023 mii_adv_to_ethtool_adv_x(remote_adv);
5025 tg3_setup_flow_control(tp, local_adv, remote_adv);
5026 current_link_up = 1;
5027 tp->serdes_counter = 0;
5028 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5029 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5030 if (tp->serdes_counter)
5031 tp->serdes_counter--;
5032 else {
5033 if (workaround) {
5034 u32 val = serdes_cfg;
5036 if (port_a)
5037 val |= 0xc010000;
5038 else
5039 val |= 0x4010000;
5041 tw32_f(MAC_SERDES_CFG, val);
5044 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5045 udelay(40);
5047 /* Link parallel detection - link is up
5048  * only if we have PCS_SYNC and not
5049  * receiving config code words */
5050 mac_status = tr32(MAC_STATUS);
5051 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5052 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5053 tg3_setup_flow_control(tp, 0, 0);
5054 current_link_up = 1;
5055 tp->phy_flags |=
5056 TG3_PHYFLG_PARALLEL_DETECT;
5057 tp->serdes_counter =
5058 SERDES_PARALLEL_DET_TIMEOUT;
5059 } else
5060 goto restart_autoneg;
5063 } else {
5064 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5065 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5068 out:
5069 return current_link_up;
5072 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5074 int current_link_up = 0;
5076 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5077 goto out;
5079 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5080 u32 txflags, rxflags;
5081 int i;
5083 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5084 u32 local_adv = 0, remote_adv = 0;
5086 if (txflags & ANEG_CFG_PS1)
5087 local_adv |= ADVERTISE_1000XPAUSE;
5088 if (txflags & ANEG_CFG_PS2)
5089 local_adv |= ADVERTISE_1000XPSE_ASYM;
5091 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5092 remote_adv |= LPA_1000XPAUSE;
5093 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5094 remote_adv |= LPA_1000XPAUSE_ASYM;
5096 tp->link_config.rmt_adv =
5097 mii_adv_to_ethtool_adv_x(remote_adv);
5099 tg3_setup_flow_control(tp, local_adv, remote_adv);
5101 current_link_up = 1;
5103 for (i = 0; i < 30; i++) {
5104 udelay(20);
5105 tw32_f(MAC_STATUS,
5106 (MAC_STATUS_SYNC_CHANGED |
5107 MAC_STATUS_CFG_CHANGED));
5108 udelay(40);
5109 if ((tr32(MAC_STATUS) &
5110 (MAC_STATUS_SYNC_CHANGED |
5111 MAC_STATUS_CFG_CHANGED)) == 0)
5112 break;
5115 mac_status = tr32(MAC_STATUS);
5116 if (current_link_up == 0 &&
5117 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5118 !(mac_status & MAC_STATUS_RCVD_CFG))
5119 current_link_up = 1;
5120 } else {
5121 tg3_setup_flow_control(tp, 0, 0);
5123 /* Forcing 1000FD link up. */
5124 current_link_up = 1;
5126 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5127 udelay(40);
5129 tw32_f(MAC_MODE, tp->mac_mode);
5130 udelay(40);
5133 out:
5134 return current_link_up;
5137 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5139 u32 orig_pause_cfg;
5140 u16 orig_active_speed;
5141 u8 orig_active_duplex;
5142 u32 mac_status;
5143 int current_link_up;
5144 int i;
5146 orig_pause_cfg = tp->link_config.active_flowctrl;
5147 orig_active_speed = tp->link_config.active_speed;
5148 orig_active_duplex = tp->link_config.active_duplex;
5150 if (!tg3_flag(tp, HW_AUTONEG) &&
5151 tp->link_up &&
5152 tg3_flag(tp, INIT_COMPLETE)) {
5153 mac_status = tr32(MAC_STATUS);
5154 mac_status &= (MAC_STATUS_PCS_SYNCED |
5155 MAC_STATUS_SIGNAL_DET |
5156 MAC_STATUS_CFG_CHANGED |
5157 MAC_STATUS_RCVD_CFG);
5158 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5159 MAC_STATUS_SIGNAL_DET)) {
5160 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5161 MAC_STATUS_CFG_CHANGED));
5162 return 0;
5166 tw32_f(MAC_TX_AUTO_NEG, 0);
5168 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5169 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5170 tw32_f(MAC_MODE, tp->mac_mode);
5171 udelay(40);
5173 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5174 tg3_init_bcm8002(tp);
5176 /* Enable link change event even when serdes polling. */
5177 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5178 udelay(40);
5180 current_link_up = 0;
5181 tp->link_config.rmt_adv = 0;
5182 mac_status = tr32(MAC_STATUS);
5184 if (tg3_flag(tp, HW_AUTONEG))
5185 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5186 else
5187 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5189 tp->napi[0].hw_status->status =
5190 (SD_STATUS_UPDATED |
5191 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5193 for (i = 0; i < 100; i++) {
5194 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5195 MAC_STATUS_CFG_CHANGED));
5196 udelay(5);
5197 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5198 MAC_STATUS_CFG_CHANGED |
5199 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5200 break;
5203 mac_status = tr32(MAC_STATUS);
5204 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5205 current_link_up = 0;
5206 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5207 tp->serdes_counter == 0) {
5208 tw32_f(MAC_MODE, (tp->mac_mode |
5209 MAC_MODE_SEND_CONFIGS));
5210 udelay(1);
5211 tw32_f(MAC_MODE, tp->mac_mode);
5215 if (current_link_up == 1) {
5216 tp->link_config.active_speed = SPEED_1000;
5217 tp->link_config.active_duplex = DUPLEX_FULL;
5218 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5219 LED_CTRL_LNKLED_OVERRIDE |
5220 LED_CTRL_1000MBPS_ON));
5221 } else {
5222 tp->link_config.active_speed = SPEED_UNKNOWN;
5223 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5224 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5225 LED_CTRL_LNKLED_OVERRIDE |
5226 LED_CTRL_TRAFFIC_OVERRIDE));
5229 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5230 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5231 if (orig_pause_cfg != now_pause_cfg ||
5232 orig_active_speed != tp->link_config.active_speed ||
5233 orig_active_duplex != tp->link_config.active_duplex)
5234 tg3_link_report(tp);
5237 return 0;
5240 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5242 int current_link_up, err = 0;
5243 u32 bmsr, bmcr;
5244 u16 current_speed;
5245 u8 current_duplex;
5246 u32 local_adv, remote_adv;
5248 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5249 tw32_f(MAC_MODE, tp->mac_mode);
5250 udelay(40);
5252 tw32(MAC_EVENT, 0);
5254 tw32_f(MAC_STATUS,
5255 (MAC_STATUS_SYNC_CHANGED |
5256 MAC_STATUS_CFG_CHANGED |
5257 MAC_STATUS_MI_COMPLETION |
5258 MAC_STATUS_LNKSTATE_CHANGED));
5259 udelay(40);
5261 if (force_reset)
5262 tg3_phy_reset(tp);
5264 current_link_up = 0;
5265 current_speed = SPEED_UNKNOWN;
5266 current_duplex = DUPLEX_UNKNOWN;
5267 tp->link_config.rmt_adv = 0;
5269 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5270 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5271 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5272 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5273 bmsr |= BMSR_LSTATUS;
5274 else
5275 bmsr &= ~BMSR_LSTATUS;
5278 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5280 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5281 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5282 /* do nothing, just check for link up at the end */
5283 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5284 u32 adv, newadv;
5286 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5287 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5288 ADVERTISE_1000XPAUSE |
5289 ADVERTISE_1000XPSE_ASYM |
5290 ADVERTISE_SLCT);
5292 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5293 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5295 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5296 tg3_writephy(tp, MII_ADVERTISE, newadv);
5297 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5298 tg3_writephy(tp, MII_BMCR, bmcr);
5300 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5301 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5302 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5304 return err;
5306 } else {
5307 u32 new_bmcr;
5309 bmcr &= ~BMCR_SPEED1000;
5310 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5312 if (tp->link_config.duplex == DUPLEX_FULL)
5313 new_bmcr |= BMCR_FULLDPLX;
5315 if (new_bmcr != bmcr) {
5316 /* BMCR_SPEED1000 is a reserved bit that needs
5317 * to be set on write.
5319 new_bmcr |= BMCR_SPEED1000;
5321 /* Force a linkdown */
5322 if (tp->link_up) {
5323 u32 adv;
5325 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5326 adv &= ~(ADVERTISE_1000XFULL |
5327 ADVERTISE_1000XHALF |
5328 ADVERTISE_SLCT);
5329 tg3_writephy(tp, MII_ADVERTISE, adv);
5330 tg3_writephy(tp, MII_BMCR, bmcr |
5331 BMCR_ANRESTART |
5332 BMCR_ANENABLE);
5333 udelay(10);
5334 tg3_carrier_off(tp);
5336 tg3_writephy(tp, MII_BMCR, new_bmcr);
5337 bmcr = new_bmcr;
5338 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5339 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5340 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5341 ASIC_REV_5714) {
5342 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5343 bmsr |= BMSR_LSTATUS;
5344 else
5345 bmsr &= ~BMSR_LSTATUS;
5347 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5351 if (bmsr & BMSR_LSTATUS) {
5352 current_speed = SPEED_1000;
5353 current_link_up = 1;
5354 if (bmcr & BMCR_FULLDPLX)
5355 current_duplex = DUPLEX_FULL;
5356 else
5357 current_duplex = DUPLEX_HALF;
5359 local_adv = 0;
5360 remote_adv = 0;
5362 if (bmcr & BMCR_ANENABLE) {
5363 u32 common;
5365 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5366 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5367 common = local_adv & remote_adv;
5368 if (common & (ADVERTISE_1000XHALF |
5369 ADVERTISE_1000XFULL)) {
5370 if (common & ADVERTISE_1000XFULL)
5371 current_duplex = DUPLEX_FULL;
5372 else
5373 current_duplex = DUPLEX_HALF;
5375 tp->link_config.rmt_adv =
5376 mii_adv_to_ethtool_adv_x(remote_adv);
5377 } else if (!tg3_flag(tp, 5780_CLASS)) {
5378 /* Link is up via parallel detect */
5379 } else {
5380 current_link_up = 0;
5385 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5386 tg3_setup_flow_control(tp, local_adv, remote_adv);
5388 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5389 if (tp->link_config.active_duplex == DUPLEX_HALF)
5390 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5392 tw32_f(MAC_MODE, tp->mac_mode);
5393 udelay(40);
5395 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5397 tp->link_config.active_speed = current_speed;
5398 tp->link_config.active_duplex = current_duplex;
5400 tg3_test_and_report_link_chg(tp, current_link_up);
5401 return err;
5404 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5406 if (tp->serdes_counter) {
5407 /* Give autoneg time to complete. */
5408 tp->serdes_counter--;
5409 return;
5412 if (!tp->link_up &&
5413 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5414 u32 bmcr;
5416 tg3_readphy(tp, MII_BMCR, &bmcr);
5417 if (bmcr & BMCR_ANENABLE) {
5418 u32 phy1, phy2;
5420 /* Select shadow register 0x1f */
5421 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5422 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5424 /* Select expansion interrupt status register */
5425 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5426 MII_TG3_DSP_EXP1_INT_STAT);
5427 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5428 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5430 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5431 /* We have signal detect and not receiving
5432 * config code words, link is up by parallel
5433 * detection.
5436 bmcr &= ~BMCR_ANENABLE;
5437 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5438 tg3_writephy(tp, MII_BMCR, bmcr);
5439 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5442 } else if (tp->link_up &&
5443 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5444 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5445 u32 phy2;
5447 /* Select expansion interrupt status register */
5448 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5449 MII_TG3_DSP_EXP1_INT_STAT);
5450 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5451 if (phy2 & 0x20) {
5452 u32 bmcr;
5454 /* Config code words received, turn on autoneg. */
5455 tg3_readphy(tp, MII_BMCR, &bmcr);
5456 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5458 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5464 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5466 u32 val;
5467 int err;
5469 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5470 err = tg3_setup_fiber_phy(tp, force_reset);
5471 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5472 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5473 else
5474 err = tg3_setup_copper_phy(tp, force_reset);
5476 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5477 u32 scale;
5479 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5480 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5481 scale = 65;
5482 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5483 scale = 6;
5484 else
5485 scale = 12;
5487 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5488 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5489 tw32(GRC_MISC_CFG, val);
5492 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5493 (6 << TX_LENGTHS_IPG_SHIFT);
5494 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
5495 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
5496 val |= tr32(MAC_TX_LENGTHS) &
5497 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5498 TX_LENGTHS_CNT_DWN_VAL_MSK);
5500 if (tp->link_config.active_speed == SPEED_1000 &&
5501 tp->link_config.active_duplex == DUPLEX_HALF)
5502 tw32(MAC_TX_LENGTHS, val |
5503 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5504 else
5505 tw32(MAC_TX_LENGTHS, val |
5506 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5508 if (!tg3_flag(tp, 5705_PLUS)) {
5509 if (tp->link_up) {
5510 tw32(HOSTCC_STAT_COAL_TICKS,
5511 tp->coal.stats_block_coalesce_usecs);
5512 } else {
5513 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5517 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5518 val = tr32(PCIE_PWR_MGMT_THRESH);
5519 if (!tp->link_up)
5520 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5521 tp->pwrmgmt_thresh;
5522 else
5523 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5524 tw32(PCIE_PWR_MGMT_THRESH, val);
5527 return err;
5530 /* tp->lock must be held */
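/* The LSB is read before the MSB below; this assumes the hardware
 * latches a coherent 64-bit snapshot on the LSB read, so a counter
 * wrap between the two reads cannot return a torn value.
 */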
5531 static u64 tg3_refclk_read(struct tg3 *tp)
5533 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5534 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5537 /* tp->lock must be held */
5538 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5540 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5541 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5542 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5543 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5546 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5547 static inline void tg3_full_unlock(struct tg3 *tp);
5548 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5550 struct tg3 *tp = netdev_priv(dev);
5552 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5553 SOF_TIMESTAMPING_RX_SOFTWARE |
5554 SOF_TIMESTAMPING_SOFTWARE |
5555 SOF_TIMESTAMPING_TX_HARDWARE |
5556 SOF_TIMESTAMPING_RX_HARDWARE |
5557 SOF_TIMESTAMPING_RAW_HARDWARE;
5559 if (tp->ptp_clock)
5560 info->phc_index = ptp_clock_index(tp->ptp_clock);
5561 else
5562 info->phc_index = -1;
5564 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5566 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5567 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5568 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5569 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5570 return 0;
5573 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5575 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5576 bool neg_adj = false;
5577 u32 correction = 0;
5579 if (ppb < 0) {
5580 neg_adj = true;
5581 ppb = -ppb;
5584 /* Frequency adjustment is performed using hardware with a 24 bit
5585 * accumulator and a programmable correction value. On each clk, the
5586 * correction value gets added to the accumulator and when it
5587 * overflows, the time counter is incremented/decremented.
5589 * So conversion from ppb to correction value is
5590 * ppb * (1 << 24) / 1000000000
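 * For example, a +1 ppm adjustment (ppb = 1000000) gives
 * correction = 1000000 * (1 << 24) / 1000000000 = 16777.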
5592 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5593 TG3_EAV_REF_CLK_CORRECT_MASK;
5595 tg3_full_lock(tp, 0);
5597 if (correction)
5598 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5599 TG3_EAV_REF_CLK_CORRECT_EN |
5600 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5601 else
5602 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5604 tg3_full_unlock(tp);
5606 return 0;
5609 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5611 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5613 tg3_full_lock(tp, 0);
5614 tp->ptp_adjust += delta;
5615 tg3_full_unlock(tp);
5617 return 0;
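/* Note that adjtime is implemented purely in software: the delta
 * accumulates in tp->ptp_adjust and is applied on reads, while the
 * hardware counter itself is only rewritten by settime/resume.
 */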
5620 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5622 u64 ns;
5623 u32 remainder;
5624 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5626 tg3_full_lock(tp, 0);
5627 ns = tg3_refclk_read(tp);
5628 ns += tp->ptp_adjust;
5629 tg3_full_unlock(tp);
5631 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5632 ts->tv_nsec = remainder;
5634 return 0;
5637 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5638 const struct timespec *ts)
5640 u64 ns;
5641 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5643 ns = timespec_to_ns(ts);
5645 tg3_full_lock(tp, 0);
5646 tg3_refclk_write(tp, ns);
5647 tp->ptp_adjust = 0;
5648 tg3_full_unlock(tp);
5650 return 0;
5653 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5654 struct ptp_clock_request *rq, int on)
5656 return -EOPNOTSUPP;
5659 static const struct ptp_clock_info tg3_ptp_caps = {
5660 .owner = THIS_MODULE,
5661 .name = "tg3 clock",
5662 .max_adj = 250000000,
5663 .n_alarm = 0,
5664 .n_ext_ts = 0,
5665 .n_per_out = 0,
5666 .pps = 0,
5667 .adjfreq = tg3_ptp_adjfreq,
5668 .adjtime = tg3_ptp_adjtime,
5669 .gettime = tg3_ptp_gettime,
5670 .settime = tg3_ptp_settime,
5671 .enable = tg3_ptp_enable,
5674 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5675 struct skb_shared_hwtstamps *timestamp)
5677 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5678 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5679 tp->ptp_adjust);
5682 /* tp->lock must be held */
5683 static void tg3_ptp_init(struct tg3 *tp)
5685 if (!tg3_flag(tp, PTP_CAPABLE))
5686 return;
5688 /* Initialize the hardware clock to the system time. */
5689 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5690 tp->ptp_adjust = 0;
5691 tp->ptp_info = tg3_ptp_caps;
5694 /* tp->lock must be held */
5695 static void tg3_ptp_resume(struct tg3 *tp)
5697 if (!tg3_flag(tp, PTP_CAPABLE))
5698 return;
5700 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5701 tp->ptp_adjust = 0;
5704 static void tg3_ptp_fini(struct tg3 *tp)
5706 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5707 return;
5709 ptp_clock_unregister(tp->ptp_clock);
5710 tp->ptp_clock = NULL;
5711 tp->ptp_adjust = 0;
5714 static inline int tg3_irq_sync(struct tg3 *tp)
5716 return tp->irq_sync;
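/* Read a block of registers into the dump buffer.  dst is advanced by
 * off first, so each word lands at the same offset within the buffer
 * that the register occupies in the device's register space.
 */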
5719 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5721 int i;
5723 dst = (u32 *)((u8 *)dst + off);
5724 for (i = 0; i < len; i += sizeof(u32))
5725 *dst++ = tr32(off + i);
5728 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5730 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5731 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5732 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5733 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5734 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5735 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5736 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5737 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5738 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5739 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5740 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5741 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5742 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5743 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5744 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5745 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5746 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5747 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5748 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5750 if (tg3_flag(tp, SUPPORT_MSIX))
5751 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5753 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5754 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5755 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5756 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5757 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5758 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5759 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5760 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5762 if (!tg3_flag(tp, 5705_PLUS)) {
5763 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5764 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5765 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5768 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5769 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5770 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5771 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5772 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5774 if (tg3_flag(tp, NVRAM))
5775 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5778 static void tg3_dump_state(struct tg3 *tp)
5780 int i;
5781 u32 *regs;
5783 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5784 if (!regs) {
5785 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5786 return;
5789 if (tg3_flag(tp, PCI_EXPRESS)) {
5790 /* Read up to but not including private PCI registers */
5791 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5792 regs[i / sizeof(u32)] = tr32(i);
5793 } else
5794 tg3_dump_legacy_regs(tp, regs);
5796 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5797 if (!regs[i + 0] && !regs[i + 1] &&
5798 !regs[i + 2] && !regs[i + 3])
5799 continue;
5801 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5802 i * 4,
5803 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5806 kfree(regs);
5808 for (i = 0; i < tp->irq_cnt; i++) {
5809 struct tg3_napi *tnapi = &tp->napi[i];
5811 /* SW status block */
5812 netdev_err(tp->dev,
5813 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5815 tnapi->hw_status->status,
5816 tnapi->hw_status->status_tag,
5817 tnapi->hw_status->rx_jumbo_consumer,
5818 tnapi->hw_status->rx_consumer,
5819 tnapi->hw_status->rx_mini_consumer,
5820 tnapi->hw_status->idx[0].rx_producer,
5821 tnapi->hw_status->idx[0].tx_consumer);
5823 netdev_err(tp->dev,
5824 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5826 tnapi->last_tag, tnapi->last_irq_tag,
5827 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5828 tnapi->rx_rcb_ptr,
5829 tnapi->prodring.rx_std_prod_idx,
5830 tnapi->prodring.rx_std_cons_idx,
5831 tnapi->prodring.rx_jmb_prod_idx,
5832 tnapi->prodring.rx_jmb_cons_idx);
5836 /* This is called whenever we suspect that the system chipset is re-
5837 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5838 * is bogus tx completions. We try to recover by setting the
5839 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5840 * in the workqueue.
5842 static void tg3_tx_recover(struct tg3 *tp)
5844 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5845 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5847 netdev_warn(tp->dev,
5848 "The system may be re-ordering memory-mapped I/O "
5849 "cycles to the network device, attempting to recover. "
5850 "Please report the problem to the driver maintainer "
5851 "and include system chipset information.\n");
5853 spin_lock(&tp->lock);
5854 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5855 spin_unlock(&tp->lock);
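/* Free tx descriptor count: tx_pending minus the number in flight.
 * The mask handles producer wrap; e.g. with the 512-entry tx ring,
 * tx_prod = 5 and tx_cons = 510 gives (5 - 510) & 511 = 7 in flight.
 */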
5858 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5860 /* Tell compiler to fetch tx indices from memory. */
5861 barrier();
5862 return tnapi->tx_pending -
5863 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5866 /* Tigon3 never reports partial packet sends. So we do not
5867 * need special logic to handle SKBs that have not had all
5868 * of their frags sent yet, like SunGEM does.
5870 static void tg3_tx(struct tg3_napi *tnapi)
5872 struct tg3 *tp = tnapi->tp;
5873 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5874 u32 sw_idx = tnapi->tx_cons;
5875 struct netdev_queue *txq;
5876 int index = tnapi - tp->napi;
5877 unsigned int pkts_compl = 0, bytes_compl = 0;
5879 if (tg3_flag(tp, ENABLE_TSS))
5880 index--;
5882 txq = netdev_get_tx_queue(tp->dev, index);
5884 while (sw_idx != hw_idx) {
5885 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5886 struct sk_buff *skb = ri->skb;
5887 int i, tx_bug = 0;
5889 if (unlikely(skb == NULL)) {
5890 tg3_tx_recover(tp);
5891 return;
5894 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
5895 struct skb_shared_hwtstamps timestamp;
5896 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
5897 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
5899 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
5901 skb_tstamp_tx(skb, &timestamp);
5904 pci_unmap_single(tp->pdev,
5905 dma_unmap_addr(ri, mapping),
5906 skb_headlen(skb),
5907 PCI_DMA_TODEVICE);
5909 ri->skb = NULL;
5911 while (ri->fragmented) {
5912 ri->fragmented = false;
5913 sw_idx = NEXT_TX(sw_idx);
5914 ri = &tnapi->tx_buffers[sw_idx];
5917 sw_idx = NEXT_TX(sw_idx);
5919 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5920 ri = &tnapi->tx_buffers[sw_idx];
5921 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5922 tx_bug = 1;
5924 pci_unmap_page(tp->pdev,
5925 dma_unmap_addr(ri, mapping),
5926 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5927 PCI_DMA_TODEVICE);
5929 while (ri->fragmented) {
5930 ri->fragmented = false;
5931 sw_idx = NEXT_TX(sw_idx);
5932 ri = &tnapi->tx_buffers[sw_idx];
5935 sw_idx = NEXT_TX(sw_idx);
5938 pkts_compl++;
5939 bytes_compl += skb->len;
5941 dev_kfree_skb(skb);
5943 if (unlikely(tx_bug)) {
5944 tg3_tx_recover(tp);
5945 return;
5949 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5951 tnapi->tx_cons = sw_idx;
5953 /* Need to make the tx_cons update visible to tg3_start_xmit()
5954 * before checking for netif_queue_stopped(). Without the
5955 * memory barrier, there is a small possibility that tg3_start_xmit()
5956 * will miss it and cause the queue to be stopped forever.
5958 smp_mb();
5960 if (unlikely(netif_tx_queue_stopped(txq) &&
5961 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5962 __netif_tx_lock(txq, smp_processor_id());
5963 if (netif_tx_queue_stopped(txq) &&
5964 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5965 netif_tx_wake_queue(txq);
5966 __netif_tx_unlock(txq);
5970 static void tg3_frag_free(bool is_frag, void *data)
5972 if (is_frag)
5973 put_page(virt_to_head_page(data));
5974 else
5975 kfree(data);
5978 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5980 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5981 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5983 if (!ri->data)
5984 return;
5986 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5987 map_sz, PCI_DMA_FROMDEVICE);
5988 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5989 ri->data = NULL;
5993 /* Returns size of skb allocated or < 0 on error.
5995 * We only need to fill in the address because the other members
5996 * of the RX descriptor are invariant, see tg3_init_rings.
5998 * Note the purposeful asymmetry of cpu vs. chip accesses. For
5999 * posting buffers we only dirty the first cache line of the RX
6000 * descriptor (containing the address). Whereas for the RX status
6001 * buffers the cpu only reads the last cache line of the RX descriptor
6002 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6004 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6005 u32 opaque_key, u32 dest_idx_unmasked,
6006 unsigned int *frag_size)
6008 struct tg3_rx_buffer_desc *desc;
6009 struct ring_info *map;
6010 u8 *data;
6011 dma_addr_t mapping;
6012 int skb_size, data_size, dest_idx;
6014 switch (opaque_key) {
6015 case RXD_OPAQUE_RING_STD:
6016 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6017 desc = &tpr->rx_std[dest_idx];
6018 map = &tpr->rx_std_buffers[dest_idx];
6019 data_size = tp->rx_pkt_map_sz;
6020 break;
6022 case RXD_OPAQUE_RING_JUMBO:
6023 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6024 desc = &tpr->rx_jmb[dest_idx].std;
6025 map = &tpr->rx_jmb_buffers[dest_idx];
6026 data_size = TG3_RX_JMB_MAP_SZ;
6027 break;
6029 default:
6030 return -EINVAL;
6033 /* Do not overwrite any of the map or rp information
6034 * until we are sure we can commit to a new buffer.
6036 * Callers depend upon this behavior and assume that
6037 * we leave everything unchanged if we fail.
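/* Sub-page buffers come from the page frag allocator so that
 * build_skb() can take them over cheaply; larger (e.g. jumbo)
 * buffers fall back to kmalloc(), with *frag_size = 0 signalling
 * callers to free them via kfree() rather than put_page().
 */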
6039 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6040 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6041 if (skb_size <= PAGE_SIZE) {
6042 data = netdev_alloc_frag(skb_size);
6043 *frag_size = skb_size;
6044 } else {
6045 data = kmalloc(skb_size, GFP_ATOMIC);
6046 *frag_size = 0;
6048 if (!data)
6049 return -ENOMEM;
6051 mapping = pci_map_single(tp->pdev,
6052 data + TG3_RX_OFFSET(tp),
6053 data_size,
6054 PCI_DMA_FROMDEVICE);
6055 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6056 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6057 return -EIO;
6060 map->data = data;
6061 dma_unmap_addr_set(map, mapping, mapping);
6063 desc->addr_hi = ((u64)mapping >> 32);
6064 desc->addr_lo = ((u64)mapping & 0xffffffff);
6066 return data_size;
6069 /* We only need to copy the address over because the other
6070 * members of the RX descriptor are invariant. See notes above
6071 * tg3_alloc_rx_data for full details.
6073 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6074 struct tg3_rx_prodring_set *dpr,
6075 u32 opaque_key, int src_idx,
6076 u32 dest_idx_unmasked)
6078 struct tg3 *tp = tnapi->tp;
6079 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6080 struct ring_info *src_map, *dest_map;
6081 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6082 int dest_idx;
6084 switch (opaque_key) {
6085 case RXD_OPAQUE_RING_STD:
6086 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6087 dest_desc = &dpr->rx_std[dest_idx];
6088 dest_map = &dpr->rx_std_buffers[dest_idx];
6089 src_desc = &spr->rx_std[src_idx];
6090 src_map = &spr->rx_std_buffers[src_idx];
6091 break;
6093 case RXD_OPAQUE_RING_JUMBO:
6094 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6095 dest_desc = &dpr->rx_jmb[dest_idx].std;
6096 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6097 src_desc = &spr->rx_jmb[src_idx].std;
6098 src_map = &spr->rx_jmb_buffers[src_idx];
6099 break;
6101 default:
6102 return;
6105 dest_map->data = src_map->data;
6106 dma_unmap_addr_set(dest_map, mapping,
6107 dma_unmap_addr(src_map, mapping));
6108 dest_desc->addr_hi = src_desc->addr_hi;
6109 dest_desc->addr_lo = src_desc->addr_lo;
6111 /* Ensure that the update to the skb happens after the physical
6112 * addresses have been transferred to the new BD location.
6114 smp_wmb();
6116 src_map->data = NULL;
6119 /* The RX ring scheme is composed of multiple rings which post fresh
6120 * buffers to the chip, and one special ring the chip uses to report
6121 * status back to the host.
6123 * The special ring reports the status of received packets to the
6124 * host. The chip does not write into the original descriptor the
6125 * RX buffer was obtained from. The chip simply takes the original
6126 * descriptor as provided by the host, updates the status and length
6127 * field, then writes this into the next status ring entry.
6129 * Each ring the host uses to post buffers to the chip is described
6130 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6131 * it is first placed into the on-chip RAM. When the packet's length
6132 * is known, the chip walks down the TG3_BDINFO entries to select the ring.
6133 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6134 * which is within the range of the new packet's length is chosen.
6136 * The "separate ring for rx status" scheme may sound queer, but it makes
6137 * sense from a cache coherency perspective. If only the host writes
6138 * to the buffer post rings, and only the chip writes to the rx status
6139 * rings, then cache lines never move beyond shared-modified state.
6140 * If both the host and chip were to write into the same ring, cache line
6141 * eviction could occur since both entities want it in an exclusive state.
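 * In short (illustrative summary):
 *   host  --std/jumbo producer rings-->  chip   (fresh empty buffers)
 *   chip  --return (status) ring------>  host   (filled-buffer cookies)
 */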
6143 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6145 struct tg3 *tp = tnapi->tp;
6146 u32 work_mask, rx_std_posted = 0;
6147 u32 std_prod_idx, jmb_prod_idx;
6148 u32 sw_idx = tnapi->rx_rcb_ptr;
6149 u16 hw_idx;
6150 int received;
6151 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6153 hw_idx = *(tnapi->rx_rcb_prod_idx);
6155 * We need to order the read of hw_idx and the read of
6156 * the opaque cookie.
6158 rmb();
6159 work_mask = 0;
6160 received = 0;
6161 std_prod_idx = tpr->rx_std_prod_idx;
6162 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6163 while (sw_idx != hw_idx && budget > 0) {
6164 struct ring_info *ri;
6165 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6166 unsigned int len;
6167 struct sk_buff *skb;
6168 dma_addr_t dma_addr;
6169 u32 opaque_key, desc_idx, *post_ptr;
6170 u8 *data;
6171 u64 tstamp = 0;
6173 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6174 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6175 if (opaque_key == RXD_OPAQUE_RING_STD) {
6176 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6177 dma_addr = dma_unmap_addr(ri, mapping);
6178 data = ri->data;
6179 post_ptr = &std_prod_idx;
6180 rx_std_posted++;
6181 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6182 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6183 dma_addr = dma_unmap_addr(ri, mapping);
6184 data = ri->data;
6185 post_ptr = &jmb_prod_idx;
6186 } else
6187 goto next_pkt_nopost;
6189 work_mask |= opaque_key;
6191 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6192 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6193 drop_it:
6194 tg3_recycle_rx(tnapi, tpr, opaque_key,
6195 desc_idx, *post_ptr);
6196 drop_it_no_recycle:
6197 /* Other statistics are kept track of by the card. */
6198 tp->rx_dropped++;
6199 goto next_pkt;
6202 prefetch(data + TG3_RX_OFFSET(tp));
6203 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6204 ETH_FCS_LEN;
6206 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6207 RXD_FLAG_PTPSTAT_PTPV1 ||
6208 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6209 RXD_FLAG_PTPSTAT_PTPV2) {
6210 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6211 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
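/* Packets above the copy threshold keep their DMA buffer: we post a
 * replacement buffer and let build_skb() wrap the old data.  Smaller
 * packets are copied into a freshly allocated skb and the original
 * buffer is recycled back onto the producer ring.
 */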
6214 if (len > TG3_RX_COPY_THRESH(tp)) {
6215 int skb_size;
6216 unsigned int frag_size;
6218 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6219 *post_ptr, &frag_size);
6220 if (skb_size < 0)
6221 goto drop_it;
6223 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6224 PCI_DMA_FROMDEVICE);
6226 skb = build_skb(data, frag_size);
6227 if (!skb) {
6228 tg3_frag_free(frag_size != 0, data);
6229 goto drop_it_no_recycle;
6231 skb_reserve(skb, TG3_RX_OFFSET(tp));
6232 /* Ensure that the update to the data happens
6233 * after the usage of the old DMA mapping.
6235 smp_wmb();
6237 ri->data = NULL;
6239 } else {
6240 tg3_recycle_rx(tnapi, tpr, opaque_key,
6241 desc_idx, *post_ptr);
6243 skb = netdev_alloc_skb(tp->dev,
6244 len + TG3_RAW_IP_ALIGN);
6245 if (skb == NULL)
6246 goto drop_it_no_recycle;
6248 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6249 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6250 memcpy(skb->data,
6251 data + TG3_RX_OFFSET(tp),
6252 len);
6253 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6256 skb_put(skb, len);
6257 if (tstamp)
6258 tg3_hwclock_to_timestamp(tp, tstamp,
6259 skb_hwtstamps(skb));
6261 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6262 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6263 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6264 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6265 skb->ip_summed = CHECKSUM_UNNECESSARY;
6266 else
6267 skb_checksum_none_assert(skb);
6269 skb->protocol = eth_type_trans(skb, tp->dev);
6271 if (len > (tp->dev->mtu + ETH_HLEN) &&
6272 skb->protocol != htons(ETH_P_8021Q)) {
6273 dev_kfree_skb(skb);
6274 goto drop_it_no_recycle;
6277 if (desc->type_flags & RXD_FLAG_VLAN &&
6278 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6279 __vlan_hwaccel_put_tag(skb,
6280 desc->err_vlan & RXD_VLAN_MASK);
6282 napi_gro_receive(&tnapi->napi, skb);
6284 received++;
6285 budget--;
6287 next_pkt:
6288 (*post_ptr)++;
6290 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6291 tpr->rx_std_prod_idx = std_prod_idx &
6292 tp->rx_std_ring_mask;
6293 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6294 tpr->rx_std_prod_idx);
6295 work_mask &= ~RXD_OPAQUE_RING_STD;
6296 rx_std_posted = 0;
6298 next_pkt_nopost:
6299 sw_idx++;
6300 sw_idx &= tp->rx_ret_ring_mask;
6302 /* Refresh hw_idx to see if there is new work */
6303 if (sw_idx == hw_idx) {
6304 hw_idx = *(tnapi->rx_rcb_prod_idx);
6305 rmb();
6309 /* ACK the status ring. */
6310 tnapi->rx_rcb_ptr = sw_idx;
6311 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6313 /* Refill RX ring(s). */
6314 if (!tg3_flag(tp, ENABLE_RSS)) {
6315 /* Sync BD data before updating mailbox */
6316 wmb();
6318 if (work_mask & RXD_OPAQUE_RING_STD) {
6319 tpr->rx_std_prod_idx = std_prod_idx &
6320 tp->rx_std_ring_mask;
6321 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6322 tpr->rx_std_prod_idx);
6324 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6325 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6326 tp->rx_jmb_ring_mask;
6327 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6328 tpr->rx_jmb_prod_idx);
6330 mmiowb();
6331 } else if (work_mask) {
6332 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6333 * updated before the producer indices can be updated.
6335 smp_wmb();
6337 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6338 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6340 if (tnapi != &tp->napi[1]) {
6341 tp->rx_refill = true;
6342 napi_schedule(&tp->napi[1].napi);
6346 return received;
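/* Editor's note: an illustrative, self-contained sketch of the index
 * arithmetic tg3_rx() uses above. Because every tg3 ring size is a
 * power of two, masking with (size - 1) implements the circular wrap;
 * the helper name and the example mask are not from the driver.
 */
static inline u32 tg3_example_ring_advance(u32 idx, u32 ring_mask)
{
	/* e.g. with a 512-entry ring, ring_mask is 0x1ff and
	 * 511 + 1 wraps back to 0.
	 */
	return (idx + 1) & ring_mask;
}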
6349 static void tg3_poll_link(struct tg3 *tp)
6351 /* handle link change and other phy events */
6352 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6353 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6355 if (sblk->status & SD_STATUS_LINK_CHG) {
6356 sblk->status = SD_STATUS_UPDATED |
6357 (sblk->status & ~SD_STATUS_LINK_CHG);
6358 spin_lock(&tp->lock);
6359 if (tg3_flag(tp, USE_PHYLIB)) {
6360 tw32_f(MAC_STATUS,
6361 (MAC_STATUS_SYNC_CHANGED |
6362 MAC_STATUS_CFG_CHANGED |
6363 MAC_STATUS_MI_COMPLETION |
6364 MAC_STATUS_LNKSTATE_CHANGED));
6365 udelay(40);
6366 } else
6367 tg3_setup_phy(tp, 0);
6368 spin_unlock(&tp->lock);
6373 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6374 struct tg3_rx_prodring_set *dpr,
6375 struct tg3_rx_prodring_set *spr)
6377 u32 si, di, cpycnt, src_prod_idx;
6378 int i, err = 0;
6380 while (1) {
6381 src_prod_idx = spr->rx_std_prod_idx;
6383 /* Make sure updates to the rx_std_buffers[] entries and the
6384 * standard producer index are seen in the correct order.
6386 smp_rmb();
6388 if (spr->rx_std_cons_idx == src_prod_idx)
6389 break;
6391 if (spr->rx_std_cons_idx < src_prod_idx)
6392 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6393 else
6394 cpycnt = tp->rx_std_ring_mask + 1 -
6395 spr->rx_std_cons_idx;
6397 cpycnt = min(cpycnt,
6398 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6400 si = spr->rx_std_cons_idx;
6401 di = dpr->rx_std_prod_idx;
6403 for (i = di; i < di + cpycnt; i++) {
6404 if (dpr->rx_std_buffers[i].data) {
6405 cpycnt = i - di;
6406 err = -ENOSPC;
6407 break;
6411 if (!cpycnt)
6412 break;
6414 /* Ensure that updates to the rx_std_buffers ring and the
6415 * shadowed hardware producer ring from tg3_recycle_skb() are
6416 * ordered correctly WRT the skb check above.
6418 smp_rmb();
6420 memcpy(&dpr->rx_std_buffers[di],
6421 &spr->rx_std_buffers[si],
6422 cpycnt * sizeof(struct ring_info));
6424 for (i = 0; i < cpycnt; i++, di++, si++) {
6425 struct tg3_rx_buffer_desc *sbd, *dbd;
6426 sbd = &spr->rx_std[si];
6427 dbd = &dpr->rx_std[di];
6428 dbd->addr_hi = sbd->addr_hi;
6429 dbd->addr_lo = sbd->addr_lo;
6432 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6433 tp->rx_std_ring_mask;
6434 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6435 tp->rx_std_ring_mask;
6438 while (1) {
6439 src_prod_idx = spr->rx_jmb_prod_idx;
6441 /* Make sure updates to the rx_jmb_buffers[] entries and
6442 * the jumbo producer index are seen in the correct order.
6444 smp_rmb();
6446 if (spr->rx_jmb_cons_idx == src_prod_idx)
6447 break;
6449 if (spr->rx_jmb_cons_idx < src_prod_idx)
6450 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6451 else
6452 cpycnt = tp->rx_jmb_ring_mask + 1 -
6453 spr->rx_jmb_cons_idx;
6455 cpycnt = min(cpycnt,
6456 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6458 si = spr->rx_jmb_cons_idx;
6459 di = dpr->rx_jmb_prod_idx;
6461 for (i = di; i < di + cpycnt; i++) {
6462 if (dpr->rx_jmb_buffers[i].data) {
6463 cpycnt = i - di;
6464 err = -ENOSPC;
6465 break;
6469 if (!cpycnt)
6470 break;
6472 /* Ensure that updates to the rx_jmb_buffers ring and the
6473 * shadowed hardware producer ring from tg3_recycle_skb() are
6474 * ordered correctly WRT the skb check above.
6476 smp_rmb();
6478 memcpy(&dpr->rx_jmb_buffers[di],
6479 &spr->rx_jmb_buffers[si],
6480 cpycnt * sizeof(struct ring_info));
6482 for (i = 0; i < cpycnt; i++, di++, si++) {
6483 struct tg3_rx_buffer_desc *sbd, *dbd;
6484 sbd = &spr->rx_jmb[si].std;
6485 dbd = &dpr->rx_jmb[di].std;
6486 dbd->addr_hi = sbd->addr_hi;
6487 dbd->addr_lo = sbd->addr_lo;
6490 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6491 tp->rx_jmb_ring_mask;
6492 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6493 tp->rx_jmb_ring_mask;
6496 return err;
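/* Editor's note: a condensed restatement of the cpycnt computation in
 * tg3_rx_prodring_xfer() above, as a self-contained helper. It returns
 * how many ring entries can be memcpy'd without wrapping: the plain
 * distance when the producer is ahead, or the run up to the end of the
 * ring once the producer has wrapped past the consumer. The name is
 * illustrative only.
 */
static inline u32 tg3_example_contig_count(u32 cons, u32 prod, u32 mask)
{
	if (cons <= prod)
		return prod - cons;
	return mask + 1 - cons;	/* entries up to the wrap point */
}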
6499 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6501 struct tg3 *tp = tnapi->tp;
6503 /* run TX completion thread */
6504 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6505 tg3_tx(tnapi);
6506 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6507 return work_done;
6510 if (!tnapi->rx_rcb_prod_idx)
6511 return work_done;
6513 /* run RX thread, within the bounds set by NAPI.
6514 * All RX "locking" is done by ensuring outside
6515 * code synchronizes with tg3->napi.poll()
6517 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6518 work_done += tg3_rx(tnapi, budget - work_done);
6520 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6521 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6522 int i, err = 0;
6523 u32 std_prod_idx = dpr->rx_std_prod_idx;
6524 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6526 tp->rx_refill = false;
6527 for (i = 1; i <= tp->rxq_cnt; i++)
6528 err |= tg3_rx_prodring_xfer(tp, dpr,
6529 &tp->napi[i].prodring);
6531 wmb();
6533 if (std_prod_idx != dpr->rx_std_prod_idx)
6534 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6535 dpr->rx_std_prod_idx);
6537 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6538 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6539 dpr->rx_jmb_prod_idx);
6541 mmiowb();
6543 if (err)
6544 tw32_f(HOSTCC_MODE, tp->coal_now);
6547 return work_done;
6550 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6552 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6553 schedule_work(&tp->reset_task);
6556 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6558 cancel_work_sync(&tp->reset_task);
6559 tg3_flag_clear(tp, RESET_TASK_PENDING);
6560 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6563 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6565 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6566 struct tg3 *tp = tnapi->tp;
6567 int work_done = 0;
6568 struct tg3_hw_status *sblk = tnapi->hw_status;
6570 while (1) {
6571 work_done = tg3_poll_work(tnapi, work_done, budget);
6573 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6574 goto tx_recovery;
6576 if (unlikely(work_done >= budget))
6577 break;
6579 /* tp->last_tag is used in tg3_int_reenable() below
6580 * to tell the hw how much work has been processed,
6581 * so we must read it before checking for more work.
6583 tnapi->last_tag = sblk->status_tag;
6584 tnapi->last_irq_tag = tnapi->last_tag;
6585 rmb();
6587 /* check for RX/TX work to do */
6588 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6589 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6591 /* This test is not race-free, but looping again
6592 * reduces the number of interrupts.
6594 if (tnapi == &tp->napi[1] && tp->rx_refill)
6595 continue;
6597 napi_complete(napi);
6598 /* Reenable interrupts. */
6599 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6601 /* This test here is synchronized by napi_schedule()
6602 * and napi_complete() to close the race condition.
6604 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6605 tw32(HOSTCC_MODE, tp->coalesce_mode |
6606 HOSTCC_MODE_ENABLE |
6607 tnapi->coal_now);
6609 mmiowb();
6610 break;
6614 return work_done;
6616 tx_recovery:
6617 /* work_done is guaranteed to be less than budget. */
6618 napi_complete(napi);
6619 tg3_reset_task_schedule(tp);
6620 return work_done;
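/* Editor's note: a sketch of the tagged-status re-enable performed
 * above, assuming the tw32_mailbox() accessor used throughout this
 * file. Writing the last processed tag into the mailbox both unmasks
 * the vector and reports progress; the chip raises a new interrupt
 * only if its current tag differs. The helper name is illustrative.
 */
static inline void tg3_example_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
}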
6623 static void tg3_process_error(struct tg3 *tp)
6625 u32 val;
6626 bool real_error = false;
6628 if (tg3_flag(tp, ERROR_PROCESSED))
6629 return;
6631 /* Check Flow Attention register */
6632 val = tr32(HOSTCC_FLOW_ATTN);
6633 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6634 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6635 real_error = true;
6638 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6639 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6640 real_error = true;
6643 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6644 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6645 real_error = true;
6648 if (!real_error)
6649 return;
6651 tg3_dump_state(tp);
6653 tg3_flag_set(tp, ERROR_PROCESSED);
6654 tg3_reset_task_schedule(tp);
6657 static int tg3_poll(struct napi_struct *napi, int budget)
6659 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6660 struct tg3 *tp = tnapi->tp;
6661 int work_done = 0;
6662 struct tg3_hw_status *sblk = tnapi->hw_status;
6664 while (1) {
6665 if (sblk->status & SD_STATUS_ERROR)
6666 tg3_process_error(tp);
6668 tg3_poll_link(tp);
6670 work_done = tg3_poll_work(tnapi, work_done, budget);
6672 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6673 goto tx_recovery;
6675 if (unlikely(work_done >= budget))
6676 break;
6678 if (tg3_flag(tp, TAGGED_STATUS)) {
6679 /* tp->last_tag is used in tg3_int_reenable() below
6680 * to tell the hw how much work has been processed,
6681 * so we must read it before checking for more work.
6683 tnapi->last_tag = sblk->status_tag;
6684 tnapi->last_irq_tag = tnapi->last_tag;
6685 rmb();
6686 } else
6687 sblk->status &= ~SD_STATUS_UPDATED;
6689 if (likely(!tg3_has_work(tnapi))) {
6690 napi_complete(napi);
6691 tg3_int_reenable(tnapi);
6692 break;
6696 return work_done;
6698 tx_recovery:
6699 /* work_done is guaranteed to be less than budget. */
6700 napi_complete(napi);
6701 tg3_reset_task_schedule(tp);
6702 return work_done;
6705 static void tg3_napi_disable(struct tg3 *tp)
6707 int i;
6709 for (i = tp->irq_cnt - 1; i >= 0; i--)
6710 napi_disable(&tp->napi[i].napi);
6713 static void tg3_napi_enable(struct tg3 *tp)
6715 int i;
6717 for (i = 0; i < tp->irq_cnt; i++)
6718 napi_enable(&tp->napi[i].napi);
6721 static void tg3_napi_init(struct tg3 *tp)
6723 int i;
6725 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6726 for (i = 1; i < tp->irq_cnt; i++)
6727 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6730 static void tg3_napi_fini(struct tg3 *tp)
6732 int i;
6734 for (i = 0; i < tp->irq_cnt; i++)
6735 netif_napi_del(&tp->napi[i].napi);
6738 static inline void tg3_netif_stop(struct tg3 *tp)
6740 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6741 tg3_napi_disable(tp);
6742 netif_carrier_off(tp->dev);
6743 netif_tx_disable(tp->dev);
6746 /* tp->lock must be held */
6747 static inline void tg3_netif_start(struct tg3 *tp)
6749 tg3_ptp_resume(tp);
6751 /* NOTE: unconditional netif_tx_wake_all_queues is only
6752 * appropriate so long as all callers are assured to
6753 * have free tx slots (such as after tg3_init_hw)
6755 netif_tx_wake_all_queues(tp->dev);
6757 if (tp->link_up)
6758 netif_carrier_on(tp->dev);
6760 tg3_napi_enable(tp);
6761 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6762 tg3_enable_ints(tp);
6765 static void tg3_irq_quiesce(struct tg3 *tp)
6767 int i;
6769 BUG_ON(tp->irq_sync);
6771 tp->irq_sync = 1;
6772 smp_mb();
6774 for (i = 0; i < tp->irq_cnt; i++)
6775 synchronize_irq(tp->napi[i].irq_vec);
6778 /* Fully shut down all tg3 driver activity elsewhere in the system.
6779 * If irq_sync is non-zero, then the IRQ handler must also be
6780 * synchronized with. Most of the time this is not necessary, except
6781 * when shutting down the device.
6783 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6785 spin_lock_bh(&tp->lock);
6786 if (irq_sync)
6787 tg3_irq_quiesce(tp);
6790 static inline void tg3_full_unlock(struct tg3 *tp)
6792 spin_unlock_bh(&tp->lock);
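/* Editor's note: a sketch of the intended calling pattern for the two
 * helpers above. Passing irq_sync as non-zero additionally waits for
 * in-flight interrupt handlers, so it is only needed on teardown
 * paths. The function name is illustrative, not part of the driver.
 */
static void tg3_example_locked_reconfig(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);	/* quiesce IRQs as well */
	/* ... reconfigure or shut down the device here ... */
	tg3_full_unlock(tp);
}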
6795 /* One-shot MSI handler - Chip automatically disables interrupt
6796 * after sending MSI so driver doesn't have to do it.
6798 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6800 struct tg3_napi *tnapi = dev_id;
6801 struct tg3 *tp = tnapi->tp;
6803 prefetch(tnapi->hw_status);
6804 if (tnapi->rx_rcb)
6805 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6807 if (likely(!tg3_irq_sync(tp)))
6808 napi_schedule(&tnapi->napi);
6810 return IRQ_HANDLED;
6813 /* MSI ISR - No need to check for interrupt sharing and no need to
6814 * flush status block and interrupt mailbox. PCI ordering rules
6815 * guarantee that MSI will arrive after the status block.
6817 static irqreturn_t tg3_msi(int irq, void *dev_id)
6819 struct tg3_napi *tnapi = dev_id;
6820 struct tg3 *tp = tnapi->tp;
6822 prefetch(tnapi->hw_status);
6823 if (tnapi->rx_rcb)
6824 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6826 * Writing any value to intr-mbox-0 clears PCI INTA# and
6827 * chip-internal interrupt pending events.
6828 * Writing non-zero to intr-mbox-0 additionally tells the
6829 * NIC to stop sending us irqs, engaging "in-intr-handler"
6830 * event coalescing.
6832 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6833 if (likely(!tg3_irq_sync(tp)))
6834 napi_schedule(&tnapi->napi);
6836 return IRQ_RETVAL(1);
6839 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6841 struct tg3_napi *tnapi = dev_id;
6842 struct tg3 *tp = tnapi->tp;
6843 struct tg3_hw_status *sblk = tnapi->hw_status;
6844 unsigned int handled = 1;
6846 /* In INTx mode, it is possible for the interrupt to arrive at
6847 * the CPU before the status block that was posted prior to the interrupt.
6848 * Reading the PCI State register will confirm whether the
6849 * interrupt is ours and will flush the status block.
6851 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6852 if (tg3_flag(tp, CHIP_RESETTING) ||
6853 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6854 handled = 0;
6855 goto out;
6860 * Writing any value to intr-mbox-0 clears PCI INTA# and
6861 * chip-internal interrupt pending events.
6862 * Writing non-zero to intr-mbox-0 additionally tells the
6863 * NIC to stop sending us irqs, engaging "in-intr-handler"
6864 * event coalescing.
6866 * Flush the mailbox to de-assert the IRQ immediately to prevent
6867 * spurious interrupts. The flush impacts performance but
6868 * excessive spurious interrupts can be worse in some cases.
6870 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6871 if (tg3_irq_sync(tp))
6872 goto out;
6873 sblk->status &= ~SD_STATUS_UPDATED;
6874 if (likely(tg3_has_work(tnapi))) {
6875 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6876 napi_schedule(&tnapi->napi);
6877 } else {
6878 /* No work, shared interrupt perhaps? Re-enable
6879 * interrupts and flush that PCI write.
6881 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6882 0x00000000);
6884 out:
6885 return IRQ_RETVAL(handled);
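/* Editor's note: a sketch of the mailbox semantics the handlers above
 * rely on, assuming the tw32_mailbox_f() accessor from this file. Any
 * write clears PCI INTA# and pending events; a non-zero value
 * additionally masks further irqs until a flushed write of zero
 * re-enables them. The helper name is illustrative.
 */
static inline void tg3_example_intx_mask(struct tg3 *tp, bool mask)
{
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       mask ? 0x00000001 : 0x00000000);
}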
6888 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6890 struct tg3_napi *tnapi = dev_id;
6891 struct tg3 *tp = tnapi->tp;
6892 struct tg3_hw_status *sblk = tnapi->hw_status;
6893 unsigned int handled = 1;
6895 /* In INTx mode, it is possible for the interrupt to arrive at
6896 * the CPU before the status block that was posted prior to the interrupt.
6897 * Reading the PCI State register will confirm whether the
6898 * interrupt is ours and will flush the status block.
6900 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6901 if (tg3_flag(tp, CHIP_RESETTING) ||
6902 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6903 handled = 0;
6904 goto out;
6909 * Writing any value to intr-mbox-0 clears PCI INTA# and
6910 * chip-internal interrupt pending events.
6911 * Writing non-zero to intr-mbox-0 additionally tells the
6912 * NIC to stop sending us irqs, engaging "in-intr-handler"
6913 * event coalescing.
6915 * Flush the mailbox to de-assert the IRQ immediately to prevent
6916 * spurious interrupts. The flush impacts performance but
6917 * excessive spurious interrupts can be worse in some cases.
6919 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6922 * In a shared interrupt configuration, sometimes other devices'
6923 * interrupts will scream. We record the current status tag here
6924 * so that the above check can report that the screaming interrupts
6925 * are unhandled. Eventually they will be silenced.
6927 tnapi->last_irq_tag = sblk->status_tag;
6929 if (tg3_irq_sync(tp))
6930 goto out;
6932 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6934 napi_schedule(&tnapi->napi);
6936 out:
6937 return IRQ_RETVAL(handled);
6940 /* ISR for interrupt test */
6941 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6943 struct tg3_napi *tnapi = dev_id;
6944 struct tg3 *tp = tnapi->tp;
6945 struct tg3_hw_status *sblk = tnapi->hw_status;
6947 if ((sblk->status & SD_STATUS_UPDATED) ||
6948 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6949 tg3_disable_ints(tp);
6950 return IRQ_RETVAL(1);
6952 return IRQ_RETVAL(0);
6955 #ifdef CONFIG_NET_POLL_CONTROLLER
6956 static void tg3_poll_controller(struct net_device *dev)
6958 int i;
6959 struct tg3 *tp = netdev_priv(dev);
6961 for (i = 0; i < tp->irq_cnt; i++)
6962 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6964 #endif
6966 static void tg3_tx_timeout(struct net_device *dev)
6968 struct tg3 *tp = netdev_priv(dev);
6970 if (netif_msg_tx_err(tp)) {
6971 netdev_err(dev, "transmit timed out, resetting\n");
6972 tg3_dump_state(tp);
6975 tg3_reset_task_schedule(tp);
6978 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6979 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6981 u32 base = (u32) mapping & 0xffffffff;
6983 return (base > 0xffffdcc0) && (base + len + 8 < base);
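/* Editor's note: a self-contained worked example of the wraparound
 * trick above. For base = 0xffffff00 and len = 0x200, the 32-bit sum
 * base + len + 8 wraps to 0x108, which is less than base, so the
 * buffer straddles a 4GB boundary. The 8 extra bytes and the
 * 0xffffdcc0 threshold are conservative margins for the hardware bug;
 * the helper name is illustrative.
 */
static inline int tg3_example_crosses_4g(u32 base, u32 len)
{
	return base + len + 8 < base;	/* true only on 32-bit wrap */
}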
6986 /* Test for DMA addresses > 40-bit */
6987 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6988 int len)
6990 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6991 if (tg3_flag(tp, 40BIT_DMA_BUG))
6992 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6993 return 0;
6994 #else
6995 return 0;
6996 #endif
6999 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7000 dma_addr_t mapping, u32 len, u32 flags,
7001 u32 mss, u32 vlan)
7003 txbd->addr_hi = ((u64) mapping >> 32);
7004 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7005 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7006 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7009 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7010 dma_addr_t map, u32 len, u32 flags,
7011 u32 mss, u32 vlan)
7013 struct tg3 *tp = tnapi->tp;
7014 bool hwbug = false;
7016 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7017 hwbug = true;
7019 if (tg3_4g_overflow_test(map, len))
7020 hwbug = true;
7022 if (tg3_40bit_overflow_test(tp, map, len))
7023 hwbug = true;
7025 if (tp->dma_limit) {
7026 u32 prvidx = *entry;
7027 u32 tmp_flag = flags & ~TXD_FLAG_END;
7028 while (len > tp->dma_limit && *budget) {
7029 u32 frag_len = tp->dma_limit;
7030 len -= tp->dma_limit;
7032 /* Avoid the 8-byte DMA problem */
7033 if (len <= 8) {
7034 len += tp->dma_limit / 2;
7035 frag_len = tp->dma_limit / 2;
7038 tnapi->tx_buffers[*entry].fragmented = true;
7040 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7041 frag_len, tmp_flag, mss, vlan);
7042 *budget -= 1;
7043 prvidx = *entry;
7044 *entry = NEXT_TX(*entry);
7046 map += frag_len;
7049 if (len) {
7050 if (*budget) {
7051 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7052 len, flags, mss, vlan);
7053 *budget -= 1;
7054 *entry = NEXT_TX(*entry);
7055 } else {
7056 hwbug = true;
7057 tnapi->tx_buffers[prvidx].fragmented = false;
7060 } else {
7061 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7062 len, flags, mss, vlan);
7063 *entry = NEXT_TX(*entry);
7066 return hwbug;
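/* Editor's note: a simplified restatement of the fragment-length
 * choice inside tg3_tx_frag_set() above, with an illustrative name.
 * When splitting at dma_limit would leave a tail of 8 bytes or less
 * (which would trip the short-DMA bug), the current fragment shrinks
 * to half the limit so the remaining tail grows past 8 bytes.
 */
static inline u32 tg3_example_frag_len(u32 len, u32 dma_limit)
{
	/* assumes len > dma_limit, as in the splitting loop above */
	if (len - dma_limit <= 8)
		return dma_limit / 2;
	return dma_limit;
}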
7069 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7071 int i;
7072 struct sk_buff *skb;
7073 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7075 skb = txb->skb;
7076 txb->skb = NULL;
7078 pci_unmap_single(tnapi->tp->pdev,
7079 dma_unmap_addr(txb, mapping),
7080 skb_headlen(skb),
7081 PCI_DMA_TODEVICE);
7083 while (txb->fragmented) {
7084 txb->fragmented = false;
7085 entry = NEXT_TX(entry);
7086 txb = &tnapi->tx_buffers[entry];
7089 for (i = 0; i <= last; i++) {
7090 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7092 entry = NEXT_TX(entry);
7093 txb = &tnapi->tx_buffers[entry];
7095 pci_unmap_page(tnapi->tp->pdev,
7096 dma_unmap_addr(txb, mapping),
7097 skb_frag_size(frag), PCI_DMA_TODEVICE);
7099 while (txb->fragmented) {
7100 txb->fragmented = false;
7101 entry = NEXT_TX(entry);
7102 txb = &tnapi->tx_buffers[entry];
7107 /* Work around the 4GB and 40-bit hardware DMA bugs. */
7108 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7109 struct sk_buff **pskb,
7110 u32 *entry, u32 *budget,
7111 u32 base_flags, u32 mss, u32 vlan)
7113 struct tg3 *tp = tnapi->tp;
7114 struct sk_buff *new_skb, *skb = *pskb;
7115 dma_addr_t new_addr = 0;
7116 int ret = 0;
7118 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
7119 new_skb = skb_copy(skb, GFP_ATOMIC);
7120 else {
7121 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7123 new_skb = skb_copy_expand(skb,
7124 skb_headroom(skb) + more_headroom,
7125 skb_tailroom(skb), GFP_ATOMIC);
7128 if (!new_skb) {
7129 ret = -1;
7130 } else {
7131 /* New SKB is guaranteed to be linear. */
7132 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7133 PCI_DMA_TODEVICE);
7134 /* Make sure the mapping succeeded */
7135 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7136 dev_kfree_skb(new_skb);
7137 ret = -1;
7138 } else {
7139 u32 save_entry = *entry;
7141 base_flags |= TXD_FLAG_END;
7143 tnapi->tx_buffers[*entry].skb = new_skb;
7144 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7145 mapping, new_addr);
7147 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7148 new_skb->len, base_flags,
7149 mss, vlan)) {
7150 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7151 dev_kfree_skb(new_skb);
7152 ret = -1;
7157 dev_kfree_skb(skb);
7158 *pskb = new_skb;
7159 return ret;
7162 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7164 /* Use GSO to work around a rare TSO bug that may be triggered when the
7165 * TSO header is greater than 80 bytes.
7167 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7169 struct sk_buff *segs, *nskb;
7170 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7172 /* Estimate the number of fragments in the worst case */
7173 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7174 netif_stop_queue(tp->dev);
7176 /* netif_tx_stop_queue() must be done before checking
7177 * the tx index in tg3_tx_avail() below, because in
7178 * tg3_tx(), we update the tx index before checking for
7179 * netif_tx_queue_stopped().
7181 smp_mb();
7182 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7183 return NETDEV_TX_BUSY;
7185 netif_wake_queue(tp->dev);
7188 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7189 if (IS_ERR(segs))
7190 goto tg3_tso_bug_end;
7192 do {
7193 nskb = segs;
7194 segs = segs->next;
7195 nskb->next = NULL;
7196 tg3_start_xmit(nskb, tp->dev);
7197 } while (segs);
7199 tg3_tso_bug_end:
7200 dev_kfree_skb(skb);
7202 return NETDEV_TX_OK;
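/* Editor's note: a condensed sketch of the list walk above. The
 * skb_gso_segment() call returns a singly linked list of already
 * segmented skbs; each one is detached before being handed back to
 * the xmit path so it can be freed individually. The helper name is
 * illustrative only.
 */
static void tg3_example_xmit_segs(struct tg3 *tp, struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *nskb = segs;

		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	}
}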
7205 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7206 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7208 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7210 struct tg3 *tp = netdev_priv(dev);
7211 u32 len, entry, base_flags, mss, vlan = 0;
7212 u32 budget;
7213 int i = -1, would_hit_hwbug;
7214 dma_addr_t mapping;
7215 struct tg3_napi *tnapi;
7216 struct netdev_queue *txq;
7217 unsigned int last;
7219 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7220 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7221 if (tg3_flag(tp, ENABLE_TSS))
7222 tnapi++;
7224 budget = tg3_tx_avail(tnapi);
7226 /* We are running in BH disabled context with netif_tx_lock
7227 * and TX reclaim runs via tp->napi.poll inside of a software
7228 * interrupt. Furthermore, IRQ processing runs lockless so we have
7229 * no IRQ context deadlocks to worry about either. Rejoice!
7231 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7232 if (!netif_tx_queue_stopped(txq)) {
7233 netif_tx_stop_queue(txq);
7235 /* This is a hard error, log it. */
7236 netdev_err(dev,
7237 "BUG! Tx Ring full when queue awake!\n");
7239 return NETDEV_TX_BUSY;
7242 entry = tnapi->tx_prod;
7243 base_flags = 0;
7244 if (skb->ip_summed == CHECKSUM_PARTIAL)
7245 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7247 mss = skb_shinfo(skb)->gso_size;
7248 if (mss) {
7249 struct iphdr *iph;
7250 u32 tcp_opt_len, hdr_len;
7252 if (skb_header_cloned(skb) &&
7253 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7254 goto drop;
7256 iph = ip_hdr(skb);
7257 tcp_opt_len = tcp_optlen(skb);
7259 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7261 if (!skb_is_gso_v6(skb)) {
7262 iph->check = 0;
7263 iph->tot_len = htons(mss + hdr_len);
7266 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7267 tg3_flag(tp, TSO_BUG))
7268 return tg3_tso_bug(tp, skb);
7270 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7271 TXD_FLAG_CPU_POST_DMA);
7273 if (tg3_flag(tp, HW_TSO_1) ||
7274 tg3_flag(tp, HW_TSO_2) ||
7275 tg3_flag(tp, HW_TSO_3)) {
7276 tcp_hdr(skb)->check = 0;
7277 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7278 } else
7279 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7280 iph->daddr, 0,
7281 IPPROTO_TCP,
7284 if (tg3_flag(tp, HW_TSO_3)) {
7285 mss |= (hdr_len & 0xc) << 12;
7286 if (hdr_len & 0x10)
7287 base_flags |= 0x00000010;
7288 base_flags |= (hdr_len & 0x3e0) << 5;
7289 } else if (tg3_flag(tp, HW_TSO_2))
7290 mss |= hdr_len << 9;
7291 else if (tg3_flag(tp, HW_TSO_1) ||
7292 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7293 if (tcp_opt_len || iph->ihl > 5) {
7294 int tsflags;
7296 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7297 mss |= (tsflags << 11);
7299 } else {
7300 if (tcp_opt_len || iph->ihl > 5) {
7301 int tsflags;
7303 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7304 base_flags |= tsflags << 12;
7309 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7310 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7311 base_flags |= TXD_FLAG_JMB_PKT;
7313 if (vlan_tx_tag_present(skb)) {
7314 base_flags |= TXD_FLAG_VLAN;
7315 vlan = vlan_tx_tag_get(skb);
7318 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7319 tg3_flag(tp, TX_TSTAMP_EN)) {
7320 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7321 base_flags |= TXD_FLAG_HWTSTAMP;
7324 len = skb_headlen(skb);
7326 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7327 if (pci_dma_mapping_error(tp->pdev, mapping))
7328 goto drop;
7331 tnapi->tx_buffers[entry].skb = skb;
7332 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7334 would_hit_hwbug = 0;
7336 if (tg3_flag(tp, 5701_DMA_BUG))
7337 would_hit_hwbug = 1;
7339 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7340 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7341 mss, vlan)) {
7342 would_hit_hwbug = 1;
7343 } else if (skb_shinfo(skb)->nr_frags > 0) {
7344 u32 tmp_mss = mss;
7346 if (!tg3_flag(tp, HW_TSO_1) &&
7347 !tg3_flag(tp, HW_TSO_2) &&
7348 !tg3_flag(tp, HW_TSO_3))
7349 tmp_mss = 0;
7351 /* Now loop through additional data
7352 * fragments, and queue them.
7354 last = skb_shinfo(skb)->nr_frags - 1;
7355 for (i = 0; i <= last; i++) {
7356 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7358 len = skb_frag_size(frag);
7359 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7360 len, DMA_TO_DEVICE);
7362 tnapi->tx_buffers[entry].skb = NULL;
7363 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7364 mapping);
7365 if (dma_mapping_error(&tp->pdev->dev, mapping))
7366 goto dma_error;
7368 if (!budget ||
7369 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7370 len, base_flags |
7371 ((i == last) ? TXD_FLAG_END : 0),
7372 tmp_mss, vlan)) {
7373 would_hit_hwbug = 1;
7374 break;
7379 if (would_hit_hwbug) {
7380 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7382 /* If the workaround fails due to memory/mapping
7383 * failure, silently drop this packet.
7385 entry = tnapi->tx_prod;
7386 budget = tg3_tx_avail(tnapi);
7387 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7388 base_flags, mss, vlan))
7389 goto drop_nofree;
7392 skb_tx_timestamp(skb);
7393 netdev_tx_sent_queue(txq, skb->len);
7395 /* Sync BD data before updating mailbox */
7396 wmb();
7398 /* Packets are ready, update Tx producer idx local and on card. */
7399 tw32_tx_mbox(tnapi->prodmbox, entry);
7401 tnapi->tx_prod = entry;
7402 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7403 netif_tx_stop_queue(txq);
7405 /* netif_tx_stop_queue() must be done before checking
7406 * the tx index in tg3_tx_avail() below, because in
7407 * tg3_tx(), we update the tx index before checking for
7408 * netif_tx_queue_stopped().
7410 smp_mb();
7411 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7412 netif_tx_wake_queue(txq);
7415 mmiowb();
7416 return NETDEV_TX_OK;
7418 dma_error:
7419 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7420 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7421 drop:
7422 dev_kfree_skb(skb);
7423 drop_nofree:
7424 tp->tx_dropped++;
7425 return NETDEV_TX_OK;
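/* Editor's note: a condensed sketch of the stop/wake dance at the end
 * of tg3_start_xmit() above, using the driver's own helpers. The
 * smp_mb() pairs with the one in tg3_tx(): the consumer updates the
 * tx index before testing queue_stopped, so the producer must stop
 * the queue before re-reading the index, or a wakeup could be lost.
 * The function name is illustrative.
 */
static void tg3_example_stop_wake(struct tg3_napi *tnapi,
				  struct netdev_queue *txq)
{
	if (tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1)) {
		netif_tx_stop_queue(txq);
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}
}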
7428 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7430 if (enable) {
7431 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7432 MAC_MODE_PORT_MODE_MASK);
7434 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7436 if (!tg3_flag(tp, 5705_PLUS))
7437 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7439 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7440 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7441 else
7442 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7443 } else {
7444 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7446 if (tg3_flag(tp, 5705_PLUS) ||
7447 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7448 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7449 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7452 tw32(MAC_MODE, tp->mac_mode);
7453 udelay(40);
7456 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7458 u32 val, bmcr, mac_mode, ptest = 0;
7460 tg3_phy_toggle_apd(tp, false);
7461 tg3_phy_toggle_automdix(tp, 0);
7463 if (extlpbk && tg3_phy_set_extloopbk(tp))
7464 return -EIO;
7466 bmcr = BMCR_FULLDPLX;
7467 switch (speed) {
7468 case SPEED_10:
7469 break;
7470 case SPEED_100:
7471 bmcr |= BMCR_SPEED100;
7472 break;
7473 case SPEED_1000:
7474 default:
7475 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7476 speed = SPEED_100;
7477 bmcr |= BMCR_SPEED100;
7478 } else {
7479 speed = SPEED_1000;
7480 bmcr |= BMCR_SPEED1000;
7484 if (extlpbk) {
7485 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7486 tg3_readphy(tp, MII_CTRL1000, &val);
7487 val |= CTL1000_AS_MASTER |
7488 CTL1000_ENABLE_MASTER;
7489 tg3_writephy(tp, MII_CTRL1000, val);
7490 } else {
7491 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7492 MII_TG3_FET_PTEST_TRIM_2;
7493 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7495 } else
7496 bmcr |= BMCR_LOOPBACK;
7498 tg3_writephy(tp, MII_BMCR, bmcr);
7500 /* The write needs to be flushed for the FETs */
7501 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7502 tg3_readphy(tp, MII_BMCR, &bmcr);
7504 udelay(40);
7506 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7507 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7508 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7509 MII_TG3_FET_PTEST_FRC_TX_LINK |
7510 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7512 /* The write needs to be flushed for the AC131 */
7513 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7516 /* Reset to prevent losing the 1st rx packet intermittently */
7517 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7518 tg3_flag(tp, 5780_CLASS)) {
7519 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7520 udelay(10);
7521 tw32_f(MAC_RX_MODE, tp->rx_mode);
7524 mac_mode = tp->mac_mode &
7525 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7526 if (speed == SPEED_1000)
7527 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7528 else
7529 mac_mode |= MAC_MODE_PORT_MODE_MII;
7531 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7532 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7534 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7535 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7536 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7537 mac_mode |= MAC_MODE_LINK_POLARITY;
7539 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7540 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7543 tw32(MAC_MODE, mac_mode);
7544 udelay(40);
7546 return 0;
7549 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7551 struct tg3 *tp = netdev_priv(dev);
7553 if (features & NETIF_F_LOOPBACK) {
7554 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7555 return;
7557 spin_lock_bh(&tp->lock);
7558 tg3_mac_loopback(tp, true);
7559 netif_carrier_on(tp->dev);
7560 spin_unlock_bh(&tp->lock);
7561 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7562 } else {
7563 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7564 return;
7566 spin_lock_bh(&tp->lock);
7567 tg3_mac_loopback(tp, false);
7568 /* Force link status check */
7569 tg3_setup_phy(tp, 1);
7570 spin_unlock_bh(&tp->lock);
7571 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7575 static netdev_features_t tg3_fix_features(struct net_device *dev,
7576 netdev_features_t features)
7578 struct tg3 *tp = netdev_priv(dev);
7580 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7581 features &= ~NETIF_F_ALL_TSO;
7583 return features;
7586 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7588 netdev_features_t changed = dev->features ^ features;
7590 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7591 tg3_set_loopback(dev, features);
7593 return 0;
7596 static void tg3_rx_prodring_free(struct tg3 *tp,
7597 struct tg3_rx_prodring_set *tpr)
7599 int i;
7601 if (tpr != &tp->napi[0].prodring) {
7602 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7603 i = (i + 1) & tp->rx_std_ring_mask)
7604 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7605 tp->rx_pkt_map_sz);
7607 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7608 for (i = tpr->rx_jmb_cons_idx;
7609 i != tpr->rx_jmb_prod_idx;
7610 i = (i + 1) & tp->rx_jmb_ring_mask) {
7611 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7612 TG3_RX_JMB_MAP_SZ);
7616 return;
7619 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7620 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7621 tp->rx_pkt_map_sz);
7623 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7624 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7625 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7626 TG3_RX_JMB_MAP_SZ);
7630 /* Initialize rx rings for packet processing.
7632 * The chip has been shut down and the driver detached from
7633 * the networking layer, so no interrupts or new tx packets will
7634 * end up in the driver. tp->{tx,}lock are held and thus
7635 * we may not sleep.
7637 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7638 struct tg3_rx_prodring_set *tpr)
7640 u32 i, rx_pkt_dma_sz;
7642 tpr->rx_std_cons_idx = 0;
7643 tpr->rx_std_prod_idx = 0;
7644 tpr->rx_jmb_cons_idx = 0;
7645 tpr->rx_jmb_prod_idx = 0;
7647 if (tpr != &tp->napi[0].prodring) {
7648 memset(&tpr->rx_std_buffers[0], 0,
7649 TG3_RX_STD_BUFF_RING_SIZE(tp));
7650 if (tpr->rx_jmb_buffers)
7651 memset(&tpr->rx_jmb_buffers[0], 0,
7652 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7653 goto done;
7656 /* Zero out all descriptors. */
7657 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7659 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7660 if (tg3_flag(tp, 5780_CLASS) &&
7661 tp->dev->mtu > ETH_DATA_LEN)
7662 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7663 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7665 /* Initialize invariants of the rings; we only set this
7666 * stuff once. This works because the card does not
7667 * write into the rx buffer posting rings.
7669 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7670 struct tg3_rx_buffer_desc *rxd;
7672 rxd = &tpr->rx_std[i];
7673 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7674 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7675 rxd->opaque = (RXD_OPAQUE_RING_STD |
7676 (i << RXD_OPAQUE_INDEX_SHIFT));
7679 /* Now allocate fresh SKBs for each rx ring. */
7680 for (i = 0; i < tp->rx_pending; i++) {
7681 unsigned int frag_size;
7683 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7684 &frag_size) < 0) {
7685 netdev_warn(tp->dev,
7686 "Using a smaller RX standard ring. Only "
7687 "%d out of %d buffers were allocated "
7688 "successfully\n", i, tp->rx_pending);
7689 if (i == 0)
7690 goto initfail;
7691 tp->rx_pending = i;
7692 break;
7696 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7697 goto done;
7699 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7701 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7702 goto done;
7704 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7705 struct tg3_rx_buffer_desc *rxd;
7707 rxd = &tpr->rx_jmb[i].std;
7708 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7709 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7710 RXD_FLAG_JUMBO;
7711 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7712 (i << RXD_OPAQUE_INDEX_SHIFT));
7715 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7716 unsigned int frag_size;
7718 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7719 &frag_size) < 0) {
7720 netdev_warn(tp->dev,
7721 "Using a smaller RX jumbo ring. Only %d "
7722 "out of %d buffers were allocated "
7723 "successfully\n", i, tp->rx_jumbo_pending);
7724 if (i == 0)
7725 goto initfail;
7726 tp->rx_jumbo_pending = i;
7727 break;
7731 done:
7732 return 0;
7734 initfail:
7735 tg3_rx_prodring_free(tp, tpr);
7736 return -ENOMEM;
7739 static void tg3_rx_prodring_fini(struct tg3 *tp,
7740 struct tg3_rx_prodring_set *tpr)
7742 kfree(tpr->rx_std_buffers);
7743 tpr->rx_std_buffers = NULL;
7744 kfree(tpr->rx_jmb_buffers);
7745 tpr->rx_jmb_buffers = NULL;
7746 if (tpr->rx_std) {
7747 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7748 tpr->rx_std, tpr->rx_std_mapping);
7749 tpr->rx_std = NULL;
7751 if (tpr->rx_jmb) {
7752 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7753 tpr->rx_jmb, tpr->rx_jmb_mapping);
7754 tpr->rx_jmb = NULL;
7758 static int tg3_rx_prodring_init(struct tg3 *tp,
7759 struct tg3_rx_prodring_set *tpr)
7761 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7762 GFP_KERNEL);
7763 if (!tpr->rx_std_buffers)
7764 return -ENOMEM;
7766 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7767 TG3_RX_STD_RING_BYTES(tp),
7768 &tpr->rx_std_mapping,
7769 GFP_KERNEL);
7770 if (!tpr->rx_std)
7771 goto err_out;
7773 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7774 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7775 GFP_KERNEL);
7776 if (!tpr->rx_jmb_buffers)
7777 goto err_out;
7779 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7780 TG3_RX_JMB_RING_BYTES(tp),
7781 &tpr->rx_jmb_mapping,
7782 GFP_KERNEL);
7783 if (!tpr->rx_jmb)
7784 goto err_out;
7787 return 0;
7789 err_out:
7790 tg3_rx_prodring_fini(tp, tpr);
7791 return -ENOMEM;
7794 /* Free up pending packets in all rx/tx rings.
7796 * The chip has been shut down and the driver detached from
7797 * the networking layer, so no interrupts or new tx packets will
7798 * end up in the driver. tp->{tx,}lock is not held and we are not
7799 * in an interrupt context and thus may sleep.
7801 static void tg3_free_rings(struct tg3 *tp)
7803 int i, j;
7805 for (j = 0; j < tp->irq_cnt; j++) {
7806 struct tg3_napi *tnapi = &tp->napi[j];
7808 tg3_rx_prodring_free(tp, &tnapi->prodring);
7810 if (!tnapi->tx_buffers)
7811 continue;
7813 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7814 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7816 if (!skb)
7817 continue;
7819 tg3_tx_skb_unmap(tnapi, i,
7820 skb_shinfo(skb)->nr_frags - 1);
7822 dev_kfree_skb_any(skb);
7824 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7828 /* Initialize tx/rx rings for packet processing.
7830 * The chip has been shut down and the driver detached from
7831 * the networking layer, so no interrupts or new tx packets will
7832 * end up in the driver. tp->{tx,}lock are held and thus
7833 * we may not sleep.
7835 static int tg3_init_rings(struct tg3 *tp)
7837 int i;
7839 /* Free up all the SKBs. */
7840 tg3_free_rings(tp);
7842 for (i = 0; i < tp->irq_cnt; i++) {
7843 struct tg3_napi *tnapi = &tp->napi[i];
7845 tnapi->last_tag = 0;
7846 tnapi->last_irq_tag = 0;
7847 tnapi->hw_status->status = 0;
7848 tnapi->hw_status->status_tag = 0;
7849 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7851 tnapi->tx_prod = 0;
7852 tnapi->tx_cons = 0;
7853 if (tnapi->tx_ring)
7854 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7856 tnapi->rx_rcb_ptr = 0;
7857 if (tnapi->rx_rcb)
7858 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7860 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7861 tg3_free_rings(tp);
7862 return -ENOMEM;
7866 return 0;
7869 static void tg3_mem_tx_release(struct tg3 *tp)
7871 int i;
7873 for (i = 0; i < tp->irq_max; i++) {
7874 struct tg3_napi *tnapi = &tp->napi[i];
7876 if (tnapi->tx_ring) {
7877 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7878 tnapi->tx_ring, tnapi->tx_desc_mapping);
7879 tnapi->tx_ring = NULL;
7882 kfree(tnapi->tx_buffers);
7883 tnapi->tx_buffers = NULL;
7887 static int tg3_mem_tx_acquire(struct tg3 *tp)
7889 int i;
7890 struct tg3_napi *tnapi = &tp->napi[0];
7892 /* If multivector TSS is enabled, vector 0 does not handle
7893 * tx interrupts. Don't allocate any resources for it.
7895 if (tg3_flag(tp, ENABLE_TSS))
7896 tnapi++;
7898 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7899 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7900 TG3_TX_RING_SIZE, GFP_KERNEL);
7901 if (!tnapi->tx_buffers)
7902 goto err_out;
7904 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7905 TG3_TX_RING_BYTES,
7906 &tnapi->tx_desc_mapping,
7907 GFP_KERNEL);
7908 if (!tnapi->tx_ring)
7909 goto err_out;
7912 return 0;
7914 err_out:
7915 tg3_mem_tx_release(tp);
7916 return -ENOMEM;
7919 static void tg3_mem_rx_release(struct tg3 *tp)
7921 int i;
7923 for (i = 0; i < tp->irq_max; i++) {
7924 struct tg3_napi *tnapi = &tp->napi[i];
7926 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7928 if (!tnapi->rx_rcb)
7929 continue;
7931 dma_free_coherent(&tp->pdev->dev,
7932 TG3_RX_RCB_RING_BYTES(tp),
7933 tnapi->rx_rcb,
7934 tnapi->rx_rcb_mapping);
7935 tnapi->rx_rcb = NULL;
7939 static int tg3_mem_rx_acquire(struct tg3 *tp)
7941 unsigned int i, limit;
7943 limit = tp->rxq_cnt;
7945 /* If RSS is enabled, we need a (dummy) producer ring
7946 * set on vector zero. This is the true hw prodring.
7948 if (tg3_flag(tp, ENABLE_RSS))
7949 limit++;
7951 for (i = 0; i < limit; i++) {
7952 struct tg3_napi *tnapi = &tp->napi[i];
7954 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7955 goto err_out;
7957 /* If multivector RSS is enabled, vector 0
7958 * does not handle rx or tx interrupts.
7959 * Don't allocate any resources for it.
7961 if (!i && tg3_flag(tp, ENABLE_RSS))
7962 continue;
7964 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7965 TG3_RX_RCB_RING_BYTES(tp),
7966 &tnapi->rx_rcb_mapping,
7967 GFP_KERNEL);
7968 if (!tnapi->rx_rcb)
7969 goto err_out;
7971 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7974 return 0;
7976 err_out:
7977 tg3_mem_rx_release(tp);
7978 return -ENOMEM;
7982 * Must not be invoked with interrupt sources disabled and
7983 * the hardware shut down.
7985 static void tg3_free_consistent(struct tg3 *tp)
7987 int i;
7989 for (i = 0; i < tp->irq_cnt; i++) {
7990 struct tg3_napi *tnapi = &tp->napi[i];
7992 if (tnapi->hw_status) {
7993 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7994 tnapi->hw_status,
7995 tnapi->status_mapping);
7996 tnapi->hw_status = NULL;
8000 tg3_mem_rx_release(tp);
8001 tg3_mem_tx_release(tp);
8003 if (tp->hw_stats) {
8004 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8005 tp->hw_stats, tp->stats_mapping);
8006 tp->hw_stats = NULL;
8011 * Must not be invoked with interrupt sources disabled and
8012 * the hardware shut down. Can sleep.
8014 static int tg3_alloc_consistent(struct tg3 *tp)
8016 int i;
8018 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8019 sizeof(struct tg3_hw_stats),
8020 &tp->stats_mapping,
8021 GFP_KERNEL);
8022 if (!tp->hw_stats)
8023 goto err_out;
8025 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8027 for (i = 0; i < tp->irq_cnt; i++) {
8028 struct tg3_napi *tnapi = &tp->napi[i];
8029 struct tg3_hw_status *sblk;
8031 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8032 TG3_HW_STATUS_SIZE,
8033 &tnapi->status_mapping,
8034 GFP_KERNEL);
8035 if (!tnapi->hw_status)
8036 goto err_out;
8038 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8039 sblk = tnapi->hw_status;
8041 if (tg3_flag(tp, ENABLE_RSS)) {
8042 u16 *prodptr = NULL;
8045 * When RSS is enabled, the status block format changes
8046 * slightly. The "rx_jumbo_consumer", "reserved",
8047 * and "rx_mini_consumer" members get mapped to the
8048 * other three rx return ring producer indexes.
8050 switch (i) {
8051 case 1:
8052 prodptr = &sblk->idx[0].rx_producer;
8053 break;
8054 case 2:
8055 prodptr = &sblk->rx_jumbo_consumer;
8056 break;
8057 case 3:
8058 prodptr = &sblk->reserved;
8059 break;
8060 case 4:
8061 prodptr = &sblk->rx_mini_consumer;
8062 break;
8064 tnapi->rx_rcb_prod_idx = prodptr;
8065 } else {
8066 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8070 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8071 goto err_out;
8073 return 0;
8075 err_out:
8076 tg3_free_consistent(tp);
8077 return -ENOMEM;
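/* Editor's note: a condensed restatement of the RSS vector-to-field
 * mapping set up in tg3_alloc_consistent() above. With RSS enabled,
 * three otherwise unused status block members double as rx return
 * ring producer indexes. The helper name is illustrative only.
 */
static u16 *tg3_example_rss_prodptr(struct tg3_hw_status *sblk, int vec)
{
	switch (vec) {
	case 1: return &sblk->idx[0].rx_producer;
	case 2: return &sblk->rx_jumbo_consumer;
	case 3: return &sblk->reserved;
	case 4: return &sblk->rx_mini_consumer;
	}
	return NULL;
}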
8080 #define MAX_WAIT_CNT 1000
8082 /* To stop a block, clear the enable bit and poll till it
8083 * clears. tp->lock is held.
8085 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8087 unsigned int i;
8088 u32 val;
8090 if (tg3_flag(tp, 5705_PLUS)) {
8091 switch (ofs) {
8092 case RCVLSC_MODE:
8093 case DMAC_MODE:
8094 case MBFREE_MODE:
8095 case BUFMGR_MODE:
8096 case MEMARB_MODE:
8097 /* We can't enable/disable these bits of the
8098 * 5705/5750, just say success.
8100 return 0;
8102 default:
8103 break;
8107 val = tr32(ofs);
8108 val &= ~enable_bit;
8109 tw32_f(ofs, val);
8111 for (i = 0; i < MAX_WAIT_CNT; i++) {
8112 udelay(100);
8113 val = tr32(ofs);
8114 if ((val & enable_bit) == 0)
8115 break;
8118 if (i == MAX_WAIT_CNT && !silent) {
8119 dev_err(&tp->pdev->dev,
8120 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8121 ofs, enable_bit);
8122 return -ENODEV;
8125 return 0;
8128 /* tp->lock is held. */
8129 static int tg3_abort_hw(struct tg3 *tp, int silent)
8131 int i, err;
8133 tg3_disable_ints(tp);
8135 tp->rx_mode &= ~RX_MODE_ENABLE;
8136 tw32_f(MAC_RX_MODE, tp->rx_mode);
8137 udelay(10);
8139 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8140 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8141 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8142 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8143 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8144 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8146 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8147 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8148 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8149 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8150 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8151 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8152 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8154 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8155 tw32_f(MAC_MODE, tp->mac_mode);
8156 udelay(40);
8158 tp->tx_mode &= ~TX_MODE_ENABLE;
8159 tw32_f(MAC_TX_MODE, tp->tx_mode);
8161 for (i = 0; i < MAX_WAIT_CNT; i++) {
8162 udelay(100);
8163 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8164 break;
8166 if (i >= MAX_WAIT_CNT) {
8167 dev_err(&tp->pdev->dev,
8168 "%s timed out, TX_MODE_ENABLE will not clear "
8169 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8170 err |= -ENODEV;
8173 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8174 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8175 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8177 tw32(FTQ_RESET, 0xffffffff);
8178 tw32(FTQ_RESET, 0x00000000);
8180 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8181 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8183 for (i = 0; i < tp->irq_cnt; i++) {
8184 struct tg3_napi *tnapi = &tp->napi[i];
8185 if (tnapi->hw_status)
8186 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8189 return err;
8192 /* Save PCI command register before chip reset */
8193 static void tg3_save_pci_state(struct tg3 *tp)
8195 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8198 /* Restore PCI state after chip reset */
8199 static void tg3_restore_pci_state(struct tg3 *tp)
8201 u32 val;
8203 /* Re-enable indirect register accesses. */
8204 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8205 tp->misc_host_ctrl);
8207 /* Set MAX PCI retry to zero. */
8208 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8209 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8210 tg3_flag(tp, PCIX_MODE))
8211 val |= PCISTATE_RETRY_SAME_DMA;
8212 /* Allow reads and writes to the APE register and memory space. */
8213 if (tg3_flag(tp, ENABLE_APE))
8214 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8215 PCISTATE_ALLOW_APE_SHMEM_WR |
8216 PCISTATE_ALLOW_APE_PSPACE_WR;
8217 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8219 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8221 if (!tg3_flag(tp, PCI_EXPRESS)) {
8222 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8223 tp->pci_cacheline_sz);
8224 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8225 tp->pci_lat_timer);
8228 /* Make sure PCI-X relaxed ordering bit is clear. */
8229 if (tg3_flag(tp, PCIX_MODE)) {
8230 u16 pcix_cmd;
8232 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8233 &pcix_cmd);
8234 pcix_cmd &= ~PCI_X_CMD_ERO;
8235 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8236 pcix_cmd);
8239 if (tg3_flag(tp, 5780_CLASS)) {
8241 /* Chip reset on 5780 will reset MSI enable bit,
8242 * so need to restore it.
8244 if (tg3_flag(tp, USING_MSI)) {
8245 u16 ctrl;
8247 pci_read_config_word(tp->pdev,
8248 tp->msi_cap + PCI_MSI_FLAGS,
8249 &ctrl);
8250 pci_write_config_word(tp->pdev,
8251 tp->msi_cap + PCI_MSI_FLAGS,
8252 ctrl | PCI_MSI_FLAGS_ENABLE);
8253 val = tr32(MSGINT_MODE);
8254 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8259 /* tp->lock is held. */
8260 static int tg3_chip_reset(struct tg3 *tp)
8262 u32 val;
8263 void (*write_op)(struct tg3 *, u32, u32);
8264 int i, err;
8266 tg3_nvram_lock(tp);
8268 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8270 /* No matching tg3_nvram_unlock() after this because
8271 * chip reset below will undo the nvram lock.
8273 tp->nvram_lock_cnt = 0;
8275 /* GRC_MISC_CFG core clock reset will clear the memory
8276 * enable bit in PCI register 4 and the MSI enable bit
8277 * on some chips, so we save relevant registers here.
8279 tg3_save_pci_state(tp);
8281 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8282 tg3_flag(tp, 5755_PLUS))
8283 tw32(GRC_FASTBOOT_PC, 0);
8286 * We must avoid the readl() that normally takes place.
8287 * It locks machines, causes machine checks, and other
8288 * fun things. So, temporarily disable the 5701
8289 * hardware workaround, while we do the reset.
8291 write_op = tp->write32;
8292 if (write_op == tg3_write_flush_reg32)
8293 tp->write32 = tg3_write32;
8295 /* Prevent the irq handler from reading or writing PCI registers
8296 * during chip reset when the memory enable bit in the PCI command
8297 * register may be cleared. The chip does not generate interrupts
8298 * at this time, but the irq handler may still be called due to irq
8299 * sharing or irqpoll.
8301 tg3_flag_set(tp, CHIP_RESETTING);
8302 for (i = 0; i < tp->irq_cnt; i++) {
8303 struct tg3_napi *tnapi = &tp->napi[i];
8304 if (tnapi->hw_status) {
8305 tnapi->hw_status->status = 0;
8306 tnapi->hw_status->status_tag = 0;
8308 tnapi->last_tag = 0;
8309 tnapi->last_irq_tag = 0;
8311 smp_mb();
8313 for (i = 0; i < tp->irq_cnt; i++)
8314 synchronize_irq(tp->napi[i].irq_vec);
8316 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8317 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8318 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8321 /* do the reset */
8322 val = GRC_MISC_CFG_CORECLK_RESET;
8324 if (tg3_flag(tp, PCI_EXPRESS)) {
8325 /* Force PCIe 1.0a mode */
8326 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8327 !tg3_flag(tp, 57765_PLUS) &&
8328 tr32(TG3_PCIE_PHY_TSTCTL) ==
8329 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8330 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8332 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8333 tw32(GRC_MISC_CFG, (1 << 29));
8334 val |= (1 << 29);
8338 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8339 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8340 tw32(GRC_VCPU_EXT_CTRL,
8341 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8344 /* Manage gphy power for all CPMU absent PCIe devices. */
8345 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8346 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8348 tw32(GRC_MISC_CFG, val);
8350 /* restore 5701 hardware bug workaround write method */
8351 tp->write32 = write_op;
8353 /* Unfortunately, we have to delay before the PCI read back.
8354 * Some 575X chips will not even respond to a PCI cfg access
8355 * when the reset command is given to the chip.
8357 * How do these hardware designers expect things to work
8358 * properly if the PCI write is posted for a long period
8359 * of time? It is always necessary to have some method by
8360 * which a register read back can occur to push the write
8361 * out which does the reset.
8363 * For most tg3 variants the trick below was working.
8364 * Ho hum...
8366 udelay(120);
8368 /* Flush PCI posted writes. The normal MMIO registers
8369 * are inaccessible at this time so this is the only
8370 * way to make this reliably (actually, this is no longer
8371 * the case, see above). I tried to use indirect
8372 * register read/write but this upset some 5701 variants.
8373 */
8374 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8376 udelay(120);
8378 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8379 u16 val16;
8381 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8382 int j;
8383 u32 cfg_val;
8385 /* Wait for link training to complete. */
8386 for (j = 0; j < 5000; j++)
8387 udelay(100);
8389 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8390 pci_write_config_dword(tp->pdev, 0xc4,
8391 cfg_val | (1 << 15));
8392 }
8394 /* Clear the "no snoop" and "relaxed ordering" bits. */
8395 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8396 /*
8397 * Older PCIe devices only support the 128 byte
8398 * MPS setting. Enforce the restriction.
8399 */
8400 if (!tg3_flag(tp, CPMU_PRESENT))
8401 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8402 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8404 /* Clear error status */
8405 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8406 PCI_EXP_DEVSTA_CED |
8407 PCI_EXP_DEVSTA_NFED |
8408 PCI_EXP_DEVSTA_FED |
8409 PCI_EXP_DEVSTA_URD);
8410 }
8412 tg3_restore_pci_state(tp);
8414 tg3_flag_clear(tp, CHIP_RESETTING);
8415 tg3_flag_clear(tp, ERROR_PROCESSED);
8417 val = 0;
8418 if (tg3_flag(tp, 5780_CLASS))
8419 val = tr32(MEMARB_MODE);
8420 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8422 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8423 tg3_stop_fw(tp);
8424 tw32(0x5000, 0x400);
8425 }
8427 tw32(GRC_MODE, tp->grc_mode);
8429 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8430 val = tr32(0xc4);
8432 tw32(0xc4, val | (1 << 15));
8433 }
8435 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8436 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8437 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8438 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8439 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8440 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8441 }
8443 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8444 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8445 val = tp->mac_mode;
8446 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8447 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8448 val = tp->mac_mode;
8449 } else
8450 val = 0;
8452 tw32_f(MAC_MODE, val);
8453 udelay(40);
8455 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8457 err = tg3_poll_fw(tp);
8458 if (err)
8459 return err;
8461 tg3_mdio_start(tp);
8463 if (tg3_flag(tp, PCI_EXPRESS) &&
8464 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8465 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8466 !tg3_flag(tp, 57765_PLUS)) {
8467 val = tr32(0x7c00);
8469 tw32(0x7c00, val | (1 << 25));
8470 }
8472 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8473 val = tr32(TG3_CPMU_CLCK_ORIDE);
8474 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8475 }
8477 /* Reprobe ASF enable state. */
8478 tg3_flag_clear(tp, ENABLE_ASF);
8479 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8480 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8481 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8482 u32 nic_cfg;
8484 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8485 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8486 tg3_flag_set(tp, ENABLE_ASF);
8487 tp->last_event_jiffies = jiffies;
8488 if (tg3_flag(tp, 5750_PLUS))
8489 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8490 }
8491 }
8493 return 0;
8494 }
8496 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8497 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8499 /* tp->lock is held. */
8500 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8501 {
8502 int err;
8504 tg3_stop_fw(tp);
8506 tg3_write_sig_pre_reset(tp, kind);
8508 tg3_abort_hw(tp, silent);
8509 err = tg3_chip_reset(tp);
8511 __tg3_set_mac_addr(tp, 0);
8513 tg3_write_sig_legacy(tp, kind);
8514 tg3_write_sig_post_reset(tp, kind);
8516 if (tp->hw_stats) {
8517 /* Save the stats across chip resets... */
8518 tg3_get_nstats(tp, &tp->net_stats_prev);
8519 tg3_get_estats(tp, &tp->estats_prev);
8521 /* And make sure the next sample is new data */
8522 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8523 }
8525 if (err)
8526 return err;
8528 return 0;
8529 }
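/* .ndo_set_mac_address handler: copy the new station address into
 * dev->dev_addr and program both MAC address slots, unless ASF
 * firmware has claimed slot 1 for itself.
 */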
8531 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8532 {
8533 struct tg3 *tp = netdev_priv(dev);
8534 struct sockaddr *addr = p;
8535 int err = 0, skip_mac_1 = 0;
8537 if (!is_valid_ether_addr(addr->sa_data))
8538 return -EADDRNOTAVAIL;
8540 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8542 if (!netif_running(dev))
8543 return 0;
8545 if (tg3_flag(tp, ENABLE_ASF)) {
8546 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8548 addr0_high = tr32(MAC_ADDR_0_HIGH);
8549 addr0_low = tr32(MAC_ADDR_0_LOW);
8550 addr1_high = tr32(MAC_ADDR_1_HIGH);
8551 addr1_low = tr32(MAC_ADDR_1_LOW);
8553 /* Skip MAC addr 1 if ASF is using it. */
8554 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8555 !(addr1_high == 0 && addr1_low == 0))
8556 skip_mac_1 = 1;
8557 }
8558 spin_lock_bh(&tp->lock);
8559 __tg3_set_mac_addr(tp, skip_mac_1);
8560 spin_unlock_bh(&tp->lock);
8562 return err;
8563 }
8565 /* tp->lock is held. */
8566 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8567 dma_addr_t mapping, u32 maxlen_flags,
8568 u32 nic_addr)
8569 {
8570 tg3_write_mem(tp,
8571 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8572 ((u64) mapping >> 32));
8573 tg3_write_mem(tp,
8574 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8575 ((u64) mapping & 0xffffffff));
8576 tg3_write_mem(tp,
8577 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8578 maxlen_flags);
8580 if (!tg3_flag(tp, 5705_PLUS))
8581 tg3_write_mem(tp,
8582 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8583 nic_addr);
8584 }
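/* Program the TX host-coalescing registers.  Without TSS only the
 * global registers are used; with TSS each TX queue has its own
 * block of registers at a 0x18-byte stride, and any leftover
 * vectors are zeroed.
 */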
8587 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8588 {
8589 int i = 0;
8591 if (!tg3_flag(tp, ENABLE_TSS)) {
8592 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8593 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8594 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8595 } else {
8596 tw32(HOSTCC_TXCOL_TICKS, 0);
8597 tw32(HOSTCC_TXMAX_FRAMES, 0);
8598 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8600 for (; i < tp->txq_cnt; i++) {
8601 u32 reg;
8603 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8604 tw32(reg, ec->tx_coalesce_usecs);
8605 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8606 tw32(reg, ec->tx_max_coalesced_frames);
8607 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8608 tw32(reg, ec->tx_max_coalesced_frames_irq);
8609 }
8610 }
8612 for (; i < tp->irq_max - 1; i++) {
8613 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8614 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8615 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8616 }
8617 }
8619 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8620 {
8621 int i = 0;
8622 u32 limit = tp->rxq_cnt;
8624 if (!tg3_flag(tp, ENABLE_RSS)) {
8625 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8626 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8627 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8628 limit--;
8629 } else {
8630 tw32(HOSTCC_RXCOL_TICKS, 0);
8631 tw32(HOSTCC_RXMAX_FRAMES, 0);
8632 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8633 }
8635 for (; i < limit; i++) {
8636 u32 reg;
8638 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8639 tw32(reg, ec->rx_coalesce_usecs);
8640 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8641 tw32(reg, ec->rx_max_coalesced_frames);
8642 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8643 tw32(reg, ec->rx_max_coalesced_frames_irq);
8644 }
8646 for (; i < tp->irq_max - 1; i++) {
8647 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8648 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8649 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8650 }
8651 }
8653 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8654 {
8655 tg3_coal_tx_init(tp, ec);
8656 tg3_coal_rx_init(tp, ec);
8658 if (!tg3_flag(tp, 5705_PLUS)) {
8659 u32 val = ec->stats_block_coalesce_usecs;
8661 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8662 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8664 if (!tp->link_up)
8665 val = 0;
8667 tw32(HOSTCC_STAT_COAL_TICKS, val);
8668 }
8669 }
8671 /* tp->lock is held. */
8672 static void tg3_rings_reset(struct tg3 *tp)
8673 {
8674 int i;
8675 u32 stblk, txrcb, rxrcb, limit;
8676 struct tg3_napi *tnapi = &tp->napi[0];
8678 /* Disable all transmit rings but the first. */
8679 if (!tg3_flag(tp, 5705_PLUS))
8680 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8681 else if (tg3_flag(tp, 5717_PLUS))
8682 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8683 else if (tg3_flag(tp, 57765_CLASS) ||
8684 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
8685 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8686 else
8687 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8689 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8690 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8691 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8692 BDINFO_FLAGS_DISABLED);
8695 /* Disable all receive return rings but the first. */
8696 if (tg3_flag(tp, 5717_PLUS))
8697 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8698 else if (!tg3_flag(tp, 5705_PLUS))
8699 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8700 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8701 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
8702 tg3_flag(tp, 57765_CLASS))
8703 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8704 else
8705 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8707 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8708 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8709 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8710 BDINFO_FLAGS_DISABLED);
8712 /* Disable interrupts */
8713 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8714 tp->napi[0].chk_msi_cnt = 0;
8715 tp->napi[0].last_rx_cons = 0;
8716 tp->napi[0].last_tx_cons = 0;
8718 /* Zero mailbox registers. */
8719 if (tg3_flag(tp, SUPPORT_MSIX)) {
8720 for (i = 1; i < tp->irq_max; i++) {
8721 tp->napi[i].tx_prod = 0;
8722 tp->napi[i].tx_cons = 0;
8723 if (tg3_flag(tp, ENABLE_TSS))
8724 tw32_mailbox(tp->napi[i].prodmbox, 0);
8725 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8726 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8727 tp->napi[i].chk_msi_cnt = 0;
8728 tp->napi[i].last_rx_cons = 0;
8729 tp->napi[i].last_tx_cons = 0;
8730 }
8731 if (!tg3_flag(tp, ENABLE_TSS))
8732 tw32_mailbox(tp->napi[0].prodmbox, 0);
8733 } else {
8734 tp->napi[0].tx_prod = 0;
8735 tp->napi[0].tx_cons = 0;
8736 tw32_mailbox(tp->napi[0].prodmbox, 0);
8737 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8738 }
8740 /* Make sure the NIC-based send BD rings are disabled. */
8741 if (!tg3_flag(tp, 5705_PLUS)) {
8742 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8743 for (i = 0; i < 16; i++)
8744 tw32_tx_mbox(mbox + i * 8, 0);
8745 }
8747 txrcb = NIC_SRAM_SEND_RCB;
8748 rxrcb = NIC_SRAM_RCV_RET_RCB;
8750 /* Clear status block in ram. */
8751 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8753 /* Set status block DMA address */
8754 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8755 ((u64) tnapi->status_mapping >> 32));
8756 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8757 ((u64) tnapi->status_mapping & 0xffffffff));
8759 if (tnapi->tx_ring) {
8760 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8761 (TG3_TX_RING_SIZE <<
8762 BDINFO_FLAGS_MAXLEN_SHIFT),
8763 NIC_SRAM_TX_BUFFER_DESC);
8764 txrcb += TG3_BDINFO_SIZE;
8765 }
8767 if (tnapi->rx_rcb) {
8768 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8769 (tp->rx_ret_ring_mask + 1) <<
8770 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8771 rxrcb += TG3_BDINFO_SIZE;
8772 }
8774 stblk = HOSTCC_STATBLCK_RING1;
8776 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8777 u64 mapping = (u64)tnapi->status_mapping;
8778 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8779 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8781 /* Clear status block in ram. */
8782 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8784 if (tnapi->tx_ring) {
8785 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8786 (TG3_TX_RING_SIZE <<
8787 BDINFO_FLAGS_MAXLEN_SHIFT),
8788 NIC_SRAM_TX_BUFFER_DESC);
8789 txrcb += TG3_BDINFO_SIZE;
8790 }
8792 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8793 ((tp->rx_ret_ring_mask + 1) <<
8794 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8796 stblk += 8;
8797 rxrcb += TG3_BDINFO_SIZE;
8798 }
8799 }
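/* Pick RX buffer-descriptor replenish thresholds from the per-chip
 * BD cache size and the configured ring length.
 */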
8801 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8802 {
8803 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8805 if (!tg3_flag(tp, 5750_PLUS) ||
8806 tg3_flag(tp, 5780_CLASS) ||
8807 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8808 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8809 tg3_flag(tp, 57765_PLUS))
8810 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8811 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8812 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8813 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8814 else
8815 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8817 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8818 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8820 val = min(nic_rep_thresh, host_rep_thresh);
8821 tw32(RCVBDI_STD_THRESH, val);
8823 if (tg3_flag(tp, 57765_PLUS))
8824 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8826 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8827 return;
8829 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8831 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8833 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8834 tw32(RCVBDI_JUMBO_THRESH, val);
8836 if (tg3_flag(tp, 57765_PLUS))
8837 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8838 }
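/* Bit-serial CRC-32 over buf using the reflected Ethernet
 * polynomial 0xedb88320; the result indexes the 128-bit multicast
 * hash filter.
 */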
8840 static inline u32 calc_crc(unsigned char *buf, int len)
8841 {
8842 u32 reg;
8843 u32 tmp;
8844 int j, k;
8846 reg = 0xffffffff;
8848 for (j = 0; j < len; j++) {
8849 reg ^= buf[j];
8851 for (k = 0; k < 8; k++) {
8852 tmp = reg & 0x01;
8854 reg >>= 1;
8856 if (tmp)
8857 reg ^= 0xedb88320;
8858 }
8859 }
8861 return ~reg;
8862 }
8864 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8865 {
8866 /* accept or reject all multicast frames */
8867 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8868 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8869 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8870 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8871 }
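/* Recompute RX filtering for the current interface flags: promiscuous,
 * accept-all-multicast, or a CRC-derived hash of the multicast list.
 * MAC_RX_MODE is only rewritten when the computed mode changed.
 */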
8873 static void __tg3_set_rx_mode(struct net_device *dev)
8874 {
8875 struct tg3 *tp = netdev_priv(dev);
8876 u32 rx_mode;
8878 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8879 RX_MODE_KEEP_VLAN_TAG);
8881 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8882 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8883 * flag clear.
8884 */
8885 if (!tg3_flag(tp, ENABLE_ASF))
8886 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8887 #endif
8889 if (dev->flags & IFF_PROMISC) {
8890 /* Promiscuous mode. */
8891 rx_mode |= RX_MODE_PROMISC;
8892 } else if (dev->flags & IFF_ALLMULTI) {
8893 /* Accept all multicast. */
8894 tg3_set_multi(tp, 1);
8895 } else if (netdev_mc_empty(dev)) {
8896 /* Reject all multicast. */
8897 tg3_set_multi(tp, 0);
8898 } else {
8899 /* Accept one or more multicast(s). */
8900 struct netdev_hw_addr *ha;
8901 u32 mc_filter[4] = { 0, };
8902 u32 regidx;
8903 u32 bit;
8904 u32 crc;
8906 netdev_for_each_mc_addr(ha, dev) {
8907 crc = calc_crc(ha->addr, ETH_ALEN);
8908 bit = ~crc & 0x7f;
8909 regidx = (bit & 0x60) >> 5;
8910 bit &= 0x1f;
8911 mc_filter[regidx] |= (1 << bit);
8912 }
8914 tw32(MAC_HASH_REG_0, mc_filter[0]);
8915 tw32(MAC_HASH_REG_1, mc_filter[1]);
8916 tw32(MAC_HASH_REG_2, mc_filter[2]);
8917 tw32(MAC_HASH_REG_3, mc_filter[3]);
8918 }
8920 if (rx_mode != tp->rx_mode) {
8921 tp->rx_mode = rx_mode;
8922 tw32_f(MAC_RX_MODE, rx_mode);
8923 udelay(10);
8924 }
8925 }
8927 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8928 {
8929 int i;
8931 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8932 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8933 }
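/* Make sure every RSS indirection entry still targets a live RX
 * queue; regenerate the default table if any entry is stale.
 */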
8935 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8936 {
8937 int i;
8939 if (!tg3_flag(tp, SUPPORT_MSIX))
8940 return;
8942 if (tp->rxq_cnt == 1) {
8943 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8944 return;
8945 }
8947 /* Validate table against current IRQ count */
8948 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8949 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
8950 break;
8951 }
8953 if (i != TG3_RSS_INDIR_TBL_SIZE)
8954 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8955 }
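/* Write the RSS indirection table to the MAC.  Entries are 4 bits
 * wide, packed eight per 32-bit register with the first entry in
 * the most significant nibble.
 */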
8957 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8958 {
8959 int i = 0;
8960 u32 reg = MAC_RSS_INDIR_TBL_0;
8962 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8963 u32 val = tp->rss_ind_tbl[i];
8964 i++;
8965 for (; i % 8; i++) {
8966 val <<= 4;
8967 val |= tp->rss_ind_tbl[i];
8968 }
8969 tw32(reg, val);
8970 reg += 4;
8971 }
8972 }
8974 /* tp->lock is held. */
8975 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8976 {
8977 u32 val, rdmac_mode;
8978 int i, err, limit;
8979 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8981 tg3_disable_ints(tp);
8983 tg3_stop_fw(tp);
8985 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8987 if (tg3_flag(tp, INIT_COMPLETE))
8988 tg3_abort_hw(tp, 1);
8990 /* Enable MAC control of LPI */
8991 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8992 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8993 TG3_CPMU_EEE_LNKIDL_UART_IDL;
8994 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8995 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
8997 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
8999 tw32_f(TG3_CPMU_EEE_CTRL,
9000 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9002 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9003 TG3_CPMU_EEEMD_LPI_IN_TX |
9004 TG3_CPMU_EEEMD_LPI_IN_RX |
9005 TG3_CPMU_EEEMD_EEE_ENABLE;
9007 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
9008 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9010 if (tg3_flag(tp, ENABLE_APE))
9011 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9013 tw32_f(TG3_CPMU_EEE_MODE, val);
9015 tw32_f(TG3_CPMU_EEE_DBTMR1,
9016 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9017 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9019 tw32_f(TG3_CPMU_EEE_DBTMR2,
9020 TG3_CPMU_DBTMR2_APE_TX_2047US |
9021 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9022 }
9024 if (reset_phy)
9025 tg3_phy_reset(tp);
9027 err = tg3_chip_reset(tp);
9028 if (err)
9029 return err;
9031 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9033 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
9034 val = tr32(TG3_CPMU_CTRL);
9035 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9036 tw32(TG3_CPMU_CTRL, val);
9038 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9039 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9040 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9041 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9043 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9044 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9045 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9046 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9048 val = tr32(TG3_CPMU_HST_ACC);
9049 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9050 val |= CPMU_HST_ACC_MACCLK_6_25;
9051 tw32(TG3_CPMU_HST_ACC, val);
9052 }
9054 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
9055 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9056 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9057 PCIE_PWR_MGMT_L1_THRESH_4MS;
9058 tw32(PCIE_PWR_MGMT_THRESH, val);
9060 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9061 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9063 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9065 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9066 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9067 }
9069 if (tg3_flag(tp, L1PLLPD_EN)) {
9070 u32 grc_mode = tr32(GRC_MODE);
9072 /* Access the lower 1K of PL PCIE block registers. */
9073 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9074 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9076 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9077 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9078 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9080 tw32(GRC_MODE, grc_mode);
9081 }
9083 if (tg3_flag(tp, 57765_CLASS)) {
9084 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
9085 u32 grc_mode = tr32(GRC_MODE);
9087 /* Access the lower 1K of PL PCIE block registers. */
9088 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9089 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9091 val = tr32(TG3_PCIE_TLDLPL_PORT +
9092 TG3_PCIE_PL_LO_PHYCTL5);
9093 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9094 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9096 tw32(GRC_MODE, grc_mode);
9097 }
9099 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
9100 u32 grc_mode = tr32(GRC_MODE);
9102 /* Access the lower 1K of DL PCIE block registers. */
9103 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9104 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9106 val = tr32(TG3_PCIE_TLDLPL_PORT +
9107 TG3_PCIE_DL_LO_FTSMAX);
9108 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9109 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9110 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9112 tw32(GRC_MODE, grc_mode);
9113 }
9115 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9116 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9117 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9118 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9119 }
9121 /* This works around an issue with Athlon chipsets on
9122 * B3 tigon3 silicon. This bit has no effect on any
9123 * other revision. But do not set this on PCI Express
9124 * chips and don't even touch the clocks if the CPMU is present.
9125 */
9126 if (!tg3_flag(tp, CPMU_PRESENT)) {
9127 if (!tg3_flag(tp, PCI_EXPRESS))
9128 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9129 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9130 }
9132 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
9133 tg3_flag(tp, PCIX_MODE)) {
9134 val = tr32(TG3PCI_PCISTATE);
9135 val |= PCISTATE_RETRY_SAME_DMA;
9136 tw32(TG3PCI_PCISTATE, val);
9137 }
9139 if (tg3_flag(tp, ENABLE_APE)) {
9140 /* Allow reads and writes to the
9141 * APE register and memory space.
9142 */
9143 val = tr32(TG3PCI_PCISTATE);
9144 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9145 PCISTATE_ALLOW_APE_SHMEM_WR |
9146 PCISTATE_ALLOW_APE_PSPACE_WR;
9147 tw32(TG3PCI_PCISTATE, val);
9148 }
9150 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
9151 /* Enable some hw fixes. */
9152 val = tr32(TG3PCI_MSI_DATA);
9153 val |= (1 << 26) | (1 << 28) | (1 << 29);
9154 tw32(TG3PCI_MSI_DATA, val);
9155 }
9157 /* Descriptor ring init may make accesses to the
9158 * NIC SRAM area to setup the TX descriptors, so we
9159 * can only do this after the hardware has been
9160 * successfully reset.
9161 */
9162 err = tg3_init_rings(tp);
9163 if (err)
9164 return err;
9166 if (tg3_flag(tp, 57765_PLUS)) {
9167 val = tr32(TG3PCI_DMA_RW_CTRL) &
9168 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9169 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9170 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9171 if (!tg3_flag(tp, 57765_CLASS) &&
9172 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9173 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
9174 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9175 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9176 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
9177 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
9178 /* This value is determined during the probe time DMA
9179 * engine test, tg3_test_dma.
9180 */
9181 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9182 }
9184 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9185 GRC_MODE_4X_NIC_SEND_RINGS |
9186 GRC_MODE_NO_TX_PHDR_CSUM |
9187 GRC_MODE_NO_RX_PHDR_CSUM);
9188 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9190 /* Pseudo-header checksum is done by hardware logic and not
9191 * the offload processors, so make the chip do the pseudo-
9192 * header checksums on receive. For transmit it is more
9193 * convenient to do the pseudo-header checksum in software
9194 * as Linux does that on transmit for us in all cases.
9195 */
9196 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9198 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9199 if (tp->rxptpctl)
9200 tw32(TG3_RX_PTP_CTL,
9201 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9203 if (tg3_flag(tp, PTP_CAPABLE))
9204 val |= GRC_MODE_TIME_SYNC_ENABLE;
9206 tw32(GRC_MODE, tp->grc_mode | val);
9208 /* Set up the timer prescaler register. The clock is always 66MHz. */
9209 val = tr32(GRC_MISC_CFG);
9210 val &= ~0xff;
9211 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9212 tw32(GRC_MISC_CFG, val);
9214 /* Initialize MBUF/DESC pool. */
9215 if (tg3_flag(tp, 5750_PLUS)) {
9216 /* Do nothing. */
9217 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
9218 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9219 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9220 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9221 else
9222 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9223 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9224 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9225 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9226 int fw_len;
9228 fw_len = tp->fw_len;
9229 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9230 tw32(BUFMGR_MB_POOL_ADDR,
9231 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9232 tw32(BUFMGR_MB_POOL_SIZE,
9233 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9234 }
9236 if (tp->dev->mtu <= ETH_DATA_LEN) {
9237 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9238 tp->bufmgr_config.mbuf_read_dma_low_water);
9239 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9240 tp->bufmgr_config.mbuf_mac_rx_low_water);
9241 tw32(BUFMGR_MB_HIGH_WATER,
9242 tp->bufmgr_config.mbuf_high_water);
9243 } else {
9244 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9245 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9246 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9247 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9248 tw32(BUFMGR_MB_HIGH_WATER,
9249 tp->bufmgr_config.mbuf_high_water_jumbo);
9250 }
9251 tw32(BUFMGR_DMA_LOW_WATER,
9252 tp->bufmgr_config.dma_low_water);
9253 tw32(BUFMGR_DMA_HIGH_WATER,
9254 tp->bufmgr_config.dma_high_water);
9256 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9257 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
9258 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9259 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9260 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9261 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
9262 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9263 tw32(BUFMGR_MODE, val);
9264 for (i = 0; i < 2000; i++) {
9265 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9266 break;
9267 udelay(10);
9268 }
9269 if (i >= 2000) {
9270 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9271 return -ENODEV;
9272 }
9274 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9275 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9277 tg3_setup_rxbd_thresholds(tp);
9279 /* Initialize TG3_BDINFO's at:
9280 * RCVDBDI_STD_BD: standard eth size rx ring
9281 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9282 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9284 * like so:
9285 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9286 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9287 * ring attribute flags
9288 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9290 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9291 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9293 * The size of each ring is fixed in the firmware, but the location is
9294 * configurable.
9295 */
9296 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9297 ((u64) tpr->rx_std_mapping >> 32));
9298 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9299 ((u64) tpr->rx_std_mapping & 0xffffffff));
9300 if (!tg3_flag(tp, 5717_PLUS))
9301 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9302 NIC_SRAM_RX_BUFFER_DESC);
9304 /* Disable the mini ring */
9305 if (!tg3_flag(tp, 5705_PLUS))
9306 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9307 BDINFO_FLAGS_DISABLED);
9309 /* Program the jumbo buffer descriptor ring control
9310 * blocks on those devices that have them.
9311 */
9312 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9313 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9315 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9316 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9317 ((u64) tpr->rx_jmb_mapping >> 32));
9318 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9319 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9320 val = TG3_RX_JMB_RING_SIZE(tp) <<
9321 BDINFO_FLAGS_MAXLEN_SHIFT;
9322 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9323 val | BDINFO_FLAGS_USE_EXT_RECV);
9324 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9325 tg3_flag(tp, 57765_CLASS) ||
9326 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9327 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9328 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9329 } else {
9330 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9331 BDINFO_FLAGS_DISABLED);
9332 }
9334 if (tg3_flag(tp, 57765_PLUS)) {
9335 val = TG3_RX_STD_RING_SIZE(tp);
9336 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9337 val |= (TG3_RX_STD_DMA_SZ << 2);
9338 } else
9339 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9340 } else
9341 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9343 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9345 tpr->rx_std_prod_idx = tp->rx_pending;
9346 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9348 tpr->rx_jmb_prod_idx =
9349 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9350 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9352 tg3_rings_reset(tp);
9354 /* Initialize MAC address and backoff seed. */
9355 __tg3_set_mac_addr(tp, 0);
9357 /* MTU + ethernet header + FCS + optional VLAN tag */
9358 tw32(MAC_RX_MTU_SIZE,
9359 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9361 /* The slot time is changed by tg3_setup_phy if we
9362 * run at gigabit with half duplex.
9363 */
9364 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9365 (6 << TX_LENGTHS_IPG_SHIFT) |
9366 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9368 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9369 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9370 val |= tr32(MAC_TX_LENGTHS) &
9371 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9372 TX_LENGTHS_CNT_DWN_VAL_MSK);
9374 tw32(MAC_TX_LENGTHS, val);
9376 /* Receive rules. */
9377 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9378 tw32(RCVLPC_CONFIG, 0x0181);
9380 /* Calculate RDMAC_MODE setting early, we need it to determine
9381 * the RCVLPC_STATE_ENABLE mask.
9382 */
9383 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9384 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9385 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9386 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9387 RDMAC_MODE_LNGREAD_ENAB);
9389 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9390 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9392 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9393 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9394 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9395 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9396 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9397 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9399 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9400 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9401 if (tg3_flag(tp, TSO_CAPABLE) &&
9402 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9403 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9404 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9405 !tg3_flag(tp, IS_5788)) {
9406 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9407 }
9408 }
9410 if (tg3_flag(tp, PCI_EXPRESS))
9411 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9413 if (tg3_flag(tp, HW_TSO_1) ||
9414 tg3_flag(tp, HW_TSO_2) ||
9415 tg3_flag(tp, HW_TSO_3))
9416 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9418 if (tg3_flag(tp, 57765_PLUS) ||
9419 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9420 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9421 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9423 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9424 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9425 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9427 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9428 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9429 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9430 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9431 tg3_flag(tp, 57765_PLUS)) {
9432 u32 tgtreg;
9434 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9435 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9436 else
9437 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9439 val = tr32(tgtreg);
9440 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9442 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9443 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9444 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9445 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9446 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9447 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9448 }
9449 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9450 }
9452 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9453 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9454 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9455 u32 tgtreg;
9457 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9458 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9459 else
9460 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9462 val = tr32(tgtreg);
9463 tw32(tgtreg, val |
9464 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9465 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9466 }
9468 /* Receive/send statistics. */
9469 if (tg3_flag(tp, 5750_PLUS)) {
9470 val = tr32(RCVLPC_STATS_ENABLE);
9471 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9472 tw32(RCVLPC_STATS_ENABLE, val);
9473 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9474 tg3_flag(tp, TSO_CAPABLE)) {
9475 val = tr32(RCVLPC_STATS_ENABLE);
9476 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9477 tw32(RCVLPC_STATS_ENABLE, val);
9478 } else {
9479 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9480 }
9481 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9482 tw32(SNDDATAI_STATSENAB, 0xffffff);
9483 tw32(SNDDATAI_STATSCTRL,
9484 (SNDDATAI_SCTRL_ENABLE |
9485 SNDDATAI_SCTRL_FASTUPD));
9487 /* Setup host coalescing engine. */
9488 tw32(HOSTCC_MODE, 0);
9489 for (i = 0; i < 2000; i++) {
9490 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9491 break;
9492 udelay(10);
9493 }
9495 __tg3_set_coalesce(tp, &tp->coal);
9497 if (!tg3_flag(tp, 5705_PLUS)) {
9498 /* Status/statistics block address. See tg3_timer,
9499 * the tg3_periodic_fetch_stats call there, and
9500 * tg3_get_stats to see how this works for 5705/5750 chips.
9501 */
9502 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9503 ((u64) tp->stats_mapping >> 32));
9504 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9505 ((u64) tp->stats_mapping & 0xffffffff));
9506 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9508 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9510 /* Clear statistics and status block memory areas */
9511 for (i = NIC_SRAM_STATS_BLK;
9512 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9513 i += sizeof(u32)) {
9514 tg3_write_mem(tp, i, 0);
9515 udelay(40);
9516 }
9517 }
9519 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9521 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9522 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9523 if (!tg3_flag(tp, 5705_PLUS))
9524 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9526 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9527 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9528 /* reset to prevent losing 1st rx packet intermittently */
9529 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9530 udelay(10);
9531 }
9533 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9534 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9535 MAC_MODE_FHDE_ENABLE;
9536 if (tg3_flag(tp, ENABLE_APE))
9537 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9538 if (!tg3_flag(tp, 5705_PLUS) &&
9539 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9540 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9541 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9542 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9543 udelay(40);
9545 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9546 * If TG3_FLAG_IS_NIC is zero, we should read the
9547 * register to preserve the GPIO settings for LOMs. The GPIOs,
9548 * whether used as inputs or outputs, are set by boot code after
9549 * reset.
9550 */
9551 if (!tg3_flag(tp, IS_NIC)) {
9552 u32 gpio_mask;
9554 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9555 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9556 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9558 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9559 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9560 GRC_LCLCTRL_GPIO_OUTPUT3;
9562 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9563 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9565 tp->grc_local_ctrl &= ~gpio_mask;
9566 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9567 }
9568 /* GPIO1 must be driven high for eeprom write protect */
9569 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9570 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9571 GRC_LCLCTRL_GPIO_OUTPUT1);
9573 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9574 udelay(100);
9576 if (tg3_flag(tp, USING_MSIX)) {
9577 val = tr32(MSGINT_MODE);
9578 val |= MSGINT_MODE_ENABLE;
9579 if (tp->irq_cnt > 1)
9580 val |= MSGINT_MODE_MULTIVEC_EN;
9581 if (!tg3_flag(tp, 1SHOT_MSI))
9582 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9583 tw32(MSGINT_MODE, val);
9584 }
9586 if (!tg3_flag(tp, 5705_PLUS)) {
9587 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9588 udelay(40);
9589 }
9591 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9592 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9593 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9594 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9595 WDMAC_MODE_LNGREAD_ENAB);
9597 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9598 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9599 if (tg3_flag(tp, TSO_CAPABLE) &&
9600 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9601 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9602 /* nothing */
9603 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9604 !tg3_flag(tp, IS_5788)) {
9605 val |= WDMAC_MODE_RX_ACCEL;
9606 }
9607 }
9609 /* Enable host coalescing bug fix */
9610 if (tg3_flag(tp, 5755_PLUS))
9611 val |= WDMAC_MODE_STATUS_TAG_FIX;
9613 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9614 val |= WDMAC_MODE_BURST_ALL_DATA;
9616 tw32_f(WDMAC_MODE, val);
9617 udelay(40);
9619 if (tg3_flag(tp, PCIX_MODE)) {
9620 u16 pcix_cmd;
9622 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9623 &pcix_cmd);
9624 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9625 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9626 pcix_cmd |= PCI_X_CMD_READ_2K;
9627 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9628 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9629 pcix_cmd |= PCI_X_CMD_READ_2K;
9630 }
9631 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9632 pcix_cmd);
9633 }
9635 tw32_f(RDMAC_MODE, rdmac_mode);
9636 udelay(40);
9638 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9639 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9640 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9641 break;
9642 }
9643 if (i < TG3_NUM_RDMA_CHANNELS) {
9644 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9645 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9646 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9647 tg3_flag_set(tp, 5719_RDMA_BUG);
9648 }
9649 }
9651 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9652 if (!tg3_flag(tp, 5705_PLUS))
9653 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9655 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9656 tw32(SNDDATAC_MODE,
9657 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9658 else
9659 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9661 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9662 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9663 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9664 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9665 val |= RCVDBDI_MODE_LRG_RING_SZ;
9666 tw32(RCVDBDI_MODE, val);
9667 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9668 if (tg3_flag(tp, HW_TSO_1) ||
9669 tg3_flag(tp, HW_TSO_2) ||
9670 tg3_flag(tp, HW_TSO_3))
9671 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9672 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9673 if (tg3_flag(tp, ENABLE_TSS))
9674 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9675 tw32(SNDBDI_MODE, val);
9676 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9678 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9679 err = tg3_load_5701_a0_firmware_fix(tp);
9680 if (err)
9681 return err;
9682 }
9684 if (tg3_flag(tp, TSO_CAPABLE)) {
9685 err = tg3_load_tso_firmware(tp);
9686 if (err)
9687 return err;
9688 }
9690 tp->tx_mode = TX_MODE_ENABLE;
9692 if (tg3_flag(tp, 5755_PLUS) ||
9693 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9694 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9696 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9697 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9698 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9699 tp->tx_mode &= ~val;
9700 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9701 }
9703 tw32_f(MAC_TX_MODE, tp->tx_mode);
9704 udelay(100);
9706 if (tg3_flag(tp, ENABLE_RSS)) {
9707 tg3_rss_write_indir_tbl(tp);
9709 /* Setup the "secret" hash key. */
9710 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9711 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9712 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9713 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9714 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9715 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9716 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9717 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9718 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9719 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9720 }
9722 tp->rx_mode = RX_MODE_ENABLE;
9723 if (tg3_flag(tp, 5755_PLUS))
9724 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9726 if (tg3_flag(tp, ENABLE_RSS))
9727 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9728 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9729 RX_MODE_RSS_IPV6_HASH_EN |
9730 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9731 RX_MODE_RSS_IPV4_HASH_EN |
9732 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9734 tw32_f(MAC_RX_MODE, tp->rx_mode);
9735 udelay(10);
9737 tw32(MAC_LED_CTRL, tp->led_ctrl);
9739 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9740 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9741 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9742 udelay(10);
9743 }
9744 tw32_f(MAC_RX_MODE, tp->rx_mode);
9745 udelay(10);
9747 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9748 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9749 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9750 /* Set drive transmission level to 1.2V */
9751 /* only if the signal pre-emphasis bit is not set */
9752 val = tr32(MAC_SERDES_CFG);
9753 val &= 0xfffff000;
9754 val |= 0x880;
9755 tw32(MAC_SERDES_CFG, val);
9756 }
9757 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9758 tw32(MAC_SERDES_CFG, 0x616000);
9759 }
9761 /* Prevent chip from dropping frames when flow control
9762 * is enabled.
9763 */
9764 if (tg3_flag(tp, 57765_CLASS))
9765 val = 1;
9766 else
9767 val = 2;
9768 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9770 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9771 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9772 /* Use hardware link auto-negotiation */
9773 tg3_flag_set(tp, HW_AUTONEG);
9774 }
9776 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9777 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9778 u32 tmp;
9780 tmp = tr32(SERDES_RX_CTRL);
9781 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9782 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9783 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9784 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9785 }
9787 if (!tg3_flag(tp, USE_PHYLIB)) {
9788 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9789 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9791 err = tg3_setup_phy(tp, 0);
9792 if (err)
9793 return err;
9795 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9796 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9797 u32 tmp;
9799 /* Clear CRC stats. */
9800 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9801 tg3_writephy(tp, MII_TG3_TEST1,
9802 tmp | MII_TG3_TEST1_CRC_EN);
9803 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9804 }
9805 }
9806 }
9808 __tg3_set_rx_mode(tp->dev);
9810 /* Initialize receive rules. */
9811 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9812 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9813 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9814 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9816 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9817 limit = 8;
9818 else
9819 limit = 16;
9820 if (tg3_flag(tp, ENABLE_ASF))
9821 limit -= 4;
9822 switch (limit) {
9823 case 16:
9824 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9825 case 15:
9826 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9827 case 14:
9828 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9829 case 13:
9830 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9831 case 12:
9832 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9833 case 11:
9834 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9835 case 10:
9836 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9837 case 9:
9838 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9839 case 8:
9840 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9841 case 7:
9842 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9843 case 6:
9844 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9845 case 5:
9846 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9847 case 4:
9848 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9849 case 3:
9850 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9851 case 2:
9852 case 1:
9854 default:
9855 break;
9856 }
9858 if (tg3_flag(tp, ENABLE_APE))
9859 /* Write our heartbeat update interval to APE. */
9860 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9861 APE_HOST_HEARTBEAT_INT_DISABLE);
9863 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9865 return 0;
9866 }
9868 /* Called at device open time to get the chip ready for
9869 * packet processing. Invoked with tp->lock held.
9870 */
9871 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9872 {
9873 tg3_switch_clocks(tp);
9875 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9877 return tg3_reset_hw(tp, reset_phy);
9878 }
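/* Read the OCIR sensor records out of the APE scratchpad, zeroing
 * any record whose signature or ACTIVE flag is invalid.
 */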
9880 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9881 {
9882 int i;
9884 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9885 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9887 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9888 off += len;
9890 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9891 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9892 memset(ocir, 0, TG3_OCIR_LEN);
9893 }
9894 }
9896 /* sysfs attributes for hwmon */
9897 static ssize_t tg3_show_temp(struct device *dev,
9898 struct device_attribute *devattr, char *buf)
9899 {
9900 struct pci_dev *pdev = to_pci_dev(dev);
9901 struct net_device *netdev = pci_get_drvdata(pdev);
9902 struct tg3 *tp = netdev_priv(netdev);
9903 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9904 u32 temperature;
9906 spin_lock_bh(&tp->lock);
9907 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9908 sizeof(temperature));
9909 spin_unlock_bh(&tp->lock);
9910 return sprintf(buf, "%u\n", temperature);
9911 }
9914 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9915 TG3_TEMP_SENSOR_OFFSET);
9916 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9917 TG3_TEMP_CAUTION_OFFSET);
9918 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9919 TG3_TEMP_MAX_OFFSET);
9921 static struct attribute *tg3_attributes[] = {
9922 &sensor_dev_attr_temp1_input.dev_attr.attr,
9923 &sensor_dev_attr_temp1_crit.dev_attr.attr,
9924 &sensor_dev_attr_temp1_max.dev_attr.attr,
9925 NULL
9926 };
9928 static const struct attribute_group tg3_group = {
9929 .attrs = tg3_attributes,
9930 };
9932 static void tg3_hwmon_close(struct tg3 *tp)
9933 {
9934 if (tp->hwmon_dev) {
9935 hwmon_device_unregister(tp->hwmon_dev);
9936 tp->hwmon_dev = NULL;
9937 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9938 }
9939 }
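/* Register the hwmon temperature attributes, but only when the APE
 * scratchpad actually carries sensor data.
 */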
9941 static void tg3_hwmon_open(struct tg3 *tp)
9942 {
9943 int i, err;
9944 u32 size = 0;
9945 struct pci_dev *pdev = tp->pdev;
9946 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9948 tg3_sd_scan_scratchpad(tp, ocirs);
9950 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9951 if (!ocirs[i].src_data_length)
9952 continue;
9954 size += ocirs[i].src_hdr_length;
9955 size += ocirs[i].src_data_length;
9956 }
9958 if (!size)
9959 return;
9961 /* Register hwmon sysfs hooks */
9962 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9963 if (err) {
9964 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9965 return;
9966 }
9968 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9969 if (IS_ERR(tp->hwmon_dev)) {
9970 tp->hwmon_dev = NULL;
9971 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9972 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9973 }
9974 }
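/* Accumulate a free-running 32-bit hardware counter into a 64-bit
 * {high, low} statistic; a wrap of the low word shows up as an
 * unsigned overflow and is carried into the high word.
 */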
9977 #define TG3_STAT_ADD32(PSTAT, REG) \
9978 do { u32 __val = tr32(REG); \
9979 (PSTAT)->low += __val; \
9980 if ((PSTAT)->low < __val) \
9981 (PSTAT)->high += 1; \
9982 } while (0)
9984 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9985 {
9986 struct tg3_hw_stats *sp = tp->hw_stats;
9988 if (!tp->link_up)
9989 return;
9991 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9992 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9993 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9994 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9995 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9996 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9997 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9998 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9999 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10000 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10001 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10002 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10003 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10004 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10005 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10006 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10007 u32 val;
10009 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10010 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10011 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10012 tg3_flag_clear(tp, 5719_RDMA_BUG);
10013 }
10015 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10016 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10017 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10018 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10019 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10020 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10021 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10022 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10023 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10024 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10025 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10026 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10027 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10028 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10030 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10031 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10032 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
10033 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
10034 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10035 } else {
10036 u32 val = tr32(HOSTCC_FLOW_ATTN);
10037 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10038 if (val) {
10039 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10040 sp->rx_discards.low += val;
10041 if (sp->rx_discards.low < val)
10042 sp->rx_discards.high += 1;
10043 }
10044 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10045 }
10046 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10047 }
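/* Some chips can lose an MSI edge.  If a vector reports work but has
 * made no RX/TX progress since the last tick, allow one grace period
 * and then call the MSI handler by hand.
 */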
10049 static void tg3_chk_missed_msi(struct tg3 *tp)
10050 {
10051 u32 i;
10053 for (i = 0; i < tp->irq_cnt; i++) {
10054 struct tg3_napi *tnapi = &tp->napi[i];
10056 if (tg3_has_work(tnapi)) {
10057 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10058 tnapi->last_tx_cons == tnapi->tx_cons) {
10059 if (tnapi->chk_msi_cnt < 1) {
10060 tnapi->chk_msi_cnt++;
10061 return;
10062 }
10063 tg3_msi(0, tnapi);
10064 }
10065 }
10066 tnapi->chk_msi_cnt = 0;
10067 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10068 tnapi->last_tx_cons = tnapi->tx_cons;
10069 }
10070 }
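/* Periodic driver heartbeat (see tg3_timer_init for the cadence).
 * Re-arms non-tagged status interrupts, checks for missed MSIs,
 * fetches stats and polls the link once per second, and pings the
 * ASF firmware every couple of seconds.
 */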
10072 static void tg3_timer(unsigned long __opaque)
10073 {
10074 struct tg3 *tp = (struct tg3 *) __opaque;
10076 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10077 goto restart_timer;
10079 spin_lock(&tp->lock);
10081 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10082 tg3_flag(tp, 57765_CLASS))
10083 tg3_chk_missed_msi(tp);
10085 if (!tg3_flag(tp, TAGGED_STATUS)) {
10086 /* All of this garbage is because when using non-tagged
10087 * IRQ status the mailbox/status_block protocol the chip
10088 * uses with the cpu is race prone.
10089 */
10090 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10091 tw32(GRC_LOCAL_CTRL,
10092 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10093 } else {
10094 tw32(HOSTCC_MODE, tp->coalesce_mode |
10095 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10096 }
10098 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10099 spin_unlock(&tp->lock);
10100 tg3_reset_task_schedule(tp);
10101 goto restart_timer;
10102 }
10103 }
10105 /* This part only runs once per second. */
10106 if (!--tp->timer_counter) {
10107 if (tg3_flag(tp, 5705_PLUS))
10108 tg3_periodic_fetch_stats(tp);
10110 if (tp->setlpicnt && !--tp->setlpicnt)
10111 tg3_phy_eee_enable(tp);
10113 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10114 u32 mac_stat;
10115 int phy_event;
10117 mac_stat = tr32(MAC_STATUS);
10119 phy_event = 0;
10120 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10121 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10122 phy_event = 1;
10123 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10124 phy_event = 1;
10126 if (phy_event)
10127 tg3_setup_phy(tp, 0);
10128 } else if (tg3_flag(tp, POLL_SERDES)) {
10129 u32 mac_stat = tr32(MAC_STATUS);
10130 int need_setup = 0;
10132 if (tp->link_up &&
10133 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10134 need_setup = 1;
10135 }
10136 if (!tp->link_up &&
10137 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10138 MAC_STATUS_SIGNAL_DET))) {
10139 need_setup = 1;
10140 }
10141 if (need_setup) {
10142 if (!tp->serdes_counter) {
10143 tw32_f(MAC_MODE,
10144 (tp->mac_mode &
10145 ~MAC_MODE_PORT_MODE_MASK));
10146 udelay(40);
10147 tw32_f(MAC_MODE, tp->mac_mode);
10148 udelay(40);
10149 }
10150 tg3_setup_phy(tp, 0);
10151 }
10152 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10153 tg3_flag(tp, 5780_CLASS)) {
10154 tg3_serdes_parallel_detect(tp);
10155 }
10157 tp->timer_counter = tp->timer_multiplier;
10158 }
10160 /* Heartbeat is only sent once every 2 seconds.
10162 * The heartbeat is to tell the ASF firmware that the host
10163 * driver is still alive. In the event that the OS crashes,
10164 * ASF needs to reset the hardware to free up the FIFO space
10165 * that may be filled with rx packets destined for the host.
10166 * If the FIFO is full, ASF will no longer function properly.
10168 * Unintended resets have been reported on real time kernels
10169 * where the timer doesn't run on time. Netpoll will also have
10170 * same problem.
10172 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10173 * to check the ring condition when the heartbeat is expiring
10174 * before doing the reset. This will prevent most unintended
10175 * resets.
10176 */
10177 if (!--tp->asf_counter) {
10178 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10179 tg3_wait_for_event_ack(tp);
10181 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10182 FWCMD_NICDRV_ALIVE3);
10183 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10184 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10185 TG3_FW_UPDATE_TIMEOUT_SEC);
10187 tg3_generate_fw_event(tp);
10188 }
10189 tp->asf_counter = tp->asf_multiplier;
10190 }
10192 spin_unlock(&tp->lock);
10194 restart_timer:
10195 tp->timer.expires = jiffies + tp->timer_offset;
10196 add_timer(&tp->timer);
10197 }
10199 static void tg3_timer_init(struct tg3 *tp)
10200 {
10201 if (tg3_flag(tp, TAGGED_STATUS) &&
10202 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10203 !tg3_flag(tp, 57765_CLASS))
10204 tp->timer_offset = HZ;
10205 else
10206 tp->timer_offset = HZ / 10;
10208 BUG_ON(tp->timer_offset > HZ);
10210 tp->timer_multiplier = (HZ / tp->timer_offset);
10211 tp->asf_multiplier = (HZ / tp->timer_offset) *
10212 TG3_FW_UPDATE_FREQ_SEC;
10214 init_timer(&tp->timer);
10215 tp->timer.data = (unsigned long) tp;
10216 tp->timer.function = tg3_timer;
10217 }
10219 static void tg3_timer_start(struct tg3 *tp)
10220 {
10221 tp->asf_counter = tp->asf_multiplier;
10222 tp->timer_counter = tp->timer_multiplier;
10224 tp->timer.expires = jiffies + tp->timer_offset;
10225 add_timer(&tp->timer);
10226 }
10228 static void tg3_timer_stop(struct tg3 *tp)
10229 {
10230 del_timer_sync(&tp->timer);
10231 }
10233 /* Restart hardware after configuration changes, self-test, etc.
10234 * Invoked with tp->lock held.
10235 */
10236 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10237 __releases(tp->lock)
10238 __acquires(tp->lock)
10239 {
10240 int err;
10242 err = tg3_init_hw(tp, reset_phy);
10243 if (err) {
10244 netdev_err(tp->dev,
10245 "Failed to re-initialize device, aborting\n");
10246 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10247 tg3_full_unlock(tp);
10248 tg3_timer_stop(tp);
10249 tp->irq_sync = 0;
10250 tg3_napi_enable(tp);
10251 dev_close(tp->dev);
10252 tg3_full_lock(tp, 0);
10254 return err;
10257 static void tg3_reset_task(struct work_struct *work)
10259 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10260 int err;
10262 tg3_full_lock(tp, 0);
10264 if (!netif_running(tp->dev)) {
10265 tg3_flag_clear(tp, RESET_TASK_PENDING);
10266 tg3_full_unlock(tp);
10267 return;
10270 tg3_full_unlock(tp);
10272 tg3_phy_stop(tp);
10274 tg3_netif_stop(tp);
10276 tg3_full_lock(tp, 1);
10278 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10279 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10280 tp->write32_rx_mbox = tg3_write_flush_reg32;
10281 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10282 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10285 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10286 err = tg3_init_hw(tp, 1);
10287 if (err)
10288 goto out;
10290 tg3_netif_start(tp);
10292 out:
10293 tg3_full_unlock(tp);
10295 if (!err)
10296 tg3_phy_start(tp);
10298 tg3_flag_clear(tp, RESET_TASK_PENDING);
10301 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10303 irq_handler_t fn;
10304 unsigned long flags;
10305 char *name;
10306 struct tg3_napi *tnapi = &tp->napi[irq_num];
10308 if (tp->irq_cnt == 1)
10309 name = tp->dev->name;
10310 else {
10311 name = &tnapi->irq_lbl[0];
10312 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10313 name[IFNAMSIZ-1] = 0;
10316 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10317 fn = tg3_msi;
10318 if (tg3_flag(tp, 1SHOT_MSI))
10319 fn = tg3_msi_1shot;
10320 flags = 0;
10321 } else {
10322 fn = tg3_interrupt;
10323 if (tg3_flag(tp, TAGGED_STATUS))
10324 fn = tg3_interrupt_tagged;
10325 flags = IRQF_SHARED;
10328 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10331 static int tg3_test_interrupt(struct tg3 *tp)
10333 struct tg3_napi *tnapi = &tp->napi[0];
10334 struct net_device *dev = tp->dev;
10335 int err, i, intr_ok = 0;
10336 u32 val;
10338 if (!netif_running(dev))
10339 return -ENODEV;
10341 tg3_disable_ints(tp);
10343 free_irq(tnapi->irq_vec, tnapi);
10346 * Turn off MSI one shot mode. Otherwise this test has no
10347 * observable way to know whether the interrupt was delivered.
10349 if (tg3_flag(tp, 57765_PLUS)) {
10350 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10351 tw32(MSGINT_MODE, val);
10354 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10355 IRQF_SHARED, dev->name, tnapi);
10356 if (err)
10357 return err;
10359 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10360 tg3_enable_ints(tp);
10362 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10363 tnapi->coal_now);
10365 for (i = 0; i < 5; i++) {
10366 u32 int_mbox, misc_host_ctrl;
10368 int_mbox = tr32_mailbox(tnapi->int_mbox);
10369 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10371 if ((int_mbox != 0) ||
10372 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10373 intr_ok = 1;
10374 break;
10377 if (tg3_flag(tp, 57765_PLUS) &&
10378 tnapi->hw_status->status_tag != tnapi->last_tag)
10379 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10381 msleep(10);
10384 tg3_disable_ints(tp);
10386 free_irq(tnapi->irq_vec, tnapi);
10388 err = tg3_request_irq(tp, 0);
10390 if (err)
10391 return err;
10393 if (intr_ok) {
10394 /* Reenable MSI one shot mode. */
10395 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10396 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10397 tw32(MSGINT_MODE, val);
10399 return 0;
10402 return -EIO;
10405 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
10406 * INTx mode is successfully restored.
10408 static int tg3_test_msi(struct tg3 *tp)
10410 int err;
10411 u16 pci_cmd;
10413 if (!tg3_flag(tp, USING_MSI))
10414 return 0;
10416 /* Turn off SERR reporting in case MSI terminates with Master
10417 * Abort.
10419 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10420 pci_write_config_word(tp->pdev, PCI_COMMAND,
10421 pci_cmd & ~PCI_COMMAND_SERR);
10423 err = tg3_test_interrupt(tp);
10425 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10427 if (!err)
10428 return 0;
10430 /* other failures */
10431 if (err != -EIO)
10432 return err;
10434 /* MSI test failed, go back to INTx mode */
10435 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10436 "to INTx mode. Please report this failure to the PCI "
10437 "maintainer and include system chipset information\n");
10439 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10441 pci_disable_msi(tp->pdev);
10443 tg3_flag_clear(tp, USING_MSI);
10444 tp->napi[0].irq_vec = tp->pdev->irq;
10446 err = tg3_request_irq(tp, 0);
10447 if (err)
10448 return err;
10450 /* Need to reset the chip because the MSI cycle may have terminated
10451 * with Master Abort.
10453 tg3_full_lock(tp, 1);
10455 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10456 err = tg3_init_hw(tp, 1);
10458 tg3_full_unlock(tp);
10460 if (err)
10461 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10463 return err;
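/* Editorial summary of the fallback implemented above:
 *
 *   tg3_test_msi()
 *     -> tg3_test_interrupt() returns -EIO (no MSI observed)
 *     -> free the MSI vector, pci_disable_msi(), revert to tp->pdev->irq
 *     -> tg3_request_irq() re-attaches the handler in INTx mode
 *     -> full halt + re-init, because the failed MSI cycle may have
 *        terminated with a Master Abort
 */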
10466 static int tg3_request_firmware(struct tg3 *tp)
10468 const __be32 *fw_data;
10470 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10471 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10472 tp->fw_needed);
10473 return -ENOENT;
10476 fw_data = (void *)tp->fw->data;
10478 /* Firmware blob starts with version numbers, followed by
10479 * start address and _full_ length including BSS sections
10480 * (which must be longer than the actual data, of course).
10483 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
10484 if (tp->fw_len < (tp->fw->size - 12)) {
10485 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10486 tp->fw_len, tp->fw_needed);
10487 release_firmware(tp->fw);
10488 tp->fw = NULL;
10489 return -EINVAL;
10492 /* We no longer need firmware; we have it. */
10493 tp->fw_needed = NULL;
10494 return 0;
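/* Editorial sketch of the firmware header implied by the reads above; the
 * field names are hypothetical, only the word positions come from this
 * function and its comment:
 *
 *   struct tg3_fw_hdr_sketch {
 *       __be32 version;   // fw_data[0]
 *       __be32 start;     // fw_data[1], load/start address
 *       __be32 len;       // fw_data[2], full length including BSS
 *   };                    // 12 bytes, hence the tp->fw->size - 12 check
 */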
10497 static u32 tg3_irq_count(struct tg3 *tp)
10499 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10501 if (irq_cnt > 1) {
10502 /* We want as many rx rings enabled as there are cpus.
10503 * In multiqueue MSI-X mode, the first MSI-X vector
10504 * only deals with link interrupts, etc, so we add
10505 * one to the number of vectors we are requesting.
10507 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10510 return irq_cnt;
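/* Editorial worked example: on a four-CPU box with rxq_cnt = 4 and
 * txq_cnt = 1, irq_cnt starts at max(4, 1) = 4; being > 1 puts us in
 * multiqueue MSI-X mode, so one vector is added for the link/error
 * vector, requesting min(5, tp->irq_max) vectors in total.
 */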
10513 static bool tg3_enable_msix(struct tg3 *tp)
10515 int i, rc;
10516 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10518 tp->txq_cnt = tp->txq_req;
10519 tp->rxq_cnt = tp->rxq_req;
10520 if (!tp->rxq_cnt)
10521 tp->rxq_cnt = netif_get_num_default_rss_queues();
10522 if (tp->rxq_cnt > tp->rxq_max)
10523 tp->rxq_cnt = tp->rxq_max;
10525 /* Disable multiple TX rings by default. Simple round-robin hardware
10526 * scheduling of the TX rings can cause starvation of rings with
10527 * small packets when other rings have TSO or jumbo packets.
10529 if (!tp->txq_req)
10530 tp->txq_cnt = 1;
10532 tp->irq_cnt = tg3_irq_count(tp);
10534 for (i = 0; i < tp->irq_max; i++) {
10535 msix_ent[i].entry = i;
10536 msix_ent[i].vector = 0;
10539 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10540 if (rc < 0) {
10541 return false;
10542 } else if (rc != 0) {
10543 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10544 return false;
10545 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10546 tp->irq_cnt, rc);
10547 tp->irq_cnt = rc;
10548 tp->rxq_cnt = max(rc - 1, 1);
10549 if (tp->txq_cnt)
10550 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10553 for (i = 0; i < tp->irq_max; i++)
10554 tp->napi[i].irq_vec = msix_ent[i].vector;
10556 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10557 pci_disable_msix(tp->pdev);
10558 return false;
10561 if (tp->irq_cnt == 1)
10562 return true;
10564 tg3_flag_set(tp, ENABLE_RSS);
10566 if (tp->txq_cnt > 1)
10567 tg3_flag_set(tp, ENABLE_TSS);
10569 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10571 return true;
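/* Editorial note: pci_enable_msix(), as it behaved when this code was
 * written, returns 0 on success, a negative errno on hard failure, or a
 * positive count of the vectors actually available when the request was
 * too large -- which is why the function above retries once with rc and
 * then shrinks irq_cnt/rxq_cnt to fit.
 */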
10574 static void tg3_ints_init(struct tg3 *tp)
10576 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10577 !tg3_flag(tp, TAGGED_STATUS)) {
10578 /* All MSI supporting chips should support tagged
10579 * status. Assert that this is the case.
10581 netdev_warn(tp->dev,
10582 "MSI without TAGGED_STATUS? Not using MSI\n");
10583 goto defcfg;
10586 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10587 tg3_flag_set(tp, USING_MSIX);
10588 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10589 tg3_flag_set(tp, USING_MSI);
10591 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10592 u32 msi_mode = tr32(MSGINT_MODE);
10593 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10594 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10595 if (!tg3_flag(tp, 1SHOT_MSI))
10596 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10597 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10599 defcfg:
10600 if (!tg3_flag(tp, USING_MSIX)) {
10601 tp->irq_cnt = 1;
10602 tp->napi[0].irq_vec = tp->pdev->irq;
10605 if (tp->irq_cnt == 1) {
10606 tp->txq_cnt = 1;
10607 tp->rxq_cnt = 1;
10608 netif_set_real_num_tx_queues(tp->dev, 1);
10609 netif_set_real_num_rx_queues(tp->dev, 1);
10613 static void tg3_ints_fini(struct tg3 *tp)
10615 if (tg3_flag(tp, USING_MSIX))
10616 pci_disable_msix(tp->pdev);
10617 else if (tg3_flag(tp, USING_MSI))
10618 pci_disable_msi(tp->pdev);
10619 tg3_flag_clear(tp, USING_MSI);
10620 tg3_flag_clear(tp, USING_MSIX);
10621 tg3_flag_clear(tp, ENABLE_RSS);
10622 tg3_flag_clear(tp, ENABLE_TSS);
10625 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10626 bool init)
10628 struct net_device *dev = tp->dev;
10629 int i, err;
10632 * Setup interrupts first so we know how
10633 * many NAPI resources to allocate
10635 tg3_ints_init(tp);
10637 tg3_rss_check_indir_tbl(tp);
10639 /* The placement of this call is tied
10640 * to the setup and use of Host TX descriptors.
10642 err = tg3_alloc_consistent(tp);
10643 if (err)
10644 goto err_out1;
10646 tg3_napi_init(tp);
10648 tg3_napi_enable(tp);
10650 for (i = 0; i < tp->irq_cnt; i++) {
10651 struct tg3_napi *tnapi = &tp->napi[i];
10652 err = tg3_request_irq(tp, i);
10653 if (err) {
10654 for (i--; i >= 0; i--) {
10655 tnapi = &tp->napi[i];
10656 free_irq(tnapi->irq_vec, tnapi);
10658 goto err_out2;
10662 tg3_full_lock(tp, 0);
10664 err = tg3_init_hw(tp, reset_phy);
10665 if (err) {
10666 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10667 tg3_free_rings(tp);
10670 tg3_full_unlock(tp);
10672 if (err)
10673 goto err_out3;
10675 if (test_irq && tg3_flag(tp, USING_MSI)) {
10676 err = tg3_test_msi(tp);
10678 if (err) {
10679 tg3_full_lock(tp, 0);
10680 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10681 tg3_free_rings(tp);
10682 tg3_full_unlock(tp);
10684 goto err_out2;
10687 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10688 u32 val = tr32(PCIE_TRANSACTION_CFG);
10690 tw32(PCIE_TRANSACTION_CFG,
10691 val | PCIE_TRANS_CFG_1SHOT_MSI);
10695 tg3_phy_start(tp);
10697 tg3_hwmon_open(tp);
10699 tg3_full_lock(tp, 0);
10701 tg3_timer_start(tp);
10702 tg3_flag_set(tp, INIT_COMPLETE);
10703 tg3_enable_ints(tp);
10705 if (init)
10706 tg3_ptp_init(tp);
10707 else
10708 tg3_ptp_resume(tp);
10711 tg3_full_unlock(tp);
10713 netif_tx_start_all_queues(dev);
10716 * Reset loopback feature if it was turned on while the device was down
10717 * and make sure that it's installed properly now.
10719 if (dev->features & NETIF_F_LOOPBACK)
10720 tg3_set_loopback(dev, dev->features);
10722 return 0;
10724 err_out3:
10725 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10726 struct tg3_napi *tnapi = &tp->napi[i];
10727 free_irq(tnapi->irq_vec, tnapi);
10730 err_out2:
10731 tg3_napi_disable(tp);
10732 tg3_napi_fini(tp);
10733 tg3_free_consistent(tp);
10735 err_out1:
10736 tg3_ints_fini(tp);
10738 return err;
10741 static void tg3_stop(struct tg3 *tp)
10743 int i;
10745 tg3_reset_task_cancel(tp);
10746 tg3_netif_stop(tp);
10748 tg3_timer_stop(tp);
10750 tg3_hwmon_close(tp);
10752 tg3_phy_stop(tp);
10754 tg3_full_lock(tp, 1);
10756 tg3_disable_ints(tp);
10758 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10759 tg3_free_rings(tp);
10760 tg3_flag_clear(tp, INIT_COMPLETE);
10762 tg3_full_unlock(tp);
10764 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10765 struct tg3_napi *tnapi = &tp->napi[i];
10766 free_irq(tnapi->irq_vec, tnapi);
10769 tg3_ints_fini(tp);
10771 tg3_napi_fini(tp);
10773 tg3_free_consistent(tp);
10776 static int tg3_open(struct net_device *dev)
10778 struct tg3 *tp = netdev_priv(dev);
10779 int err;
10781 if (tp->fw_needed) {
10782 err = tg3_request_firmware(tp);
10783 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10784 if (err)
10785 return err;
10786 } else if (err) {
10787 netdev_warn(tp->dev, "TSO capability disabled\n");
10788 tg3_flag_clear(tp, TSO_CAPABLE);
10789 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10790 netdev_notice(tp->dev, "TSO capability restored\n");
10791 tg3_flag_set(tp, TSO_CAPABLE);
10795 tg3_carrier_off(tp);
10797 err = tg3_power_up(tp);
10798 if (err)
10799 return err;
10801 tg3_full_lock(tp, 0);
10803 tg3_disable_ints(tp);
10804 tg3_flag_clear(tp, INIT_COMPLETE);
10806 tg3_full_unlock(tp);
10808 err = tg3_start(tp, true, true, true);
10809 if (err) {
10810 tg3_frob_aux_power(tp, false);
10811 pci_set_power_state(tp->pdev, PCI_D3hot);
10814 if (tg3_flag(tp, PTP_CAPABLE)) {
10815 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
10816 &tp->pdev->dev);
10817 if (IS_ERR(tp->ptp_clock))
10818 tp->ptp_clock = NULL;
10821 return err;
10824 static int tg3_close(struct net_device *dev)
10826 struct tg3 *tp = netdev_priv(dev);
10828 tg3_ptp_fini(tp);
10830 tg3_stop(tp);
10832 /* Clear stats across close / open calls */
10833 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10834 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10836 tg3_power_down(tp);
10838 tg3_carrier_off(tp);
10840 return 0;
10843 static inline u64 get_stat64(tg3_stat64_t *val)
10845 return ((u64)val->high << 32) | ((u64)val->low);
10848 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10850 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10852 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10853 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10854 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10855 u32 val;
10857 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10858 tg3_writephy(tp, MII_TG3_TEST1,
10859 val | MII_TG3_TEST1_CRC_EN);
10860 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10861 } else
10862 val = 0;
10864 tp->phy_crc_errors += val;
10866 return tp->phy_crc_errors;
10869 return get_stat64(&hw_stats->rx_fcs_errors);
10872 #define ESTAT_ADD(member) \
10873 estats->member = old_estats->member + \
10874 get_stat64(&hw_stats->member)
10876 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10878 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10879 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10881 ESTAT_ADD(rx_octets);
10882 ESTAT_ADD(rx_fragments);
10883 ESTAT_ADD(rx_ucast_packets);
10884 ESTAT_ADD(rx_mcast_packets);
10885 ESTAT_ADD(rx_bcast_packets);
10886 ESTAT_ADD(rx_fcs_errors);
10887 ESTAT_ADD(rx_align_errors);
10888 ESTAT_ADD(rx_xon_pause_rcvd);
10889 ESTAT_ADD(rx_xoff_pause_rcvd);
10890 ESTAT_ADD(rx_mac_ctrl_rcvd);
10891 ESTAT_ADD(rx_xoff_entered);
10892 ESTAT_ADD(rx_frame_too_long_errors);
10893 ESTAT_ADD(rx_jabbers);
10894 ESTAT_ADD(rx_undersize_packets);
10895 ESTAT_ADD(rx_in_length_errors);
10896 ESTAT_ADD(rx_out_length_errors);
10897 ESTAT_ADD(rx_64_or_less_octet_packets);
10898 ESTAT_ADD(rx_65_to_127_octet_packets);
10899 ESTAT_ADD(rx_128_to_255_octet_packets);
10900 ESTAT_ADD(rx_256_to_511_octet_packets);
10901 ESTAT_ADD(rx_512_to_1023_octet_packets);
10902 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10903 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10904 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10905 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10906 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10908 ESTAT_ADD(tx_octets);
10909 ESTAT_ADD(tx_collisions);
10910 ESTAT_ADD(tx_xon_sent);
10911 ESTAT_ADD(tx_xoff_sent);
10912 ESTAT_ADD(tx_flow_control);
10913 ESTAT_ADD(tx_mac_errors);
10914 ESTAT_ADD(tx_single_collisions);
10915 ESTAT_ADD(tx_mult_collisions);
10916 ESTAT_ADD(tx_deferred);
10917 ESTAT_ADD(tx_excessive_collisions);
10918 ESTAT_ADD(tx_late_collisions);
10919 ESTAT_ADD(tx_collide_2times);
10920 ESTAT_ADD(tx_collide_3times);
10921 ESTAT_ADD(tx_collide_4times);
10922 ESTAT_ADD(tx_collide_5times);
10923 ESTAT_ADD(tx_collide_6times);
10924 ESTAT_ADD(tx_collide_7times);
10925 ESTAT_ADD(tx_collide_8times);
10926 ESTAT_ADD(tx_collide_9times);
10927 ESTAT_ADD(tx_collide_10times);
10928 ESTAT_ADD(tx_collide_11times);
10929 ESTAT_ADD(tx_collide_12times);
10930 ESTAT_ADD(tx_collide_13times);
10931 ESTAT_ADD(tx_collide_14times);
10932 ESTAT_ADD(tx_collide_15times);
10933 ESTAT_ADD(tx_ucast_packets);
10934 ESTAT_ADD(tx_mcast_packets);
10935 ESTAT_ADD(tx_bcast_packets);
10936 ESTAT_ADD(tx_carrier_sense_errors);
10937 ESTAT_ADD(tx_discards);
10938 ESTAT_ADD(tx_errors);
10940 ESTAT_ADD(dma_writeq_full);
10941 ESTAT_ADD(dma_write_prioq_full);
10942 ESTAT_ADD(rxbds_empty);
10943 ESTAT_ADD(rx_discards);
10944 ESTAT_ADD(rx_errors);
10945 ESTAT_ADD(rx_threshold_hit);
10947 ESTAT_ADD(dma_readq_full);
10948 ESTAT_ADD(dma_read_prioq_full);
10949 ESTAT_ADD(tx_comp_queue_full);
10951 ESTAT_ADD(ring_set_send_prod_index);
10952 ESTAT_ADD(ring_status_update);
10953 ESTAT_ADD(nic_irqs);
10954 ESTAT_ADD(nic_avoided_irqs);
10955 ESTAT_ADD(nic_tx_threshold_hit);
10957 ESTAT_ADD(mbuf_lwm_thresh_hit);
10960 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10962 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10963 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10965 stats->rx_packets = old_stats->rx_packets +
10966 get_stat64(&hw_stats->rx_ucast_packets) +
10967 get_stat64(&hw_stats->rx_mcast_packets) +
10968 get_stat64(&hw_stats->rx_bcast_packets);
10970 stats->tx_packets = old_stats->tx_packets +
10971 get_stat64(&hw_stats->tx_ucast_packets) +
10972 get_stat64(&hw_stats->tx_mcast_packets) +
10973 get_stat64(&hw_stats->tx_bcast_packets);
10975 stats->rx_bytes = old_stats->rx_bytes +
10976 get_stat64(&hw_stats->rx_octets);
10977 stats->tx_bytes = old_stats->tx_bytes +
10978 get_stat64(&hw_stats->tx_octets);
10980 stats->rx_errors = old_stats->rx_errors +
10981 get_stat64(&hw_stats->rx_errors);
10982 stats->tx_errors = old_stats->tx_errors +
10983 get_stat64(&hw_stats->tx_errors) +
10984 get_stat64(&hw_stats->tx_mac_errors) +
10985 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10986 get_stat64(&hw_stats->tx_discards);
10988 stats->multicast = old_stats->multicast +
10989 get_stat64(&hw_stats->rx_mcast_packets);
10990 stats->collisions = old_stats->collisions +
10991 get_stat64(&hw_stats->tx_collisions);
10993 stats->rx_length_errors = old_stats->rx_length_errors +
10994 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10995 get_stat64(&hw_stats->rx_undersize_packets);
10997 stats->rx_over_errors = old_stats->rx_over_errors +
10998 get_stat64(&hw_stats->rxbds_empty);
10999 stats->rx_frame_errors = old_stats->rx_frame_errors +
11000 get_stat64(&hw_stats->rx_align_errors);
11001 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11002 get_stat64(&hw_stats->tx_discards);
11003 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11004 get_stat64(&hw_stats->tx_carrier_sense_errors);
11006 stats->rx_crc_errors = old_stats->rx_crc_errors +
11007 tg3_calc_crc_errors(tp);
11009 stats->rx_missed_errors = old_stats->rx_missed_errors +
11010 get_stat64(&hw_stats->rx_discards);
11012 stats->rx_dropped = tp->rx_dropped;
11013 stats->tx_dropped = tp->tx_dropped;
11016 static int tg3_get_regs_len(struct net_device *dev)
11018 return TG3_REG_BLK_SIZE;
11021 static void tg3_get_regs(struct net_device *dev,
11022 struct ethtool_regs *regs, void *_p)
11024 struct tg3 *tp = netdev_priv(dev);
11026 regs->version = 0;
11028 memset(_p, 0, TG3_REG_BLK_SIZE);
11030 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11031 return;
11033 tg3_full_lock(tp, 0);
11035 tg3_dump_legacy_regs(tp, (u32 *)_p);
11037 tg3_full_unlock(tp);
11040 static int tg3_get_eeprom_len(struct net_device *dev)
11042 struct tg3 *tp = netdev_priv(dev);
11044 return tp->nvram_size;
11047 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11049 struct tg3 *tp = netdev_priv(dev);
11050 int ret;
11051 u8 *pd;
11052 u32 i, offset, len, b_offset, b_count;
11053 __be32 val;
11055 if (tg3_flag(tp, NO_NVRAM))
11056 return -EINVAL;
11058 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11059 return -EAGAIN;
11061 offset = eeprom->offset;
11062 len = eeprom->len;
11063 eeprom->len = 0;
11065 eeprom->magic = TG3_EEPROM_MAGIC;
11067 if (offset & 3) {
11068 /* adjustments to start on required 4 byte boundary */
11069 b_offset = offset & 3;
11070 b_count = 4 - b_offset;
11071 if (b_count > len) {
11072 /* i.e. offset=1 len=2 */
11073 b_count = len;
11075 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11076 if (ret)
11077 return ret;
11078 memcpy(data, ((char *)&val) + b_offset, b_count);
11079 len -= b_count;
11080 offset += b_count;
11081 eeprom->len += b_count;
11084 /* read bytes up to the last 4 byte boundary */
11085 pd = &data[eeprom->len];
11086 for (i = 0; i < (len - (len & 3)); i += 4) {
11087 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11088 if (ret) {
11089 eeprom->len += i;
11090 return ret;
11092 memcpy(pd + i, &val, 4);
11094 eeprom->len += i;
11096 if (len & 3) {
11097 /* read last bytes not ending on 4 byte boundary */
11098 pd = &data[eeprom->len];
11099 b_count = len & 3;
11100 b_offset = offset + len - b_count;
11101 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11102 if (ret)
11103 return ret;
11104 memcpy(pd, &val, b_count);
11105 eeprom->len += b_count;
11107 return 0;
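/* Editorial worked example for the three phases above: a request with
 * offset = 5, len = 9 (bytes 5..13) becomes
 *   1. head: read the word at 4, copy bytes 5..7 (b_offset = 1, b_count = 3)
 *   2. body: read the word at 8, copy bytes 8..11 (one aligned word)
 *   3. tail: read the word at 12, copy bytes 12..13 (b_count = len & 3 = 2)
 * so every NVRAM access is 4-byte aligned while the caller sees an
 * arbitrary byte range.
 */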
11110 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11112 struct tg3 *tp = netdev_priv(dev);
11113 int ret;
11114 u32 offset, len, b_offset, odd_len;
11115 u8 *buf;
11116 __be32 start, end;
11118 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11119 return -EAGAIN;
11121 if (tg3_flag(tp, NO_NVRAM) ||
11122 eeprom->magic != TG3_EEPROM_MAGIC)
11123 return -EINVAL;
11125 offset = eeprom->offset;
11126 len = eeprom->len;
11128 if ((b_offset = (offset & 3))) {
11129 /* adjustments to start on required 4 byte boundary */
11130 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11131 if (ret)
11132 return ret;
11133 len += b_offset;
11134 offset &= ~3;
11135 if (len < 4)
11136 len = 4;
11139 odd_len = 0;
11140 if (len & 3) {
11141 /* adjustments to end on required 4 byte boundary */
11142 odd_len = 1;
11143 len = (len + 3) & ~3;
11144 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11145 if (ret)
11146 return ret;
11149 buf = data;
11150 if (b_offset || odd_len) {
11151 buf = kmalloc(len, GFP_KERNEL);
11152 if (!buf)
11153 return -ENOMEM;
11154 if (b_offset)
11155 memcpy(buf, &start, 4);
11156 if (odd_len)
11157 memcpy(buf+len-4, &end, 4);
11158 memcpy(buf + b_offset, data, eeprom->len);
11161 ret = tg3_nvram_write_block(tp, offset, len, buf);
11163 if (buf != data)
11164 kfree(buf);
11166 return ret;
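/* Editorial note: the write path above is a read-modify-write.  An
 * unaligned head or tail is first read from NVRAM (start/end), the
 * caller's bytes are spliced into a bounce buffer at b_offset, and the
 * widened, 4-byte-aligned span is written back with a single
 * tg3_nvram_write_block() call.
 */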
11169 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11171 struct tg3 *tp = netdev_priv(dev);
11173 if (tg3_flag(tp, USE_PHYLIB)) {
11174 struct phy_device *phydev;
11175 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11176 return -EAGAIN;
11177 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11178 return phy_ethtool_gset(phydev, cmd);
11181 cmd->supported = (SUPPORTED_Autoneg);
11183 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11184 cmd->supported |= (SUPPORTED_1000baseT_Half |
11185 SUPPORTED_1000baseT_Full);
11187 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11188 cmd->supported |= (SUPPORTED_100baseT_Half |
11189 SUPPORTED_100baseT_Full |
11190 SUPPORTED_10baseT_Half |
11191 SUPPORTED_10baseT_Full |
11192 SUPPORTED_TP);
11193 cmd->port = PORT_TP;
11194 } else {
11195 cmd->supported |= SUPPORTED_FIBRE;
11196 cmd->port = PORT_FIBRE;
11199 cmd->advertising = tp->link_config.advertising;
11200 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11201 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11202 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11203 cmd->advertising |= ADVERTISED_Pause;
11204 } else {
11205 cmd->advertising |= ADVERTISED_Pause |
11206 ADVERTISED_Asym_Pause;
11208 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11209 cmd->advertising |= ADVERTISED_Asym_Pause;
11212 if (netif_running(dev) && tp->link_up) {
11213 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11214 cmd->duplex = tp->link_config.active_duplex;
11215 cmd->lp_advertising = tp->link_config.rmt_adv;
11216 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11217 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11218 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11219 else
11220 cmd->eth_tp_mdix = ETH_TP_MDI;
11222 } else {
11223 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11224 cmd->duplex = DUPLEX_UNKNOWN;
11225 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11227 cmd->phy_address = tp->phy_addr;
11228 cmd->transceiver = XCVR_INTERNAL;
11229 cmd->autoneg = tp->link_config.autoneg;
11230 cmd->maxtxpkt = 0;
11231 cmd->maxrxpkt = 0;
11232 return 0;
11235 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11237 struct tg3 *tp = netdev_priv(dev);
11238 u32 speed = ethtool_cmd_speed(cmd);
11240 if (tg3_flag(tp, USE_PHYLIB)) {
11241 struct phy_device *phydev;
11242 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11243 return -EAGAIN;
11244 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11245 return phy_ethtool_sset(phydev, cmd);
11248 if (cmd->autoneg != AUTONEG_ENABLE &&
11249 cmd->autoneg != AUTONEG_DISABLE)
11250 return -EINVAL;
11252 if (cmd->autoneg == AUTONEG_DISABLE &&
11253 cmd->duplex != DUPLEX_FULL &&
11254 cmd->duplex != DUPLEX_HALF)
11255 return -EINVAL;
11257 if (cmd->autoneg == AUTONEG_ENABLE) {
11258 u32 mask = ADVERTISED_Autoneg |
11259 ADVERTISED_Pause |
11260 ADVERTISED_Asym_Pause;
11262 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11263 mask |= ADVERTISED_1000baseT_Half |
11264 ADVERTISED_1000baseT_Full;
11266 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11267 mask |= ADVERTISED_100baseT_Half |
11268 ADVERTISED_100baseT_Full |
11269 ADVERTISED_10baseT_Half |
11270 ADVERTISED_10baseT_Full |
11271 ADVERTISED_TP;
11272 else
11273 mask |= ADVERTISED_FIBRE;
11275 if (cmd->advertising & ~mask)
11276 return -EINVAL;
11278 mask &= (ADVERTISED_1000baseT_Half |
11279 ADVERTISED_1000baseT_Full |
11280 ADVERTISED_100baseT_Half |
11281 ADVERTISED_100baseT_Full |
11282 ADVERTISED_10baseT_Half |
11283 ADVERTISED_10baseT_Full);
11285 cmd->advertising &= mask;
11286 } else {
11287 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11288 if (speed != SPEED_1000)
11289 return -EINVAL;
11291 if (cmd->duplex != DUPLEX_FULL)
11292 return -EINVAL;
11293 } else {
11294 if (speed != SPEED_100 &&
11295 speed != SPEED_10)
11296 return -EINVAL;
11300 tg3_full_lock(tp, 0);
11302 tp->link_config.autoneg = cmd->autoneg;
11303 if (cmd->autoneg == AUTONEG_ENABLE) {
11304 tp->link_config.advertising = (cmd->advertising |
11305 ADVERTISED_Autoneg);
11306 tp->link_config.speed = SPEED_UNKNOWN;
11307 tp->link_config.duplex = DUPLEX_UNKNOWN;
11308 } else {
11309 tp->link_config.advertising = 0;
11310 tp->link_config.speed = speed;
11311 tp->link_config.duplex = cmd->duplex;
11314 if (netif_running(dev))
11315 tg3_setup_phy(tp, 1);
11317 tg3_full_unlock(tp);
11319 return 0;
11322 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11324 struct tg3 *tp = netdev_priv(dev);
11326 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11327 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11328 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11329 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11332 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11334 struct tg3 *tp = netdev_priv(dev);
11336 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11337 wol->supported = WAKE_MAGIC;
11338 else
11339 wol->supported = 0;
11340 wol->wolopts = 0;
11341 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11342 wol->wolopts = WAKE_MAGIC;
11343 memset(&wol->sopass, 0, sizeof(wol->sopass));
11346 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11348 struct tg3 *tp = netdev_priv(dev);
11349 struct device *dp = &tp->pdev->dev;
11351 if (wol->wolopts & ~WAKE_MAGIC)
11352 return -EINVAL;
11353 if ((wol->wolopts & WAKE_MAGIC) &&
11354 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11355 return -EINVAL;
11357 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11359 spin_lock_bh(&tp->lock);
11360 if (device_may_wakeup(dp))
11361 tg3_flag_set(tp, WOL_ENABLE);
11362 else
11363 tg3_flag_clear(tp, WOL_ENABLE);
11364 spin_unlock_bh(&tp->lock);
11366 return 0;
11369 static u32 tg3_get_msglevel(struct net_device *dev)
11371 struct tg3 *tp = netdev_priv(dev);
11372 return tp->msg_enable;
11375 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11377 struct tg3 *tp = netdev_priv(dev);
11378 tp->msg_enable = value;
11381 static int tg3_nway_reset(struct net_device *dev)
11383 struct tg3 *tp = netdev_priv(dev);
11384 int r;
11386 if (!netif_running(dev))
11387 return -EAGAIN;
11389 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11390 return -EINVAL;
11392 if (tg3_flag(tp, USE_PHYLIB)) {
11393 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11394 return -EAGAIN;
11395 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11396 } else {
11397 u32 bmcr;
11399 spin_lock_bh(&tp->lock);
11400 r = -EINVAL;
11401 tg3_readphy(tp, MII_BMCR, &bmcr);
11402 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11403 ((bmcr & BMCR_ANENABLE) ||
11404 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11405 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11406 BMCR_ANENABLE);
11407 r = 0;
11409 spin_unlock_bh(&tp->lock);
11412 return r;
11415 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11417 struct tg3 *tp = netdev_priv(dev);
11419 ering->rx_max_pending = tp->rx_std_ring_mask;
11420 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11421 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11422 else
11423 ering->rx_jumbo_max_pending = 0;
11425 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11427 ering->rx_pending = tp->rx_pending;
11428 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11429 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11430 else
11431 ering->rx_jumbo_pending = 0;
11433 ering->tx_pending = tp->napi[0].tx_pending;
11436 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11438 struct tg3 *tp = netdev_priv(dev);
11439 int i, irq_sync = 0, err = 0;
11441 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11442 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11443 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11444 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11445 (tg3_flag(tp, TSO_BUG) &&
11446 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11447 return -EINVAL;
11449 if (netif_running(dev)) {
11450 tg3_phy_stop(tp);
11451 tg3_netif_stop(tp);
11452 irq_sync = 1;
11455 tg3_full_lock(tp, irq_sync);
11457 tp->rx_pending = ering->rx_pending;
11459 if (tg3_flag(tp, MAX_RXPEND_64) &&
11460 tp->rx_pending > 63)
11461 tp->rx_pending = 63;
11462 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11464 for (i = 0; i < tp->irq_max; i++)
11465 tp->napi[i].tx_pending = ering->tx_pending;
11467 if (netif_running(dev)) {
11468 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11469 err = tg3_restart_hw(tp, 1);
11470 if (!err)
11471 tg3_netif_start(tp);
11474 tg3_full_unlock(tp);
11476 if (irq_sync && !err)
11477 tg3_phy_start(tp);
11479 return err;
11482 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11484 struct tg3 *tp = netdev_priv(dev);
11486 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11488 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11489 epause->rx_pause = 1;
11490 else
11491 epause->rx_pause = 0;
11493 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11494 epause->tx_pause = 1;
11495 else
11496 epause->tx_pause = 0;
11499 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11501 struct tg3 *tp = netdev_priv(dev);
11502 int err = 0;
11504 if (tg3_flag(tp, USE_PHYLIB)) {
11505 u32 newadv;
11506 struct phy_device *phydev;
11508 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11510 if (!(phydev->supported & SUPPORTED_Pause) ||
11511 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11512 (epause->rx_pause != epause->tx_pause)))
11513 return -EINVAL;
11515 tp->link_config.flowctrl = 0;
11516 if (epause->rx_pause) {
11517 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11519 if (epause->tx_pause) {
11520 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11521 newadv = ADVERTISED_Pause;
11522 } else
11523 newadv = ADVERTISED_Pause |
11524 ADVERTISED_Asym_Pause;
11525 } else if (epause->tx_pause) {
11526 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11527 newadv = ADVERTISED_Asym_Pause;
11528 } else
11529 newadv = 0;
11531 if (epause->autoneg)
11532 tg3_flag_set(tp, PAUSE_AUTONEG);
11533 else
11534 tg3_flag_clear(tp, PAUSE_AUTONEG);
11536 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11537 u32 oldadv = phydev->advertising &
11538 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11539 if (oldadv != newadv) {
11540 phydev->advertising &=
11541 ~(ADVERTISED_Pause |
11542 ADVERTISED_Asym_Pause);
11543 phydev->advertising |= newadv;
11544 if (phydev->autoneg) {
11546 * Always renegotiate the link to
11547 * inform our link partner of our
11548 * flow control settings, even if the
11549 * flow control is forced. Let
11550 * tg3_adjust_link() do the final
11551 * flow control setup.
11553 return phy_start_aneg(phydev);
11557 if (!epause->autoneg)
11558 tg3_setup_flow_control(tp, 0, 0);
11559 } else {
11560 tp->link_config.advertising &=
11561 ~(ADVERTISED_Pause |
11562 ADVERTISED_Asym_Pause);
11563 tp->link_config.advertising |= newadv;
11565 } else {
11566 int irq_sync = 0;
11568 if (netif_running(dev)) {
11569 tg3_netif_stop(tp);
11570 irq_sync = 1;
11573 tg3_full_lock(tp, irq_sync);
11575 if (epause->autoneg)
11576 tg3_flag_set(tp, PAUSE_AUTONEG);
11577 else
11578 tg3_flag_clear(tp, PAUSE_AUTONEG);
11579 if (epause->rx_pause)
11580 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11581 else
11582 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11583 if (epause->tx_pause)
11584 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11585 else
11586 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11588 if (netif_running(dev)) {
11589 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11590 err = tg3_restart_hw(tp, 1);
11591 if (!err)
11592 tg3_netif_start(tp);
11595 tg3_full_unlock(tp);
11598 return err;
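/* Editorial truth table for the rx/tx pause -> advertisement mapping in
 * the phylib branch above (the standard 802.3x pause resolution encoding):
 *
 *   rx_pause  tx_pause   newadv
 *      1         1       ADVERTISED_Pause
 *      1         0       ADVERTISED_Pause | ADVERTISED_Asym_Pause
 *      0         1       ADVERTISED_Asym_Pause
 *      0         0       0
 */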
11601 static int tg3_get_sset_count(struct net_device *dev, int sset)
11603 switch (sset) {
11604 case ETH_SS_TEST:
11605 return TG3_NUM_TEST;
11606 case ETH_SS_STATS:
11607 return TG3_NUM_STATS;
11608 default:
11609 return -EOPNOTSUPP;
11613 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11614 u32 *rules __always_unused)
11616 struct tg3 *tp = netdev_priv(dev);
11618 if (!tg3_flag(tp, SUPPORT_MSIX))
11619 return -EOPNOTSUPP;
11621 switch (info->cmd) {
11622 case ETHTOOL_GRXRINGS:
11623 if (netif_running(tp->dev))
11624 info->data = tp->rxq_cnt;
11625 else {
11626 info->data = num_online_cpus();
11627 if (info->data > TG3_RSS_MAX_NUM_QS)
11628 info->data = TG3_RSS_MAX_NUM_QS;
11631 /* The first interrupt vector only
11632 * handles link interrupts.
11634 info->data -= 1;
11635 return 0;
11637 default:
11638 return -EOPNOTSUPP;
11642 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11644 u32 size = 0;
11645 struct tg3 *tp = netdev_priv(dev);
11647 if (tg3_flag(tp, SUPPORT_MSIX))
11648 size = TG3_RSS_INDIR_TBL_SIZE;
11650 return size;
11653 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11655 struct tg3 *tp = netdev_priv(dev);
11656 int i;
11658 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11659 indir[i] = tp->rss_ind_tbl[i];
11661 return 0;
11664 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11666 struct tg3 *tp = netdev_priv(dev);
11667 size_t i;
11669 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11670 tp->rss_ind_tbl[i] = indir[i];
11672 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11673 return 0;
11675 /* It is legal to write the indirection
11676 * table while the device is running.
11678 tg3_full_lock(tp, 0);
11679 tg3_rss_write_indir_tbl(tp);
11680 tg3_full_unlock(tp);
11682 return 0;
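/* Editorial sketch, not part of the driver: the copy loops above imply
 * that rss_ind_tbl holds TG3_RSS_INDIR_TBL_SIZE rx-queue indices keyed by
 * the low bits of the packet's RSS hash.  A lookup of that shape:
 */
static inline u32 tg3_sketch_rss_queue(const struct tg3 *tp, u32 hash)
{
/* each table slot names the rx queue that hash bucket is steered to */
return tp->rss_ind_tbl[hash % TG3_RSS_INDIR_TBL_SIZE];
}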
11685 static void tg3_get_channels(struct net_device *dev,
11686 struct ethtool_channels *channel)
11688 struct tg3 *tp = netdev_priv(dev);
11689 u32 deflt_qs = netif_get_num_default_rss_queues();
11691 channel->max_rx = tp->rxq_max;
11692 channel->max_tx = tp->txq_max;
11694 if (netif_running(dev)) {
11695 channel->rx_count = tp->rxq_cnt;
11696 channel->tx_count = tp->txq_cnt;
11697 } else {
11698 if (tp->rxq_req)
11699 channel->rx_count = tp->rxq_req;
11700 else
11701 channel->rx_count = min(deflt_qs, tp->rxq_max);
11703 if (tp->txq_req)
11704 channel->tx_count = tp->txq_req;
11705 else
11706 channel->tx_count = min(deflt_qs, tp->txq_max);
11710 static int tg3_set_channels(struct net_device *dev,
11711 struct ethtool_channels *channel)
11713 struct tg3 *tp = netdev_priv(dev);
11715 if (!tg3_flag(tp, SUPPORT_MSIX))
11716 return -EOPNOTSUPP;
11718 if (channel->rx_count > tp->rxq_max ||
11719 channel->tx_count > tp->txq_max)
11720 return -EINVAL;
11722 tp->rxq_req = channel->rx_count;
11723 tp->txq_req = channel->tx_count;
11725 if (!netif_running(dev))
11726 return 0;
11728 tg3_stop(tp);
11730 tg3_carrier_off(tp);
11732 tg3_start(tp, true, false, false);
11734 return 0;
11737 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11739 switch (stringset) {
11740 case ETH_SS_STATS:
11741 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11742 break;
11743 case ETH_SS_TEST:
11744 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11745 break;
11746 default:
11747 WARN_ON(1); /* we need a WARN() */
11748 break;
11752 static int tg3_set_phys_id(struct net_device *dev,
11753 enum ethtool_phys_id_state state)
11755 struct tg3 *tp = netdev_priv(dev);
11757 if (!netif_running(tp->dev))
11758 return -EAGAIN;
11760 switch (state) {
11761 case ETHTOOL_ID_ACTIVE:
11762 return 1; /* cycle on/off once per second */
11764 case ETHTOOL_ID_ON:
11765 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11766 LED_CTRL_1000MBPS_ON |
11767 LED_CTRL_100MBPS_ON |
11768 LED_CTRL_10MBPS_ON |
11769 LED_CTRL_TRAFFIC_OVERRIDE |
11770 LED_CTRL_TRAFFIC_BLINK |
11771 LED_CTRL_TRAFFIC_LED);
11772 break;
11774 case ETHTOOL_ID_OFF:
11775 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11776 LED_CTRL_TRAFFIC_OVERRIDE);
11777 break;
11779 case ETHTOOL_ID_INACTIVE:
11780 tw32(MAC_LED_CTRL, tp->led_ctrl);
11781 break;
11784 return 0;
11787 static void tg3_get_ethtool_stats(struct net_device *dev,
11788 struct ethtool_stats *estats, u64 *tmp_stats)
11790 struct tg3 *tp = netdev_priv(dev);
11792 if (tp->hw_stats)
11793 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11794 else
11795 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11798 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11800 int i;
11801 __be32 *buf;
11802 u32 offset = 0, len = 0;
11803 u32 magic, val;
11805 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11806 return NULL;
11808 if (magic == TG3_EEPROM_MAGIC) {
11809 for (offset = TG3_NVM_DIR_START;
11810 offset < TG3_NVM_DIR_END;
11811 offset += TG3_NVM_DIRENT_SIZE) {
11812 if (tg3_nvram_read(tp, offset, &val))
11813 return NULL;
11815 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11816 TG3_NVM_DIRTYPE_EXTVPD)
11817 break;
11820 if (offset != TG3_NVM_DIR_END) {
11821 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11822 if (tg3_nvram_read(tp, offset + 4, &offset))
11823 return NULL;
11825 offset = tg3_nvram_logical_addr(tp, offset);
11829 if (!offset || !len) {
11830 offset = TG3_NVM_VPD_OFF;
11831 len = TG3_NVM_VPD_LEN;
11834 buf = kmalloc(len, GFP_KERNEL);
11835 if (buf == NULL)
11836 return NULL;
11838 if (magic == TG3_EEPROM_MAGIC) {
11839 for (i = 0; i < len; i += 4) {
11840 /* The data is in little-endian format in NVRAM.
11841 * Use the big-endian read routines to preserve
11842 * the byte order as it exists in NVRAM.
11844 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11845 goto error;
11847 } else {
11848 u8 *ptr;
11849 ssize_t cnt;
11850 unsigned int pos = 0;
11852 ptr = (u8 *)&buf[0];
11853 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11854 cnt = pci_read_vpd(tp->pdev, pos,
11855 len - pos, ptr);
11856 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11857 cnt = 0;
11858 else if (cnt < 0)
11859 goto error;
11861 if (pos != len)
11862 goto error;
11865 *vpdlen = len;
11867 return buf;
11869 error:
11870 kfree(buf);
11871 return NULL;
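/* Editorial note on the endianness comment above: reading through
 * tg3_nvram_read_be32() into a __be32 buffer lands the bytes in host
 * memory exactly as they sit in NVRAM, on little- and big-endian CPUs
 * alike, so callers can parse the VPD block as a plain byte stream.
 */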
11874 #define NVRAM_TEST_SIZE 0x100
11875 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11876 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11877 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11878 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11879 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11880 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11881 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11882 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11884 static int tg3_test_nvram(struct tg3 *tp)
11886 u32 csum, magic, len;
11887 __be32 *buf;
11888 int i, j, k, err = 0, size;
11890 if (tg3_flag(tp, NO_NVRAM))
11891 return 0;
11893 if (tg3_nvram_read(tp, 0, &magic) != 0)
11894 return -EIO;
11896 if (magic == TG3_EEPROM_MAGIC)
11897 size = NVRAM_TEST_SIZE;
11898 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11899 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11900 TG3_EEPROM_SB_FORMAT_1) {
11901 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11902 case TG3_EEPROM_SB_REVISION_0:
11903 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11904 break;
11905 case TG3_EEPROM_SB_REVISION_2:
11906 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11907 break;
11908 case TG3_EEPROM_SB_REVISION_3:
11909 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11910 break;
11911 case TG3_EEPROM_SB_REVISION_4:
11912 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11913 break;
11914 case TG3_EEPROM_SB_REVISION_5:
11915 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11916 break;
11917 case TG3_EEPROM_SB_REVISION_6:
11918 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11919 break;
11920 default:
11921 return -EIO;
11923 } else
11924 return 0;
11925 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11926 size = NVRAM_SELFBOOT_HW_SIZE;
11927 else
11928 return -EIO;
11930 buf = kmalloc(size, GFP_KERNEL);
11931 if (buf == NULL)
11932 return -ENOMEM;
11934 err = -EIO;
11935 for (i = 0, j = 0; i < size; i += 4, j++) {
11936 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11937 if (err)
11938 break;
11940 if (i < size)
11941 goto out;
11943 /* Selfboot format */
11944 magic = be32_to_cpu(buf[0]);
11945 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11946 TG3_EEPROM_MAGIC_FW) {
11947 u8 *buf8 = (u8 *) buf, csum8 = 0;
11949 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11950 TG3_EEPROM_SB_REVISION_2) {
11951 /* For rev 2, the csum doesn't include the MBA. */
11952 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11953 csum8 += buf8[i];
11954 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11955 csum8 += buf8[i];
11956 } else {
11957 for (i = 0; i < size; i++)
11958 csum8 += buf8[i];
11961 if (csum8 == 0) {
11962 err = 0;
11963 goto out;
11966 err = -EIO;
11967 goto out;
11970 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11971 TG3_EEPROM_MAGIC_HW) {
11972 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11973 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11974 u8 *buf8 = (u8 *) buf;
11976 /* Separate the parity bits and the data bytes. */
11977 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11978 if ((i == 0) || (i == 8)) {
11979 int l;
11980 u8 msk;
11982 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11983 parity[k++] = buf8[i] & msk;
11984 i++;
11985 } else if (i == 16) {
11986 int l;
11987 u8 msk;
11989 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11990 parity[k++] = buf8[i] & msk;
11991 i++;
11993 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11994 parity[k++] = buf8[i] & msk;
11995 i++;
11997 data[j++] = buf8[i];
12000 err = -EIO;
12001 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12002 u8 hw8 = hweight8(data[i]);
12004 if ((hw8 & 0x1) && parity[i])
12005 goto out;
12006 else if (!(hw8 & 0x1) && !parity[i])
12007 goto out;
12009 err = 0;
12010 goto out;
12013 err = -EIO;
12015 /* Bootstrap checksum at offset 0x10 */
12016 csum = calc_crc((unsigned char *) buf, 0x10);
12017 if (csum != le32_to_cpu(buf[0x10/4]))
12018 goto out;
12020 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12021 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12022 if (csum != le32_to_cpu(buf[0xfc/4]))
12023 goto out;
12025 kfree(buf);
12027 buf = tg3_vpd_readblock(tp, &len);
12028 if (!buf)
12029 return -ENOMEM;
12031 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12032 if (i > 0) {
12033 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12034 if (j < 0)
12035 goto out;
12037 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12038 goto out;
12040 i += PCI_VPD_LRDT_TAG_SIZE;
12041 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12042 PCI_VPD_RO_KEYWORD_CHKSUM);
12043 if (j > 0) {
12044 u8 csum8 = 0;
12046 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12048 for (i = 0; i <= j; i++)
12049 csum8 += ((u8 *)buf)[i];
12051 if (csum8)
12052 goto out;
12056 err = 0;
12058 out:
12059 kfree(buf);
12060 return err;
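/* Editorial sketch, not part of the driver: the selfboot-HW loop above
 * requires each data byte plus its stored parity bit to have odd parity.
 * The same test written directly:
 */
static inline bool tg3_sketch_odd_parity_ok(u8 data, u8 parity_bit)
{
/* hweight8() counts set bits; data bits plus parity bit must sum odd */
return (hweight8(data) & 1) != !!parity_bit;
}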
12063 #define TG3_SERDES_TIMEOUT_SEC 2
12064 #define TG3_COPPER_TIMEOUT_SEC 6
12066 static int tg3_test_link(struct tg3 *tp)
12068 int i, max;
12070 if (!netif_running(tp->dev))
12071 return -ENODEV;
12073 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12074 max = TG3_SERDES_TIMEOUT_SEC;
12075 else
12076 max = TG3_COPPER_TIMEOUT_SEC;
12078 for (i = 0; i < max; i++) {
12079 if (tp->link_up)
12080 return 0;
12082 if (msleep_interruptible(1000))
12083 break;
12086 return -EIO;
12089 /* Only test the commonly used registers */
12090 static int tg3_test_registers(struct tg3 *tp)
12092 int i, is_5705, is_5750;
12093 u32 offset, read_mask, write_mask, val, save_val, read_val;
12094 static struct {
12095 u16 offset;
12096 u16 flags;
12097 #define TG3_FL_5705 0x1
12098 #define TG3_FL_NOT_5705 0x2
12099 #define TG3_FL_NOT_5788 0x4
12100 #define TG3_FL_NOT_5750 0x8
12101 u32 read_mask;
12102 u32 write_mask;
12103 } reg_tbl[] = {
12104 /* MAC Control Registers */
12105 { MAC_MODE, TG3_FL_NOT_5705,
12106 0x00000000, 0x00ef6f8c },
12107 { MAC_MODE, TG3_FL_5705,
12108 0x00000000, 0x01ef6b8c },
12109 { MAC_STATUS, TG3_FL_NOT_5705,
12110 0x03800107, 0x00000000 },
12111 { MAC_STATUS, TG3_FL_5705,
12112 0x03800100, 0x00000000 },
12113 { MAC_ADDR_0_HIGH, 0x0000,
12114 0x00000000, 0x0000ffff },
12115 { MAC_ADDR_0_LOW, 0x0000,
12116 0x00000000, 0xffffffff },
12117 { MAC_RX_MTU_SIZE, 0x0000,
12118 0x00000000, 0x0000ffff },
12119 { MAC_TX_MODE, 0x0000,
12120 0x00000000, 0x00000070 },
12121 { MAC_TX_LENGTHS, 0x0000,
12122 0x00000000, 0x00003fff },
12123 { MAC_RX_MODE, TG3_FL_NOT_5705,
12124 0x00000000, 0x000007fc },
12125 { MAC_RX_MODE, TG3_FL_5705,
12126 0x00000000, 0x000007dc },
12127 { MAC_HASH_REG_0, 0x0000,
12128 0x00000000, 0xffffffff },
12129 { MAC_HASH_REG_1, 0x0000,
12130 0x00000000, 0xffffffff },
12131 { MAC_HASH_REG_2, 0x0000,
12132 0x00000000, 0xffffffff },
12133 { MAC_HASH_REG_3, 0x0000,
12134 0x00000000, 0xffffffff },
12136 /* Receive Data and Receive BD Initiator Control Registers. */
12137 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12138 0x00000000, 0xffffffff },
12139 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12140 0x00000000, 0xffffffff },
12141 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12142 0x00000000, 0x00000003 },
12143 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12144 0x00000000, 0xffffffff },
12145 { RCVDBDI_STD_BD+0, 0x0000,
12146 0x00000000, 0xffffffff },
12147 { RCVDBDI_STD_BD+4, 0x0000,
12148 0x00000000, 0xffffffff },
12149 { RCVDBDI_STD_BD+8, 0x0000,
12150 0x00000000, 0xffff0002 },
12151 { RCVDBDI_STD_BD+0xc, 0x0000,
12152 0x00000000, 0xffffffff },
12154 /* Receive BD Initiator Control Registers. */
12155 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12156 0x00000000, 0xffffffff },
12157 { RCVBDI_STD_THRESH, TG3_FL_5705,
12158 0x00000000, 0x000003ff },
12159 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12160 0x00000000, 0xffffffff },
12162 /* Host Coalescing Control Registers. */
12163 { HOSTCC_MODE, TG3_FL_NOT_5705,
12164 0x00000000, 0x00000004 },
12165 { HOSTCC_MODE, TG3_FL_5705,
12166 0x00000000, 0x000000f6 },
12167 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12168 0x00000000, 0xffffffff },
12169 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12170 0x00000000, 0x000003ff },
12171 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12172 0x00000000, 0xffffffff },
12173 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12174 0x00000000, 0x000003ff },
12175 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12176 0x00000000, 0xffffffff },
12177 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12178 0x00000000, 0x000000ff },
12179 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12180 0x00000000, 0xffffffff },
12181 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12182 0x00000000, 0x000000ff },
12183 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12184 0x00000000, 0xffffffff },
12185 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12186 0x00000000, 0xffffffff },
12187 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12188 0x00000000, 0xffffffff },
12189 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12190 0x00000000, 0x000000ff },
12191 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12192 0x00000000, 0xffffffff },
12193 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12194 0x00000000, 0x000000ff },
12195 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12196 0x00000000, 0xffffffff },
12197 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12198 0x00000000, 0xffffffff },
12199 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12200 0x00000000, 0xffffffff },
12201 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12202 0x00000000, 0xffffffff },
12203 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12204 0x00000000, 0xffffffff },
12205 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12206 0xffffffff, 0x00000000 },
12207 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12208 0xffffffff, 0x00000000 },
12210 /* Buffer Manager Control Registers. */
12211 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12212 0x00000000, 0x007fff80 },
12213 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12214 0x00000000, 0x007fffff },
12215 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12216 0x00000000, 0x0000003f },
12217 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12218 0x00000000, 0x000001ff },
12219 { BUFMGR_MB_HIGH_WATER, 0x0000,
12220 0x00000000, 0x000001ff },
12221 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12222 0xffffffff, 0x00000000 },
12223 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12224 0xffffffff, 0x00000000 },
12226 /* Mailbox Registers */
12227 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12228 0x00000000, 0x000001ff },
12229 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12230 0x00000000, 0x000001ff },
12231 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12232 0x00000000, 0x000007ff },
12233 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12234 0x00000000, 0x000001ff },
12236 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12239 is_5705 = is_5750 = 0;
12240 if (tg3_flag(tp, 5705_PLUS)) {
12241 is_5705 = 1;
12242 if (tg3_flag(tp, 5750_PLUS))
12243 is_5750 = 1;
12246 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12247 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12248 continue;
12250 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12251 continue;
12253 if (tg3_flag(tp, IS_5788) &&
12254 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12255 continue;
12257 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12258 continue;
12260 offset = (u32) reg_tbl[i].offset;
12261 read_mask = reg_tbl[i].read_mask;
12262 write_mask = reg_tbl[i].write_mask;
12264 /* Save the original register content */
12265 save_val = tr32(offset);
12267 /* Determine the read-only value. */
12268 read_val = save_val & read_mask;
12270 /* Write zero to the register, then make sure the read-only bits
12271 * are not changed and the read/write bits are all zeros.
12273 tw32(offset, 0);
12275 val = tr32(offset);
12277 /* Test the read-only and read/write bits. */
12278 if (((val & read_mask) != read_val) || (val & write_mask))
12279 goto out;
12281 /* Write ones to all the bits defined by RdMask and WrMask, then
12282 * make sure the read-only bits are not changed and the
12283 * read/write bits are all ones.
12285 tw32(offset, read_mask | write_mask);
12287 val = tr32(offset);
12289 /* Test the read-only bits. */
12290 if ((val & read_mask) != read_val)
12291 goto out;
12293 /* Test the read/write bits. */
12294 if ((val & write_mask) != write_mask)
12295 goto out;
12297 tw32(offset, save_val);
12300 return 0;
12302 out:
12303 if (netif_msg_hw(tp))
12304 netdev_err(tp->dev,
12305 "Register test failed at offset %x\n", offset);
12306 tw32(offset, save_val);
12307 return -EIO;
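/* Editorial sketch of the per-entry test above (offsets and masks are
 * hypothetical; tr32/tw32 are the driver's register accessors):
 *
 *   save = tr32(off);
 *   ro   = save & read_mask;            // bits that must never change
 *   tw32(off, 0);                       // phase 1: write all zeros
 *   val  = tr32(off);
 *   fail if (val & read_mask) != ro or (val & write_mask) != 0
 *   tw32(off, read_mask | write_mask);  // phase 2: write all ones
 *   val  = tr32(off);
 *   fail if (val & read_mask) != ro or (val & write_mask) != write_mask
 *   tw32(off, save);                    // restore on every path
 */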
12310 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12312 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12313 int i;
12314 u32 j;
12316 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12317 for (j = 0; j < len; j += 4) {
12318 u32 val;
12320 tg3_write_mem(tp, offset + j, test_pattern[i]);
12321 tg3_read_mem(tp, offset + j, &val);
12322 if (val != test_pattern[i])
12323 return -EIO;
12326 return 0;
12329 static int tg3_test_memory(struct tg3 *tp)
12331 static struct mem_entry {
12332 u32 offset;
12333 u32 len;
12334 } mem_tbl_570x[] = {
12335 { 0x00000000, 0x00b50},
12336 { 0x00002000, 0x1c000},
12337 { 0xffffffff, 0x00000}
12338 }, mem_tbl_5705[] = {
12339 { 0x00000100, 0x0000c},
12340 { 0x00000200, 0x00008},
12341 { 0x00004000, 0x00800},
12342 { 0x00006000, 0x01000},
12343 { 0x00008000, 0x02000},
12344 { 0x00010000, 0x0e000},
12345 { 0xffffffff, 0x00000}
12346 }, mem_tbl_5755[] = {
12347 { 0x00000200, 0x00008},
12348 { 0x00004000, 0x00800},
12349 { 0x00006000, 0x00800},
12350 { 0x00008000, 0x02000},
12351 { 0x00010000, 0x0c000},
12352 { 0xffffffff, 0x00000}
12353 }, mem_tbl_5906[] = {
12354 { 0x00000200, 0x00008},
12355 { 0x00004000, 0x00400},
12356 { 0x00006000, 0x00400},
12357 { 0x00008000, 0x01000},
12358 { 0x00010000, 0x01000},
12359 { 0xffffffff, 0x00000}
12360 }, mem_tbl_5717[] = {
12361 { 0x00000200, 0x00008},
12362 { 0x00010000, 0x0a000},
12363 { 0x00020000, 0x13c00},
12364 { 0xffffffff, 0x00000}
12365 }, mem_tbl_57765[] = {
12366 { 0x00000200, 0x00008},
12367 { 0x00004000, 0x00800},
12368 { 0x00006000, 0x09800},
12369 { 0x00010000, 0x0a000},
12370 { 0xffffffff, 0x00000}
12372 struct mem_entry *mem_tbl;
12373 int err = 0;
12374 int i;
12376 if (tg3_flag(tp, 5717_PLUS))
12377 mem_tbl = mem_tbl_5717;
12378 else if (tg3_flag(tp, 57765_CLASS) ||
12379 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
12380 mem_tbl = mem_tbl_57765;
12381 else if (tg3_flag(tp, 5755_PLUS))
12382 mem_tbl = mem_tbl_5755;
12383 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12384 mem_tbl = mem_tbl_5906;
12385 else if (tg3_flag(tp, 5705_PLUS))
12386 mem_tbl = mem_tbl_5705;
12387 else
12388 mem_tbl = mem_tbl_570x;
12390 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12391 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12392 if (err)
12393 break;
12396 return err;
12399 #define TG3_TSO_MSS 500
12401 #define TG3_TSO_IP_HDR_LEN 20
12402 #define TG3_TSO_TCP_HDR_LEN 20
12403 #define TG3_TSO_TCP_OPT_LEN 12
12405 static const u8 tg3_tso_header[] = {
12406 0x08, 0x00,
12407 0x45, 0x00, 0x00, 0x00,
12408 0x00, 0x00, 0x40, 0x00,
12409 0x40, 0x06, 0x00, 0x00,
12410 0x0a, 0x00, 0x00, 0x01,
12411 0x0a, 0x00, 0x00, 0x02,
12412 0x0d, 0x00, 0xe0, 0x00,
12413 0x00, 0x00, 0x01, 0x00,
12414 0x00, 0x00, 0x02, 0x00,
12415 0x80, 0x10, 0x10, 0x00,
12416 0x14, 0x09, 0x00, 0x00,
12417 0x01, 0x01, 0x08, 0x0a,
12418 0x11, 0x11, 0x11, 0x11,
12419 0x11, 0x11, 0x11, 0x11,
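/* Editorial decode of the template above (standard Ethernet/IPv4/TCP
 * layout): bytes 0-1 are the ethertype (0x0800, IPv4); 0x45 opens a
 * 20-byte IPv4 header (TG3_TSO_IP_HDR_LEN) with DF set, TTL 64,
 * protocol 6 (TCP), src 10.0.0.1, dst 10.0.0.2, and tot_len left zero
 * for tg3_run_loopback() to fill in; the TCP header has data offset 8,
 * i.e. 20 bytes plus a 12-byte option block (TG3_TSO_TCP_OPT_LEN): two
 * NOPs and a 10-byte timestamp option padded with 0x11 placeholders.
 */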
12422 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12424 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12425 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12426 u32 budget;
12427 struct sk_buff *skb;
12428 u8 *tx_data, *rx_data;
12429 dma_addr_t map;
12430 int num_pkts, tx_len, rx_len, i, err;
12431 struct tg3_rx_buffer_desc *desc;
12432 struct tg3_napi *tnapi, *rnapi;
12433 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12435 tnapi = &tp->napi[0];
12436 rnapi = &tp->napi[0];
12437 if (tp->irq_cnt > 1) {
12438 if (tg3_flag(tp, ENABLE_RSS))
12439 rnapi = &tp->napi[1];
12440 if (tg3_flag(tp, ENABLE_TSS))
12441 tnapi = &tp->napi[1];
12443 coal_now = tnapi->coal_now | rnapi->coal_now;
12445 err = -EIO;
12447 tx_len = pktsz;
12448 skb = netdev_alloc_skb(tp->dev, tx_len);
12449 if (!skb)
12450 return -ENOMEM;
12452 tx_data = skb_put(skb, tx_len);
12453 memcpy(tx_data, tp->dev->dev_addr, 6);
12454 memset(tx_data + 6, 0x0, 8);
12456 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12458 if (tso_loopback) {
12459 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12461 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12462 TG3_TSO_TCP_OPT_LEN;
12464 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12465 sizeof(tg3_tso_header));
12466 mss = TG3_TSO_MSS;
12468 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12469 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12471 /* Set the total length field in the IP header */
12472 iph->tot_len = htons((u16)(mss + hdr_len));
12474 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12475 TXD_FLAG_CPU_POST_DMA);
12477 if (tg3_flag(tp, HW_TSO_1) ||
12478 tg3_flag(tp, HW_TSO_2) ||
12479 tg3_flag(tp, HW_TSO_3)) {
12480 struct tcphdr *th;
12481 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12482 th = (struct tcphdr *)&tx_data[val];
12483 th->check = 0;
12484 } else
12485 base_flags |= TXD_FLAG_TCPUDP_CSUM;
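/* Each TSO hardware generation encodes the on-wire header length in a
 * different place: HW_TSO_3 splits hdr_len between the mss word and
 * base_flags, HW_TSO_2 packs it into the upper mss bits, and the older
 * engines only carry the TCP option length.
 */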
12487 if (tg3_flag(tp, HW_TSO_3)) {
12488 mss |= (hdr_len & 0xc) << 12;
12489 if (hdr_len & 0x10)
12490 base_flags |= 0x00000010;
12491 base_flags |= (hdr_len & 0x3e0) << 5;
12492 } else if (tg3_flag(tp, HW_TSO_2))
12493 mss |= hdr_len << 9;
12494 else if (tg3_flag(tp, HW_TSO_1) ||
12495 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12496 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12497 } else {
12498 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12501 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12502 } else {
12503 num_pkts = 1;
12504 data_off = ETH_HLEN;
12506 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12507 tx_len > VLAN_ETH_FRAME_LEN)
12508 base_flags |= TXD_FLAG_JMB_PKT;
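/* Fill the payload with an incrementing byte ramp; the receive path
 * further down re-derives the same sequence and compares byte-for-byte.
 */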
12511 for (i = data_off; i < tx_len; i++)
12512 tx_data[i] = (u8) (i & 0xff);
12514 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12515 if (pci_dma_mapping_error(tp->pdev, map)) {
12516 dev_kfree_skb(skb);
12517 return -EIO;
12520 val = tnapi->tx_prod;
12521 tnapi->tx_buffers[val].skb = skb;
12522 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12524 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12525 rnapi->coal_now);
12527 udelay(10);
12529 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12531 budget = tg3_tx_avail(tnapi);
12532 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12533 base_flags | TXD_FLAG_END, mss, 0)) {
12534 tnapi->tx_buffers[val].skb = NULL;
12535 dev_kfree_skb(skb);
12536 return -EIO;
12539 tnapi->tx_prod++;
12541 /* Sync BD data before updating mailbox */
12542 wmb();
12544 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12545 tr32_mailbox(tnapi->prodmbox);
12547 udelay(10);
12549 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12550 for (i = 0; i < 35; i++) {
12551 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12552 coal_now);
12554 udelay(10);
12556 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12557 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12558 if ((tx_idx == tnapi->tx_prod) &&
12559 (rx_idx == (rx_start_idx + num_pkts)))
12560 break;
12563 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12564 dev_kfree_skb(skb);
12566 if (tx_idx != tnapi->tx_prod)
12567 goto out;
12569 if (rx_idx != rx_start_idx + num_pkts)
12570 goto out;
12572 val = data_off;
12573 while (rx_idx != rx_start_idx) {
12574 desc = &rnapi->rx_rcb[rx_start_idx++];
12575 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12576 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12578 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12579 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12580 goto out;
12582 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12583 - ETH_FCS_LEN;
12585 if (!tso_loopback) {
12586 if (rx_len != tx_len)
12587 goto out;
12589 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12590 if (opaque_key != RXD_OPAQUE_RING_STD)
12591 goto out;
12592 } else {
12593 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12594 goto out;
12596 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12597 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12598 >> RXD_TCPCSUM_SHIFT != 0xffff) {
12599 goto out;
12602 if (opaque_key == RXD_OPAQUE_RING_STD) {
12603 rx_data = tpr->rx_std_buffers[desc_idx].data;
12604 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12605 mapping);
12606 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12607 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12608 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12609 mapping);
12610 } else
12611 goto out;
12613 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12614 PCI_DMA_FROMDEVICE);
12616 rx_data += TG3_RX_OFFSET(tp);
12617 for (i = data_off; i < rx_len; i++, val++) {
12618 if (*(rx_data + i) != (u8) (val & 0xff))
12619 goto out;
12623 err = 0;
12625 /* tg3_free_rings will unmap and free the rx_data */
12626 out:
12627 return err;
12630 #define TG3_STD_LOOPBACK_FAILED 1
12631 #define TG3_JMB_LOOPBACK_FAILED 2
12632 #define TG3_TSO_LOOPBACK_FAILED 4
12633 #define TG3_LOOPBACK_FAILED \
12634 (TG3_STD_LOOPBACK_FAILED | \
12635 TG3_JMB_LOOPBACK_FAILED | \
12636 TG3_TSO_LOOPBACK_FAILED)
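/* Each entry of the ethtool data[] array reported below is a bitmask:
 * a set TG3_STD/JMB/TSO bit identifies which packet-size or TSO
 * sub-test failed for that loopback mode.
 */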
12638 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12640 int err = -EIO;
12641 u32 eee_cap;
12642 u32 jmb_pkt_sz = 9000;
12644 if (tp->dma_limit)
12645 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12647 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12648 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12650 if (!netif_running(tp->dev)) {
12651 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12652 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12653 if (do_extlpbk)
12654 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12655 goto done;
12658 err = tg3_reset_hw(tp, 1);
12659 if (err) {
12660 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12661 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12662 if (do_extlpbk)
12663 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12664 goto done;
12667 if (tg3_flag(tp, ENABLE_RSS)) {
12668 int i;
12670 /* Reroute all rx packets to the 1st queue */
12671 for (i = MAC_RSS_INDIR_TBL_0;
12672 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12673 tw32(i, 0x0);
12676 /* HW erratum - MAC loopback fails in some cases on the 5780.
12677 * Normal traffic and PHY loopback are not affected by this
12678 * erratum. Also, the MAC loopback test is deprecated for
12679 * all newer ASIC revisions.
12681 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12682 !tg3_flag(tp, CPMU_PRESENT)) {
12683 tg3_mac_loopback(tp, true);
12685 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12686 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12688 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12689 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12690 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12692 tg3_mac_loopback(tp, false);
12695 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12696 !tg3_flag(tp, USE_PHYLIB)) {
12697 int i;
12699 tg3_phy_lpbk_set(tp, 0, false);
12701 /* Wait for link */
12702 for (i = 0; i < 100; i++) {
12703 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12704 break;
12705 mdelay(1);
12708 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12709 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12710 if (tg3_flag(tp, TSO_CAPABLE) &&
12711 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12712 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12713 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12714 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12715 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12717 if (do_extlpbk) {
12718 tg3_phy_lpbk_set(tp, 0, true);
12720 /* All link indications report up, but the hardware
12721 * isn't really ready for about 20 msec. Double it
12722 * to be sure.
12724 mdelay(40);
12726 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12727 data[TG3_EXT_LOOPB_TEST] |=
12728 TG3_STD_LOOPBACK_FAILED;
12729 if (tg3_flag(tp, TSO_CAPABLE) &&
12730 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12731 data[TG3_EXT_LOOPB_TEST] |=
12732 TG3_TSO_LOOPBACK_FAILED;
12733 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12734 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12735 data[TG3_EXT_LOOPB_TEST] |=
12736 TG3_JMB_LOOPBACK_FAILED;
12739 /* Re-enable gphy autopowerdown. */
12740 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12741 tg3_phy_toggle_apd(tp, true);
12744 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12745 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
12747 done:
12748 tp->phy_flags |= eee_cap;
12750 return err;
12753 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12754 u64 *data)
12756 struct tg3 *tp = netdev_priv(dev);
12757 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12759 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12760 tg3_power_up(tp)) {
12761 etest->flags |= ETH_TEST_FL_FAILED;
12762 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
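/* Note: the byte-wise memset() stores 0x0101...01 in each u64, not 1;
 * any nonzero value marks a test as failed, so that is sufficient.
 */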
12763 return;
12766 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12768 if (tg3_test_nvram(tp) != 0) {
12769 etest->flags |= ETH_TEST_FL_FAILED;
12770 data[TG3_NVRAM_TEST] = 1;
12772 if (!doextlpbk && tg3_test_link(tp)) {
12773 etest->flags |= ETH_TEST_FL_FAILED;
12774 data[TG3_LINK_TEST] = 1;
12776 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12777 int err, err2 = 0, irq_sync = 0;
12779 if (netif_running(dev)) {
12780 tg3_phy_stop(tp);
12781 tg3_netif_stop(tp);
12782 irq_sync = 1;
12785 tg3_full_lock(tp, irq_sync);
12786 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12787 err = tg3_nvram_lock(tp);
12788 tg3_halt_cpu(tp, RX_CPU_BASE);
12789 if (!tg3_flag(tp, 5705_PLUS))
12790 tg3_halt_cpu(tp, TX_CPU_BASE);
12791 if (!err)
12792 tg3_nvram_unlock(tp);
12794 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12795 tg3_phy_reset(tp);
12797 if (tg3_test_registers(tp) != 0) {
12798 etest->flags |= ETH_TEST_FL_FAILED;
12799 data[TG3_REGISTER_TEST] = 1;
12802 if (tg3_test_memory(tp) != 0) {
12803 etest->flags |= ETH_TEST_FL_FAILED;
12804 data[TG3_MEMORY_TEST] = 1;
12807 if (doextlpbk)
12808 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12810 if (tg3_test_loopback(tp, data, doextlpbk))
12811 etest->flags |= ETH_TEST_FL_FAILED;
12813 tg3_full_unlock(tp);
12815 if (tg3_test_interrupt(tp) != 0) {
12816 etest->flags |= ETH_TEST_FL_FAILED;
12817 data[TG3_INTERRUPT_TEST] = 1;
12820 tg3_full_lock(tp, 0);
12822 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12823 if (netif_running(dev)) {
12824 tg3_flag_set(tp, INIT_COMPLETE);
12825 err2 = tg3_restart_hw(tp, 1);
12826 if (!err2)
12827 tg3_netif_start(tp);
12830 tg3_full_unlock(tp);
12832 if (irq_sync && !err2)
12833 tg3_phy_start(tp);
12835 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12836 tg3_power_down(tp);
12840 static int tg3_hwtstamp_ioctl(struct net_device *dev,
12841 struct ifreq *ifr, int cmd)
12843 struct tg3 *tp = netdev_priv(dev);
12844 struct hwtstamp_config stmpconf;
12846 if (!tg3_flag(tp, PTP_CAPABLE))
12847 return -EINVAL;
12849 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12850 return -EFAULT;
12852 if (stmpconf.flags)
12853 return -EINVAL;
12855 switch (stmpconf.tx_type) {
12856 case HWTSTAMP_TX_ON:
12857 tg3_flag_set(tp, TX_TSTAMP_EN);
12858 break;
12859 case HWTSTAMP_TX_OFF:
12860 tg3_flag_clear(tp, TX_TSTAMP_EN);
12861 break;
12862 default:
12863 return -ERANGE;
12866 switch (stmpconf.rx_filter) {
12867 case HWTSTAMP_FILTER_NONE:
12868 tp->rxptpctl = 0;
12869 break;
12870 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
12871 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12872 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
12873 break;
12874 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
12875 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12876 TG3_RX_PTP_CTL_SYNC_EVNT;
12877 break;
12878 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
12879 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12880 TG3_RX_PTP_CTL_DELAY_REQ;
12881 break;
12882 case HWTSTAMP_FILTER_PTP_V2_EVENT:
12883 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12884 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12885 break;
12886 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
12887 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12888 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12889 break;
12890 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
12891 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12892 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12893 break;
12894 case HWTSTAMP_FILTER_PTP_V2_SYNC:
12895 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12896 TG3_RX_PTP_CTL_SYNC_EVNT;
12897 break;
12898 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
12899 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12900 TG3_RX_PTP_CTL_SYNC_EVNT;
12901 break;
12902 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
12903 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12904 TG3_RX_PTP_CTL_SYNC_EVNT;
12905 break;
12906 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
12907 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12908 TG3_RX_PTP_CTL_DELAY_REQ;
12909 break;
12910 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
12911 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12912 TG3_RX_PTP_CTL_DELAY_REQ;
12913 break;
12914 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
12915 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12916 TG3_RX_PTP_CTL_DELAY_REQ;
12917 break;
12918 default:
12919 return -ERANGE;
12922 if (netif_running(dev) && tp->rxptpctl)
12923 tw32(TG3_RX_PTP_CTL,
12924 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
12926 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
12927 -EFAULT : 0;
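/* Editor's sketch (not driver code): a minimal, hedged userspace program
 * showing how the SIOCSHWTSTAMP handler above is typically driven. The
 * interface name "eth0" is an assumption; error handling is minimal.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;                 /* exercises the TX_TSTAMP_EN path */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; /* maps to the V2 event rx filter */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* assumed interface name */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");

	close(fd);
	return 0;
}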
12930 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12932 struct mii_ioctl_data *data = if_mii(ifr);
12933 struct tg3 *tp = netdev_priv(dev);
12934 int err;
12936 if (tg3_flag(tp, USE_PHYLIB)) {
12937 struct phy_device *phydev;
12938 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12939 return -EAGAIN;
12940 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12941 return phy_mii_ioctl(phydev, ifr, cmd);
12944 switch (cmd) {
12945 case SIOCGMIIPHY:
12946 data->phy_id = tp->phy_addr;
12948 /* fallthru */
12949 case SIOCGMIIREG: {
12950 u32 mii_regval;
12952 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12953 break; /* We have no PHY */
12955 if (!netif_running(dev))
12956 return -EAGAIN;
12958 spin_lock_bh(&tp->lock);
12959 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12960 spin_unlock_bh(&tp->lock);
12962 data->val_out = mii_regval;
12964 return err;
12967 case SIOCSMIIREG:
12968 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12969 break; /* We have no PHY */
12971 if (!netif_running(dev))
12972 return -EAGAIN;
12974 spin_lock_bh(&tp->lock);
12975 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12976 spin_unlock_bh(&tp->lock);
12978 return err;
12980 case SIOCSHWTSTAMP:
12981 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
12983 default:
12984 /* do nothing */
12985 break;
12987 return -EOPNOTSUPP;
12990 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12992 struct tg3 *tp = netdev_priv(dev);
12994 memcpy(ec, &tp->coal, sizeof(*ec));
12995 return 0;
12998 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13000 struct tg3 *tp = netdev_priv(dev);
13001 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13002 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13004 if (!tg3_flag(tp, 5705_PLUS)) {
13005 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13006 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13007 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13008 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13011 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13012 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13013 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13014 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13015 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13016 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13017 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13018 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13019 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13020 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13021 return -EINVAL;
13023 /* No rx interrupts will be generated if both are zero */
13024 if ((ec->rx_coalesce_usecs == 0) &&
13025 (ec->rx_max_coalesced_frames == 0))
13026 return -EINVAL;
13028 /* No tx interrupts will be generated if both are zero */
13029 if ((ec->tx_coalesce_usecs == 0) &&
13030 (ec->tx_max_coalesced_frames == 0))
13031 return -EINVAL;
13033 /* Only copy relevant parameters, ignore all others. */
13034 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13035 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13036 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13037 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13038 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13039 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13040 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13041 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13042 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13044 if (netif_running(dev)) {
13045 tg3_full_lock(tp, 0);
13046 __tg3_set_coalesce(tp, &tp->coal);
13047 tg3_full_unlock(tp);
13049 return 0;
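/* The fields validated above map directly onto the ethtool -C options,
 * e.g. (assuming the usual ethtool CLI and an interface named eth0):
 *
 *   ethtool -C eth0 rx-usecs 20 rx-frames 5 stats-block-usecs 1000
 *
 * which feeds rx_coalesce_usecs, rx_max_coalesced_frames and
 * stats_block_coalesce_usecs respectively.
 */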
13052 static const struct ethtool_ops tg3_ethtool_ops = {
13053 .get_settings = tg3_get_settings,
13054 .set_settings = tg3_set_settings,
13055 .get_drvinfo = tg3_get_drvinfo,
13056 .get_regs_len = tg3_get_regs_len,
13057 .get_regs = tg3_get_regs,
13058 .get_wol = tg3_get_wol,
13059 .set_wol = tg3_set_wol,
13060 .get_msglevel = tg3_get_msglevel,
13061 .set_msglevel = tg3_set_msglevel,
13062 .nway_reset = tg3_nway_reset,
13063 .get_link = ethtool_op_get_link,
13064 .get_eeprom_len = tg3_get_eeprom_len,
13065 .get_eeprom = tg3_get_eeprom,
13066 .set_eeprom = tg3_set_eeprom,
13067 .get_ringparam = tg3_get_ringparam,
13068 .set_ringparam = tg3_set_ringparam,
13069 .get_pauseparam = tg3_get_pauseparam,
13070 .set_pauseparam = tg3_set_pauseparam,
13071 .self_test = tg3_self_test,
13072 .get_strings = tg3_get_strings,
13073 .set_phys_id = tg3_set_phys_id,
13074 .get_ethtool_stats = tg3_get_ethtool_stats,
13075 .get_coalesce = tg3_get_coalesce,
13076 .set_coalesce = tg3_set_coalesce,
13077 .get_sset_count = tg3_get_sset_count,
13078 .get_rxnfc = tg3_get_rxnfc,
13079 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13080 .get_rxfh_indir = tg3_get_rxfh_indir,
13081 .set_rxfh_indir = tg3_set_rxfh_indir,
13082 .get_channels = tg3_get_channels,
13083 .set_channels = tg3_set_channels,
13084 .get_ts_info = tg3_get_ts_info,
13087 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13088 struct rtnl_link_stats64 *stats)
13090 struct tg3 *tp = netdev_priv(dev);
13092 spin_lock_bh(&tp->lock);
13093 if (!tp->hw_stats) {
13094 spin_unlock_bh(&tp->lock);
13095 return &tp->net_stats_prev;
13098 tg3_get_nstats(tp, stats);
13099 spin_unlock_bh(&tp->lock);
13101 return stats;
13104 static void tg3_set_rx_mode(struct net_device *dev)
13106 struct tg3 *tp = netdev_priv(dev);
13108 if (!netif_running(dev))
13109 return;
13111 tg3_full_lock(tp, 0);
13112 __tg3_set_rx_mode(dev);
13113 tg3_full_unlock(tp);
13116 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13117 int new_mtu)
13119 dev->mtu = new_mtu;
13121 if (new_mtu > ETH_DATA_LEN) {
13122 if (tg3_flag(tp, 5780_CLASS)) {
13123 netdev_update_features(dev);
13124 tg3_flag_clear(tp, TSO_CAPABLE);
13125 } else {
13126 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13128 } else {
13129 if (tg3_flag(tp, 5780_CLASS)) {
13130 tg3_flag_set(tp, TSO_CAPABLE);
13131 netdev_update_features(dev);
13133 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13137 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13139 struct tg3 *tp = netdev_priv(dev);
13140 int err, reset_phy = 0;
13142 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13143 return -EINVAL;
13145 if (!netif_running(dev)) {
13146 /* We'll just catch it later when the
13147 * device is brought up.
13149 tg3_set_mtu(dev, tp, new_mtu);
13150 return 0;
13153 tg3_phy_stop(tp);
13155 tg3_netif_stop(tp);
13157 tg3_full_lock(tp, 1);
13159 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13161 tg3_set_mtu(dev, tp, new_mtu);
13163 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13164 * breaks all requests to 256 bytes.
13166 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13167 reset_phy = 1;
13169 err = tg3_restart_hw(tp, reset_phy);
13171 if (!err)
13172 tg3_netif_start(tp);
13174 tg3_full_unlock(tp);
13176 if (!err)
13177 tg3_phy_start(tp);
13179 return err;
13182 static const struct net_device_ops tg3_netdev_ops = {
13183 .ndo_open = tg3_open,
13184 .ndo_stop = tg3_close,
13185 .ndo_start_xmit = tg3_start_xmit,
13186 .ndo_get_stats64 = tg3_get_stats64,
13187 .ndo_validate_addr = eth_validate_addr,
13188 .ndo_set_rx_mode = tg3_set_rx_mode,
13189 .ndo_set_mac_address = tg3_set_mac_addr,
13190 .ndo_do_ioctl = tg3_ioctl,
13191 .ndo_tx_timeout = tg3_tx_timeout,
13192 .ndo_change_mtu = tg3_change_mtu,
13193 .ndo_fix_features = tg3_fix_features,
13194 .ndo_set_features = tg3_set_features,
13195 #ifdef CONFIG_NET_POLL_CONTROLLER
13196 .ndo_poll_controller = tg3_poll_controller,
13197 #endif
13200 static void tg3_get_eeprom_size(struct tg3 *tp)
13202 u32 cursize, val, magic;
13204 tp->nvram_size = EEPROM_CHIP_SIZE;
13206 if (tg3_nvram_read(tp, 0, &magic) != 0)
13207 return;
13209 if ((magic != TG3_EEPROM_MAGIC) &&
13210 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13211 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13212 return;
13215 * Size the chip by reading offsets at increasing powers of two.
13216 * When we encounter our validation signature, we know the addressing
13217 * has wrapped around, and thus have our chip size.
13219 cursize = 0x10;
13221 while (cursize < tp->nvram_size) {
13222 if (tg3_nvram_read(tp, cursize, &val) != 0)
13223 return;
13225 if (val == magic)
13226 break;
13228 cursize <<= 1;
13231 tp->nvram_size = cursize;
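/* Worked example of the sizing loop above (hypothetical 16KB part):
 * the reads at 0x10, 0x20, ... return ordinary data until cursize
 * reaches 0x4000; that access wraps back to offset 0, the magic
 * signature reappears, and nvram_size is set to 0x4000.
 */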
13234 static void tg3_get_nvram_size(struct tg3 *tp)
13236 u32 val;
13238 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13239 return;
13241 /* Selfboot format */
13242 if (val != TG3_EEPROM_MAGIC) {
13243 tg3_get_eeprom_size(tp);
13244 return;
13247 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13248 if (val != 0) {
13249 /* This is confusing. We want to operate on the
13250 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13251 * call will read from NVRAM and byteswap the data
13252 * according to the byteswapping settings for all
13253 * other register accesses. This ensures the data we
13254 * want will always reside in the lower 16-bits.
13255 * However, the data in NVRAM is in LE format, which
13256 * means the data from the NVRAM read will always be
13257 * opposite the endianness of the CPU. The 16-bit
13258 * byteswap then brings the data to CPU endianness.
13260 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13261 return;
13264 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13267 static void tg3_get_nvram_info(struct tg3 *tp)
13269 u32 nvcfg1;
13271 nvcfg1 = tr32(NVRAM_CFG1);
13272 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13273 tg3_flag_set(tp, FLASH);
13274 } else {
13275 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13276 tw32(NVRAM_CFG1, nvcfg1);
13279 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13280 tg3_flag(tp, 5780_CLASS)) {
13281 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13282 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13283 tp->nvram_jedecnum = JEDEC_ATMEL;
13284 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13285 tg3_flag_set(tp, NVRAM_BUFFERED);
13286 break;
13287 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13288 tp->nvram_jedecnum = JEDEC_ATMEL;
13289 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13290 break;
13291 case FLASH_VENDOR_ATMEL_EEPROM:
13292 tp->nvram_jedecnum = JEDEC_ATMEL;
13293 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13294 tg3_flag_set(tp, NVRAM_BUFFERED);
13295 break;
13296 case FLASH_VENDOR_ST:
13297 tp->nvram_jedecnum = JEDEC_ST;
13298 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13299 tg3_flag_set(tp, NVRAM_BUFFERED);
13300 break;
13301 case FLASH_VENDOR_SAIFUN:
13302 tp->nvram_jedecnum = JEDEC_SAIFUN;
13303 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13304 break;
13305 case FLASH_VENDOR_SST_SMALL:
13306 case FLASH_VENDOR_SST_LARGE:
13307 tp->nvram_jedecnum = JEDEC_SST;
13308 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13309 break;
13311 } else {
13312 tp->nvram_jedecnum = JEDEC_ATMEL;
13313 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13314 tg3_flag_set(tp, NVRAM_BUFFERED);
13318 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13320 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13321 case FLASH_5752PAGE_SIZE_256:
13322 tp->nvram_pagesize = 256;
13323 break;
13324 case FLASH_5752PAGE_SIZE_512:
13325 tp->nvram_pagesize = 512;
13326 break;
13327 case FLASH_5752PAGE_SIZE_1K:
13328 tp->nvram_pagesize = 1024;
13329 break;
13330 case FLASH_5752PAGE_SIZE_2K:
13331 tp->nvram_pagesize = 2048;
13332 break;
13333 case FLASH_5752PAGE_SIZE_4K:
13334 tp->nvram_pagesize = 4096;
13335 break;
13336 case FLASH_5752PAGE_SIZE_264:
13337 tp->nvram_pagesize = 264;
13338 break;
13339 case FLASH_5752PAGE_SIZE_528:
13340 tp->nvram_pagesize = 528;
13341 break;
13345 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13347 u32 nvcfg1;
13349 nvcfg1 = tr32(NVRAM_CFG1);
13351 /* NVRAM protection for TPM */
13352 if (nvcfg1 & (1 << 27))
13353 tg3_flag_set(tp, PROTECTED_NVRAM);
13355 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13356 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13357 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13358 tp->nvram_jedecnum = JEDEC_ATMEL;
13359 tg3_flag_set(tp, NVRAM_BUFFERED);
13360 break;
13361 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13362 tp->nvram_jedecnum = JEDEC_ATMEL;
13363 tg3_flag_set(tp, NVRAM_BUFFERED);
13364 tg3_flag_set(tp, FLASH);
13365 break;
13366 case FLASH_5752VENDOR_ST_M45PE10:
13367 case FLASH_5752VENDOR_ST_M45PE20:
13368 case FLASH_5752VENDOR_ST_M45PE40:
13369 tp->nvram_jedecnum = JEDEC_ST;
13370 tg3_flag_set(tp, NVRAM_BUFFERED);
13371 tg3_flag_set(tp, FLASH);
13372 break;
13375 if (tg3_flag(tp, FLASH)) {
13376 tg3_nvram_get_pagesize(tp, nvcfg1);
13377 } else {
13378 /* For eeprom, set pagesize to maximum eeprom size */
13379 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13381 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13382 tw32(NVRAM_CFG1, nvcfg1);
13386 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13388 u32 nvcfg1, protect = 0;
13390 nvcfg1 = tr32(NVRAM_CFG1);
13392 /* NVRAM protection for TPM */
13393 if (nvcfg1 & (1 << 27)) {
13394 tg3_flag_set(tp, PROTECTED_NVRAM);
13395 protect = 1;
13398 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13399 switch (nvcfg1) {
13400 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13401 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13402 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13403 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13404 tp->nvram_jedecnum = JEDEC_ATMEL;
13405 tg3_flag_set(tp, NVRAM_BUFFERED);
13406 tg3_flag_set(tp, FLASH);
13407 tp->nvram_pagesize = 264;
13408 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13409 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13410 tp->nvram_size = (protect ? 0x3e200 :
13411 TG3_NVRAM_SIZE_512KB);
13412 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13413 tp->nvram_size = (protect ? 0x1f200 :
13414 TG3_NVRAM_SIZE_256KB);
13415 else
13416 tp->nvram_size = (protect ? 0x1f200 :
13417 TG3_NVRAM_SIZE_128KB);
13418 break;
13419 case FLASH_5752VENDOR_ST_M45PE10:
13420 case FLASH_5752VENDOR_ST_M45PE20:
13421 case FLASH_5752VENDOR_ST_M45PE40:
13422 tp->nvram_jedecnum = JEDEC_ST;
13423 tg3_flag_set(tp, NVRAM_BUFFERED);
13424 tg3_flag_set(tp, FLASH);
13425 tp->nvram_pagesize = 256;
13426 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13427 tp->nvram_size = (protect ?
13428 TG3_NVRAM_SIZE_64KB :
13429 TG3_NVRAM_SIZE_128KB);
13430 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13431 tp->nvram_size = (protect ?
13432 TG3_NVRAM_SIZE_64KB :
13433 TG3_NVRAM_SIZE_256KB);
13434 else
13435 tp->nvram_size = (protect ?
13436 TG3_NVRAM_SIZE_128KB :
13437 TG3_NVRAM_SIZE_512KB);
13438 break;
13442 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13444 u32 nvcfg1;
13446 nvcfg1 = tr32(NVRAM_CFG1);
13448 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13449 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13450 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13451 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13452 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13453 tp->nvram_jedecnum = JEDEC_ATMEL;
13454 tg3_flag_set(tp, NVRAM_BUFFERED);
13455 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13457 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13458 tw32(NVRAM_CFG1, nvcfg1);
13459 break;
13460 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13461 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13462 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13463 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13464 tp->nvram_jedecnum = JEDEC_ATMEL;
13465 tg3_flag_set(tp, NVRAM_BUFFERED);
13466 tg3_flag_set(tp, FLASH);
13467 tp->nvram_pagesize = 264;
13468 break;
13469 case FLASH_5752VENDOR_ST_M45PE10:
13470 case FLASH_5752VENDOR_ST_M45PE20:
13471 case FLASH_5752VENDOR_ST_M45PE40:
13472 tp->nvram_jedecnum = JEDEC_ST;
13473 tg3_flag_set(tp, NVRAM_BUFFERED);
13474 tg3_flag_set(tp, FLASH);
13475 tp->nvram_pagesize = 256;
13476 break;
13480 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13482 u32 nvcfg1, protect = 0;
13484 nvcfg1 = tr32(NVRAM_CFG1);
13486 /* NVRAM protection for TPM */
13487 if (nvcfg1 & (1 << 27)) {
13488 tg3_flag_set(tp, PROTECTED_NVRAM);
13489 protect = 1;
13492 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13493 switch (nvcfg1) {
13494 case FLASH_5761VENDOR_ATMEL_ADB021D:
13495 case FLASH_5761VENDOR_ATMEL_ADB041D:
13496 case FLASH_5761VENDOR_ATMEL_ADB081D:
13497 case FLASH_5761VENDOR_ATMEL_ADB161D:
13498 case FLASH_5761VENDOR_ATMEL_MDB021D:
13499 case FLASH_5761VENDOR_ATMEL_MDB041D:
13500 case FLASH_5761VENDOR_ATMEL_MDB081D:
13501 case FLASH_5761VENDOR_ATMEL_MDB161D:
13502 tp->nvram_jedecnum = JEDEC_ATMEL;
13503 tg3_flag_set(tp, NVRAM_BUFFERED);
13504 tg3_flag_set(tp, FLASH);
13505 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13506 tp->nvram_pagesize = 256;
13507 break;
13508 case FLASH_5761VENDOR_ST_A_M45PE20:
13509 case FLASH_5761VENDOR_ST_A_M45PE40:
13510 case FLASH_5761VENDOR_ST_A_M45PE80:
13511 case FLASH_5761VENDOR_ST_A_M45PE16:
13512 case FLASH_5761VENDOR_ST_M_M45PE20:
13513 case FLASH_5761VENDOR_ST_M_M45PE40:
13514 case FLASH_5761VENDOR_ST_M_M45PE80:
13515 case FLASH_5761VENDOR_ST_M_M45PE16:
13516 tp->nvram_jedecnum = JEDEC_ST;
13517 tg3_flag_set(tp, NVRAM_BUFFERED);
13518 tg3_flag_set(tp, FLASH);
13519 tp->nvram_pagesize = 256;
13520 break;
13523 if (protect) {
13524 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13525 } else {
13526 switch (nvcfg1) {
13527 case FLASH_5761VENDOR_ATMEL_ADB161D:
13528 case FLASH_5761VENDOR_ATMEL_MDB161D:
13529 case FLASH_5761VENDOR_ST_A_M45PE16:
13530 case FLASH_5761VENDOR_ST_M_M45PE16:
13531 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13532 break;
13533 case FLASH_5761VENDOR_ATMEL_ADB081D:
13534 case FLASH_5761VENDOR_ATMEL_MDB081D:
13535 case FLASH_5761VENDOR_ST_A_M45PE80:
13536 case FLASH_5761VENDOR_ST_M_M45PE80:
13537 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13538 break;
13539 case FLASH_5761VENDOR_ATMEL_ADB041D:
13540 case FLASH_5761VENDOR_ATMEL_MDB041D:
13541 case FLASH_5761VENDOR_ST_A_M45PE40:
13542 case FLASH_5761VENDOR_ST_M_M45PE40:
13543 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13544 break;
13545 case FLASH_5761VENDOR_ATMEL_ADB021D:
13546 case FLASH_5761VENDOR_ATMEL_MDB021D:
13547 case FLASH_5761VENDOR_ST_A_M45PE20:
13548 case FLASH_5761VENDOR_ST_M_M45PE20:
13549 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13550 break;
13555 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13557 tp->nvram_jedecnum = JEDEC_ATMEL;
13558 tg3_flag_set(tp, NVRAM_BUFFERED);
13559 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13562 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13564 u32 nvcfg1;
13566 nvcfg1 = tr32(NVRAM_CFG1);
13568 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13569 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13570 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13571 tp->nvram_jedecnum = JEDEC_ATMEL;
13572 tg3_flag_set(tp, NVRAM_BUFFERED);
13573 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13575 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13576 tw32(NVRAM_CFG1, nvcfg1);
13577 return;
13578 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13579 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13580 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13581 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13582 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13583 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13584 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13585 tp->nvram_jedecnum = JEDEC_ATMEL;
13586 tg3_flag_set(tp, NVRAM_BUFFERED);
13587 tg3_flag_set(tp, FLASH);
13589 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13590 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13591 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13592 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13593 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13594 break;
13595 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13596 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13597 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13598 break;
13599 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13600 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13601 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13602 break;
13604 break;
13605 case FLASH_5752VENDOR_ST_M45PE10:
13606 case FLASH_5752VENDOR_ST_M45PE20:
13607 case FLASH_5752VENDOR_ST_M45PE40:
13608 tp->nvram_jedecnum = JEDEC_ST;
13609 tg3_flag_set(tp, NVRAM_BUFFERED);
13610 tg3_flag_set(tp, FLASH);
13612 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13613 case FLASH_5752VENDOR_ST_M45PE10:
13614 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13615 break;
13616 case FLASH_5752VENDOR_ST_M45PE20:
13617 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13618 break;
13619 case FLASH_5752VENDOR_ST_M45PE40:
13620 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13621 break;
13623 break;
13624 default:
13625 tg3_flag_set(tp, NO_NVRAM);
13626 return;
13629 tg3_nvram_get_pagesize(tp, nvcfg1);
13630 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13631 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13635 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13637 u32 nvcfg1;
13639 nvcfg1 = tr32(NVRAM_CFG1);
13641 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13642 case FLASH_5717VENDOR_ATMEL_EEPROM:
13643 case FLASH_5717VENDOR_MICRO_EEPROM:
13644 tp->nvram_jedecnum = JEDEC_ATMEL;
13645 tg3_flag_set(tp, NVRAM_BUFFERED);
13646 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13648 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13649 tw32(NVRAM_CFG1, nvcfg1);
13650 return;
13651 case FLASH_5717VENDOR_ATMEL_MDB011D:
13652 case FLASH_5717VENDOR_ATMEL_ADB011B:
13653 case FLASH_5717VENDOR_ATMEL_ADB011D:
13654 case FLASH_5717VENDOR_ATMEL_MDB021D:
13655 case FLASH_5717VENDOR_ATMEL_ADB021B:
13656 case FLASH_5717VENDOR_ATMEL_ADB021D:
13657 case FLASH_5717VENDOR_ATMEL_45USPT:
13658 tp->nvram_jedecnum = JEDEC_ATMEL;
13659 tg3_flag_set(tp, NVRAM_BUFFERED);
13660 tg3_flag_set(tp, FLASH);
13662 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13663 case FLASH_5717VENDOR_ATMEL_MDB021D:
13664 /* Detect size with tg3_nvram_get_size() */
13665 break;
13666 case FLASH_5717VENDOR_ATMEL_ADB021B:
13667 case FLASH_5717VENDOR_ATMEL_ADB021D:
13668 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13669 break;
13670 default:
13671 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13672 break;
13674 break;
13675 case FLASH_5717VENDOR_ST_M_M25PE10:
13676 case FLASH_5717VENDOR_ST_A_M25PE10:
13677 case FLASH_5717VENDOR_ST_M_M45PE10:
13678 case FLASH_5717VENDOR_ST_A_M45PE10:
13679 case FLASH_5717VENDOR_ST_M_M25PE20:
13680 case FLASH_5717VENDOR_ST_A_M25PE20:
13681 case FLASH_5717VENDOR_ST_M_M45PE20:
13682 case FLASH_5717VENDOR_ST_A_M45PE20:
13683 case FLASH_5717VENDOR_ST_25USPT:
13684 case FLASH_5717VENDOR_ST_45USPT:
13685 tp->nvram_jedecnum = JEDEC_ST;
13686 tg3_flag_set(tp, NVRAM_BUFFERED);
13687 tg3_flag_set(tp, FLASH);
13689 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13690 case FLASH_5717VENDOR_ST_M_M25PE20:
13691 case FLASH_5717VENDOR_ST_M_M45PE20:
13692 /* Detect size with tg3_nvram_get_size() */
13693 break;
13694 case FLASH_5717VENDOR_ST_A_M25PE20:
13695 case FLASH_5717VENDOR_ST_A_M45PE20:
13696 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13697 break;
13698 default:
13699 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13700 break;
13702 break;
13703 default:
13704 tg3_flag_set(tp, NO_NVRAM);
13705 return;
13708 tg3_nvram_get_pagesize(tp, nvcfg1);
13709 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13710 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13713 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13715 u32 nvcfg1, nvmpinstrp;
13717 nvcfg1 = tr32(NVRAM_CFG1);
13718 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13720 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
13721 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
13722 tg3_flag_set(tp, NO_NVRAM);
13723 return;
13726 switch (nvmpinstrp) {
13727 case FLASH_5762_EEPROM_HD:
13728 nvmpinstrp = FLASH_5720_EEPROM_HD;
break; /* without this break, the HD case fell through and was clobbered by the LD assignment */
13729 case FLASH_5762_EEPROM_LD:
13730 nvmpinstrp = FLASH_5720_EEPROM_LD;
break;
13734 switch (nvmpinstrp) {
13735 case FLASH_5720_EEPROM_HD:
13736 case FLASH_5720_EEPROM_LD:
13737 tp->nvram_jedecnum = JEDEC_ATMEL;
13738 tg3_flag_set(tp, NVRAM_BUFFERED);
13740 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13741 tw32(NVRAM_CFG1, nvcfg1);
13742 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13743 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13744 else
13745 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13746 return;
13747 case FLASH_5720VENDOR_M_ATMEL_DB011D:
13748 case FLASH_5720VENDOR_A_ATMEL_DB011B:
13749 case FLASH_5720VENDOR_A_ATMEL_DB011D:
13750 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13751 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13752 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13753 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13754 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13755 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13756 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13757 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13758 case FLASH_5720VENDOR_ATMEL_45USPT:
13759 tp->nvram_jedecnum = JEDEC_ATMEL;
13760 tg3_flag_set(tp, NVRAM_BUFFERED);
13761 tg3_flag_set(tp, FLASH);
13763 switch (nvmpinstrp) {
13764 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13765 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13766 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13767 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13768 break;
13769 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13770 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13771 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13772 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13773 break;
13774 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13775 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13776 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13777 break;
13778 default:
13779 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13780 break;
13782 break;
13783 case FLASH_5720VENDOR_M_ST_M25PE10:
13784 case FLASH_5720VENDOR_M_ST_M45PE10:
13785 case FLASH_5720VENDOR_A_ST_M25PE10:
13786 case FLASH_5720VENDOR_A_ST_M45PE10:
13787 case FLASH_5720VENDOR_M_ST_M25PE20:
13788 case FLASH_5720VENDOR_M_ST_M45PE20:
13789 case FLASH_5720VENDOR_A_ST_M25PE20:
13790 case FLASH_5720VENDOR_A_ST_M45PE20:
13791 case FLASH_5720VENDOR_M_ST_M25PE40:
13792 case FLASH_5720VENDOR_M_ST_M45PE40:
13793 case FLASH_5720VENDOR_A_ST_M25PE40:
13794 case FLASH_5720VENDOR_A_ST_M45PE40:
13795 case FLASH_5720VENDOR_M_ST_M25PE80:
13796 case FLASH_5720VENDOR_M_ST_M45PE80:
13797 case FLASH_5720VENDOR_A_ST_M25PE80:
13798 case FLASH_5720VENDOR_A_ST_M45PE80:
13799 case FLASH_5720VENDOR_ST_25USPT:
13800 case FLASH_5720VENDOR_ST_45USPT:
13801 tp->nvram_jedecnum = JEDEC_ST;
13802 tg3_flag_set(tp, NVRAM_BUFFERED);
13803 tg3_flag_set(tp, FLASH);
13805 switch (nvmpinstrp) {
13806 case FLASH_5720VENDOR_M_ST_M25PE20:
13807 case FLASH_5720VENDOR_M_ST_M45PE20:
13808 case FLASH_5720VENDOR_A_ST_M25PE20:
13809 case FLASH_5720VENDOR_A_ST_M45PE20:
13810 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13811 break;
13812 case FLASH_5720VENDOR_M_ST_M25PE40:
13813 case FLASH_5720VENDOR_M_ST_M45PE40:
13814 case FLASH_5720VENDOR_A_ST_M25PE40:
13815 case FLASH_5720VENDOR_A_ST_M45PE40:
13816 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13817 break;
13818 case FLASH_5720VENDOR_M_ST_M25PE80:
13819 case FLASH_5720VENDOR_M_ST_M45PE80:
13820 case FLASH_5720VENDOR_A_ST_M25PE80:
13821 case FLASH_5720VENDOR_A_ST_M45PE80:
13822 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13823 break;
13824 default:
13825 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13826 break;
13828 break;
13829 default:
13830 tg3_flag_set(tp, NO_NVRAM);
13831 return;
13834 tg3_nvram_get_pagesize(tp, nvcfg1);
13835 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13836 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
13839 u32 val;
13841 if (tg3_nvram_read(tp, 0, &val))
13842 return;
13844 if (val != TG3_EEPROM_MAGIC &&
13845 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
13846 tg3_flag_set(tp, NO_NVRAM);
13850 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13851 static void tg3_nvram_init(struct tg3 *tp)
13853 tw32_f(GRC_EEPROM_ADDR,
13854 (EEPROM_ADDR_FSM_RESET |
13855 (EEPROM_DEFAULT_CLOCK_PERIOD <<
13856 EEPROM_ADDR_CLKPERD_SHIFT)));
13858 msleep(1);
13860 /* Enable seeprom accesses. */
13861 tw32_f(GRC_LOCAL_CTRL,
13862 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13863 udelay(100);
13865 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13866 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13867 tg3_flag_set(tp, NVRAM);
13869 if (tg3_nvram_lock(tp)) {
13870 netdev_warn(tp->dev,
13871 "Cannot get nvram lock, %s failed\n",
13872 __func__);
13873 return;
13875 tg3_enable_nvram_access(tp);
13877 tp->nvram_size = 0;
13879 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13880 tg3_get_5752_nvram_info(tp);
13881 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13882 tg3_get_5755_nvram_info(tp);
13883 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13884 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13885 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13886 tg3_get_5787_nvram_info(tp);
13887 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13888 tg3_get_5761_nvram_info(tp);
13889 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13890 tg3_get_5906_nvram_info(tp);
13891 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13892 tg3_flag(tp, 57765_CLASS))
13893 tg3_get_57780_nvram_info(tp);
13894 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13895 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13896 tg3_get_5717_nvram_info(tp);
13897 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13898 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
13899 tg3_get_5720_nvram_info(tp);
13900 else
13901 tg3_get_nvram_info(tp);
13903 if (tp->nvram_size == 0)
13904 tg3_get_nvram_size(tp);
13906 tg3_disable_nvram_access(tp);
13907 tg3_nvram_unlock(tp);
13909 } else {
13910 tg3_flag_clear(tp, NVRAM);
13911 tg3_flag_clear(tp, NVRAM_BUFFERED);
13913 tg3_get_eeprom_size(tp);
13917 struct subsys_tbl_ent {
13918 u16 subsys_vendor, subsys_devid;
13919 u32 phy_id;
13922 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
13923 /* Broadcom boards. */
13924 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13925 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13926 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13927 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13928 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13929 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13930 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13931 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13932 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13933 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13934 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13935 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13936 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13937 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13938 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13939 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13940 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13941 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13942 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13943 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13944 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13945 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13947 /* 3com boards. */
13948 { TG3PCI_SUBVENDOR_ID_3COM,
13949 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13950 { TG3PCI_SUBVENDOR_ID_3COM,
13951 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13952 { TG3PCI_SUBVENDOR_ID_3COM,
13953 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13954 { TG3PCI_SUBVENDOR_ID_3COM,
13955 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13956 { TG3PCI_SUBVENDOR_ID_3COM,
13957 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13959 /* DELL boards. */
13960 { TG3PCI_SUBVENDOR_ID_DELL,
13961 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13962 { TG3PCI_SUBVENDOR_ID_DELL,
13963 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13964 { TG3PCI_SUBVENDOR_ID_DELL,
13965 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13966 { TG3PCI_SUBVENDOR_ID_DELL,
13967 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13969 /* Compaq boards. */
13970 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13971 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13972 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13973 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13974 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13975 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13976 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13977 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13978 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13979 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13981 /* IBM boards. */
13982 { TG3PCI_SUBVENDOR_ID_IBM,
13983 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13986 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
13988 int i;
13990 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13991 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13992 tp->pdev->subsystem_vendor) &&
13993 (subsys_id_to_phy_id[i].subsys_devid ==
13994 tp->pdev->subsystem_device))
13995 return &subsys_id_to_phy_id[i];
13997 return NULL;
14000 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14002 u32 val;
14004 tp->phy_id = TG3_PHY_ID_INVALID;
14005 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14007 /* Assume an onboard, WOL-capable device by default. */
14008 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14009 tg3_flag_set(tp, WOL_CAP);
14011 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14012 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14013 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14014 tg3_flag_set(tp, IS_NIC);
14016 val = tr32(VCPU_CFGSHDW);
14017 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14018 tg3_flag_set(tp, ASPM_WORKAROUND);
14019 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14020 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14021 tg3_flag_set(tp, WOL_ENABLE);
14022 device_set_wakeup_enable(&tp->pdev->dev, true);
14024 goto done;
14027 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14028 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14029 u32 nic_cfg, led_cfg;
14030 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14031 int eeprom_phy_serdes = 0;
14033 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14034 tp->nic_sram_data_cfg = nic_cfg;
14036 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14037 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14038 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14039 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14040 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
14041 (ver > 0) && (ver < 0x100))
14042 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14044 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
14045 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14047 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14048 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14049 eeprom_phy_serdes = 1;
14051 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14052 if (nic_phy_id != 0) {
14053 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14054 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14056 eeprom_phy_id = (id1 >> 16) << 10;
14057 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14058 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14059 } else
14060 eeprom_phy_id = 0;
14062 tp->phy_id = eeprom_phy_id;
14063 if (eeprom_phy_serdes) {
14064 if (!tg3_flag(tp, 5705_PLUS))
14065 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14066 else
14067 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14070 if (tg3_flag(tp, 5750_PLUS))
14071 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14072 SHASTA_EXT_LED_MODE_MASK);
14073 else
14074 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14076 switch (led_cfg) {
14077 default:
14078 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14079 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14080 break;
14082 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14083 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14084 break;
14086 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14087 tp->led_ctrl = LED_CTRL_MODE_MAC;
14089 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14090 * read on some older 5700/5701 bootcode.
14092 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14093 ASIC_REV_5700 ||
14094 GET_ASIC_REV(tp->pci_chip_rev_id) ==
14095 ASIC_REV_5701)
14096 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14098 break;
14100 case SHASTA_EXT_LED_SHARED:
14101 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14102 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
14103 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
14104 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14105 LED_CTRL_MODE_PHY_2);
14106 break;
14108 case SHASTA_EXT_LED_MAC:
14109 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14110 break;
14112 case SHASTA_EXT_LED_COMBO:
14113 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14114 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
14115 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14116 LED_CTRL_MODE_PHY_2);
14117 break;
14121 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14122 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
14123 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14124 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14126 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
14127 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14129 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14130 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14131 if ((tp->pdev->subsystem_vendor ==
14132 PCI_VENDOR_ID_ARIMA) &&
14133 (tp->pdev->subsystem_device == 0x205a ||
14134 tp->pdev->subsystem_device == 0x2063))
14135 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14136 } else {
14137 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14138 tg3_flag_set(tp, IS_NIC);
14141 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14142 tg3_flag_set(tp, ENABLE_ASF);
14143 if (tg3_flag(tp, 5750_PLUS))
14144 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14147 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14148 tg3_flag(tp, 5750_PLUS))
14149 tg3_flag_set(tp, ENABLE_APE);
14151 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14152 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14153 tg3_flag_clear(tp, WOL_CAP);
14155 if (tg3_flag(tp, WOL_CAP) &&
14156 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14157 tg3_flag_set(tp, WOL_ENABLE);
14158 device_set_wakeup_enable(&tp->pdev->dev, true);
14161 if (cfg2 & (1 << 17))
14162 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14164 /* SerDes signal pre-emphasis in register 0x590 is set by the
14165 * bootcode if bit 18 is set. */
14166 if (cfg2 & (1 << 18))
14167 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14169 if ((tg3_flag(tp, 57765_PLUS) ||
14170 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14171 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
14172 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14173 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14175 if (tg3_flag(tp, PCI_EXPRESS) &&
14176 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14177 !tg3_flag(tp, 57765_PLUS)) {
14178 u32 cfg3;
14180 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14181 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14182 tg3_flag_set(tp, ASPM_WORKAROUND);
14185 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14186 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14187 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14188 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14189 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14190 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14192 done:
14193 if (tg3_flag(tp, WOL_CAP))
14194 device_set_wakeup_enable(&tp->pdev->dev,
14195 tg3_flag(tp, WOL_ENABLE));
14196 else
14197 device_set_wakeup_capable(&tp->pdev->dev, false);
14200 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14202 int i, err;
14203 u32 val2, off = offset * 8;
14205 err = tg3_nvram_lock(tp);
14206 if (err)
14207 return err;
14209 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14210 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14211 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14212 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14213 udelay(10);
14215 for (i = 0; i < 100; i++) {
14216 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14217 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14218 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14219 break;
14221 udelay(10);
14224 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14226 tg3_nvram_unlock(tp);
14227 if (val2 & APE_OTP_STATUS_CMD_DONE)
14228 return 0;
14230 return -EBUSY;
14233 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14235 int i;
14236 u32 val;
14238 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14239 tw32(OTP_CTRL, cmd);
14241 /* Wait for up to 1 ms for command to execute. */
14242 for (i = 0; i < 100; i++) {
14243 val = tr32(OTP_STATUS);
14244 if (val & OTP_STATUS_CMD_DONE)
14245 break;
14246 udelay(10);
14249 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14252 /* Read the gphy configuration from the OTP region of the chip. The gphy
14253 * configuration is a 32-bit value that straddles the alignment boundary.
14254 * We do two 32-bit reads and then shift and merge the results.
14256 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14258 u32 bhalf_otp, thalf_otp;
14260 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14262 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14263 return 0;
14265 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14267 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14268 return 0;
14270 thalf_otp = tr32(OTP_READ_DATA);
14272 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14274 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14275 return 0;
14277 bhalf_otp = tr32(OTP_READ_DATA);
14279 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
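/* i.e. the 32-bit gphy config straddles two OTP words: its upper half
 * is the low 16 bits of the word at OTP_ADDRESS_MAGIC1, and its lower
 * half is the high 16 bits of the word at OTP_ADDRESS_MAGIC2.
 */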
14282 static void tg3_phy_init_link_config(struct tg3 *tp)
14284 u32 adv = ADVERTISED_Autoneg;
14286 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14287 adv |= ADVERTISED_1000baseT_Half |
14288 ADVERTISED_1000baseT_Full;
14290 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14291 adv |= ADVERTISED_100baseT_Half |
14292 ADVERTISED_100baseT_Full |
14293 ADVERTISED_10baseT_Half |
14294 ADVERTISED_10baseT_Full |
14295 ADVERTISED_TP;
14296 else
14297 adv |= ADVERTISED_FIBRE;
14299 tp->link_config.advertising = adv;
14300 tp->link_config.speed = SPEED_UNKNOWN;
14301 tp->link_config.duplex = DUPLEX_UNKNOWN;
14302 tp->link_config.autoneg = AUTONEG_ENABLE;
14303 tp->link_config.active_speed = SPEED_UNKNOWN;
14304 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14306 tp->old_link = -1;
14309 static int tg3_phy_probe(struct tg3 *tp)
14311 u32 hw_phy_id_1, hw_phy_id_2;
14312 u32 hw_phy_id, hw_phy_id_masked;
14313 int err;
14315 /* flow control autonegotiation is default behavior */
14316 tg3_flag_set(tp, PAUSE_AUTONEG);
14317 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14319 if (tg3_flag(tp, ENABLE_APE)) {
14320 switch (tp->pci_fn) {
14321 case 0:
14322 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14323 break;
14324 case 1:
14325 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14326 break;
14327 case 2:
14328 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14329 break;
14330 case 3:
14331 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14332 break;
14336 if (tg3_flag(tp, USE_PHYLIB))
14337 return tg3_phy_init(tp);
14339 /* Reading the PHY ID register can conflict with ASF
14340 * firmware access to the PHY hardware.
14342 err = 0;
14343 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14344 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14345 } else {
14346 /* Now read the physical PHY_ID from the chip and verify
14347 * that it is sane. If it doesn't look good, we fall back
14348 * to the PHY ID recorded in the EEPROM area and, failing
14349 * that, the hard-coded subsystem-ID table.
14351 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14352 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14354 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14355 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14356 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14358 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
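/* The shifts above repack the two standard MII ID registers
 * (MII_PHYSID1/MII_PHYSID2) into the driver's internal PHY ID layout,
 * so that the masked result can be matched against the TG3_PHY_ID_*
 * constants below.
 */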
14361 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14362 tp->phy_id = hw_phy_id;
14363 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14364 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14365 else
14366 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14367 } else {
14368 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14369 /* Do nothing, phy ID already set up in
14370 * tg3_get_eeprom_hw_cfg().
14371 */
14372 } else {
14373 struct subsys_tbl_ent *p;
14375 /* No eeprom signature? Try the hardcoded
14376 * subsys device table.
14377 */
14378 p = tg3_lookup_by_subsys(tp);
14379 if (!p)
14380 return -ENODEV;
14382 tp->phy_id = p->phy_id;
14383 if (!tp->phy_id ||
14384 tp->phy_id == TG3_PHY_ID_BCM8002)
14385 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14389 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14390 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14391 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
14392 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
14393 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
14394 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
14395 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
14396 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
14397 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14399 tg3_phy_init_link_config(tp);
14401 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14402 !tg3_flag(tp, ENABLE_APE) &&
14403 !tg3_flag(tp, ENABLE_ASF)) {
14404 u32 bmsr, dummy;
14406 tg3_readphy(tp, MII_BMSR, &bmsr);
14407 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14408 (bmsr & BMSR_LSTATUS))
14409 goto skip_phy_reset;
14411 err = tg3_phy_reset(tp);
14412 if (err)
14413 return err;
14415 tg3_phy_set_wirespeed(tp);
14417 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14418 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14419 tp->link_config.flowctrl);
14421 tg3_writephy(tp, MII_BMCR,
14422 BMCR_ANENABLE | BMCR_ANRESTART);
14426 skip_phy_reset:
14427 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14428 err = tg3_init_5401phy_dsp(tp);
14429 if (err)
14430 return err;
14432 err = tg3_init_5401phy_dsp(tp);
14435 return err;
14438 static void tg3_read_vpd(struct tg3 *tp)
14440 u8 *vpd_data;
14441 unsigned int block_end, rosize, len;
14442 u32 vpdlen;
14443 int j, i = 0;
14445 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14446 if (!vpd_data)
14447 goto out_no_vpd;
14449 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14450 if (i < 0)
14451 goto out_not_found;
14453 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14454 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14455 i += PCI_VPD_LRDT_TAG_SIZE;
14457 if (block_end > vpdlen)
14458 goto out_not_found;
14460 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14461 PCI_VPD_RO_KEYWORD_MFR_ID);
14462 if (j > 0) {
14463 len = pci_vpd_info_field_size(&vpd_data[j]);
14465 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14466 if (j + len > block_end || len != 4 ||
14467 memcmp(&vpd_data[j], "1028", 4))
14468 goto partno;
14470 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14471 PCI_VPD_RO_KEYWORD_VENDOR0);
14472 if (j < 0)
14473 goto partno;
14475 len = pci_vpd_info_field_size(&vpd_data[j]);
14477 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14478 if (j + len > block_end)
14479 goto partno;
14481 memcpy(tp->fw_ver, &vpd_data[j], len);
14482 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14485 partno:
14486 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14487 PCI_VPD_RO_KEYWORD_PARTNO);
14488 if (i < 0)
14489 goto out_not_found;
14491 len = pci_vpd_info_field_size(&vpd_data[i]);
14493 i += PCI_VPD_INFO_FLD_HDR_SIZE;
14494 if (len > TG3_BPN_SIZE ||
14495 (len + i) > vpdlen)
14496 goto out_not_found;
14498 memcpy(tp->board_part_number, &vpd_data[i], len);
14500 out_not_found:
14501 kfree(vpd_data);
14502 if (tp->board_part_number[0])
14503 return;
14505 out_no_vpd:
14506 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14507 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14508 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14509 strcpy(tp->board_part_number, "BCM5717");
14510 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14511 strcpy(tp->board_part_number, "BCM5718");
14512 else
14513 goto nomatch;
14514 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14515 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14516 strcpy(tp->board_part_number, "BCM57780");
14517 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14518 strcpy(tp->board_part_number, "BCM57760");
14519 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14520 strcpy(tp->board_part_number, "BCM57790");
14521 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14522 strcpy(tp->board_part_number, "BCM57788");
14523 else
14524 goto nomatch;
14525 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14526 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14527 strcpy(tp->board_part_number, "BCM57761");
14528 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14529 strcpy(tp->board_part_number, "BCM57765");
14530 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14531 strcpy(tp->board_part_number, "BCM57781");
14532 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14533 strcpy(tp->board_part_number, "BCM57785");
14534 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14535 strcpy(tp->board_part_number, "BCM57791");
14536 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14537 strcpy(tp->board_part_number, "BCM57795");
14538 else
14539 goto nomatch;
14540 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14541 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14542 strcpy(tp->board_part_number, "BCM57762");
14543 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14544 strcpy(tp->board_part_number, "BCM57766");
14545 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14546 strcpy(tp->board_part_number, "BCM57782");
14547 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14548 strcpy(tp->board_part_number, "BCM57786");
14549 else
14550 goto nomatch;
14551 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14552 strcpy(tp->board_part_number, "BCM95906");
14553 } else {
14554 nomatch:
14555 strcpy(tp->board_part_number, "none");
14559 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14561 u32 val;
14563 if (tg3_nvram_read(tp, offset, &val) ||
14564 (val & 0xfc000000) != 0x0c000000 ||
14565 tg3_nvram_read(tp, offset + 4, &val) ||
14566 val != 0)
14567 return 0;
14569 return 1;
14572 static void tg3_read_bc_ver(struct tg3 *tp)
14574 u32 val, offset, start, ver_offset;
14575 int i, dst_off;
14576 bool newver = false;
14578 if (tg3_nvram_read(tp, 0xc, &offset) ||
14579 tg3_nvram_read(tp, 0x4, &start))
14580 return;
14582 offset = tg3_nvram_logical_addr(tp, offset);
14584 if (tg3_nvram_read(tp, offset, &val))
14585 return;
14587 if ((val & 0xfc000000) == 0x0c000000) {
14588 if (tg3_nvram_read(tp, offset + 4, &val))
14589 return;
14591 if (val == 0)
14592 newver = true;
14595 dst_off = strlen(tp->fw_ver);
14597 if (newver) {
14598 if (TG3_VER_SIZE - dst_off < 16 ||
14599 tg3_nvram_read(tp, offset + 8, &ver_offset))
14600 return;
14602 offset = offset + ver_offset - start;
14603 for (i = 0; i < 16; i += 4) {
14604 __be32 v;
14605 if (tg3_nvram_read_be32(tp, offset + i, &v))
14606 return;
14608 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14610 } else {
14611 u32 major, minor;
14613 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14614 return;
14616 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14617 TG3_NVM_BCVER_MAJSFT;
14618 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14619 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14620 "v%d.%02d", major, minor);
14624 static void tg3_read_hwsb_ver(struct tg3 *tp)
14626 u32 val, major, minor;
14628 /* Use native endian representation */
14629 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14630 return;
14632 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14633 TG3_NVM_HWSB_CFG1_MAJSFT;
14634 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14635 TG3_NVM_HWSB_CFG1_MINSFT;
14637 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14640 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14642 u32 offset, major, minor, build;
14644 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14646 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14647 return;
14649 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14650 case TG3_EEPROM_SB_REVISION_0:
14651 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14652 break;
14653 case TG3_EEPROM_SB_REVISION_2:
14654 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14655 break;
14656 case TG3_EEPROM_SB_REVISION_3:
14657 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14658 break;
14659 case TG3_EEPROM_SB_REVISION_4:
14660 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14661 break;
14662 case TG3_EEPROM_SB_REVISION_5:
14663 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14664 break;
14665 case TG3_EEPROM_SB_REVISION_6:
14666 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14667 break;
14668 default:
14669 return;
14672 if (tg3_nvram_read(tp, offset, &val))
14673 return;
14675 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14676 TG3_EEPROM_SB_EDH_BLD_SHFT;
14677 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14678 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14679 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
14681 if (minor > 99 || build > 26)
14682 return;
14684 offset = strlen(tp->fw_ver);
14685 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14686 " v%d.%02d", major, minor);
14688 if (build > 0) {
14689 offset = strlen(tp->fw_ver);
14690 if (offset < TG3_VER_SIZE - 1)
14691 tp->fw_ver[offset] = 'a' + build - 1;
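/* The build number maps to a single suffix letter: build 1 becomes
 * 'a' and build 26 becomes 'z', which is why values above 26 were
 * rejected earlier in this function.
 */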
14695 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14697 u32 val, offset, start;
14698 int i, vlen;
14700 for (offset = TG3_NVM_DIR_START;
14701 offset < TG3_NVM_DIR_END;
14702 offset += TG3_NVM_DIRENT_SIZE) {
14703 if (tg3_nvram_read(tp, offset, &val))
14704 return;
14706 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14707 break;
14710 if (offset == TG3_NVM_DIR_END)
14711 return;
14713 if (!tg3_flag(tp, 5705_PLUS))
14714 start = 0x08000000;
14715 else if (tg3_nvram_read(tp, offset - 4, &start))
14716 return;
14718 if (tg3_nvram_read(tp, offset + 4, &offset) ||
14719 !tg3_fw_img_is_valid(tp, offset) ||
14720 tg3_nvram_read(tp, offset + 8, &val))
14721 return;
14723 offset += val - start;
14725 vlen = strlen(tp->fw_ver);
14727 tp->fw_ver[vlen++] = ',';
14728 tp->fw_ver[vlen++] = ' ';
14730 for (i = 0; i < 4; i++) {
14731 __be32 v;
14732 if (tg3_nvram_read_be32(tp, offset, &v))
14733 return;
14735 offset += sizeof(v);
14737 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14738 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14739 break;
14742 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14743 vlen += sizeof(v);
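/* The loop above copies at most four 32-bit NVRAM words (16 bytes) of
 * the management firmware version string into fw_ver, clamping the
 * final copy so the TG3_VER_SIZE buffer is never overrun.
 */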
14747 static void tg3_probe_ncsi(struct tg3 *tp)
14749 u32 apedata;
14751 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14752 if (apedata != APE_SEG_SIG_MAGIC)
14753 return;
14755 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14756 if (!(apedata & APE_FW_STATUS_READY))
14757 return;
14759 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14760 tg3_flag_set(tp, APE_HAS_NCSI);
14763 static void tg3_read_dash_ver(struct tg3 *tp)
14765 int vlen;
14766 u32 apedata;
14767 char *fwtype;
14769 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14771 if (tg3_flag(tp, APE_HAS_NCSI))
14772 fwtype = "NCSI";
14773 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
14774 fwtype = "SMASH";
14775 else
14776 fwtype = "DASH";
14778 vlen = strlen(tp->fw_ver);
14780 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14781 fwtype,
14782 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14783 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14784 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14785 (apedata & APE_FW_VERSION_BLDMSK));
14788 static void tg3_read_otp_ver(struct tg3 *tp)
14790 u32 val, val2;
14792 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
14793 return;
14795 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
14796 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
14797 TG3_OTP_MAGIC0_VALID(val)) {
14798 u64 val64 = (u64) val << 32 | val2;
14799 u32 ver = 0;
14800 int i, vlen;
14802 for (i = 0; i < 7; i++) {
14803 if ((val64 & 0xff) == 0)
14804 break;
14805 ver = val64 & 0xff;
14806 val64 >>= 8;
14808 vlen = strlen(tp->fw_ver);
14809 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
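/* The byte scan above walks val64 from its least significant byte,
 * keeping the last nonzero byte seen before a zero terminator, so
 * 'ver' ends up holding the final byte of the OTP version field.
 */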
14813 static void tg3_read_fw_ver(struct tg3 *tp)
14815 u32 val;
14816 bool vpd_vers = false;
14818 if (tp->fw_ver[0] != 0)
14819 vpd_vers = true;
14821 if (tg3_flag(tp, NO_NVRAM)) {
14822 strcat(tp->fw_ver, "sb");
14823 tg3_read_otp_ver(tp);
14824 return;
14827 if (tg3_nvram_read(tp, 0, &val))
14828 return;
14830 if (val == TG3_EEPROM_MAGIC)
14831 tg3_read_bc_ver(tp);
14832 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14833 tg3_read_sb_ver(tp, val);
14834 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14835 tg3_read_hwsb_ver(tp);
14837 if (tg3_flag(tp, ENABLE_ASF)) {
14838 if (tg3_flag(tp, ENABLE_APE)) {
14839 tg3_probe_ncsi(tp);
14840 if (!vpd_vers)
14841 tg3_read_dash_ver(tp);
14842 } else if (!vpd_vers) {
14843 tg3_read_mgmtfw_ver(tp);
14847 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
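/* Whichever version strings were appended above, fw_ver is forcibly
 * NUL-terminated here so later strlen()/printf users stay in bounds.
 */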
14850 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14852 if (tg3_flag(tp, LRG_PROD_RING_CAP))
14853 return TG3_RX_RET_MAX_SIZE_5717;
14854 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14855 return TG3_RX_RET_MAX_SIZE_5700;
14856 else
14857 return TG3_RX_RET_MAX_SIZE_5705;
14860 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14861 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14862 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14863 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14864 { },
14867 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
14869 struct pci_dev *peer;
14870 unsigned int func, devnr = tp->pdev->devfn & ~7;
14872 for (func = 0; func < 8; func++) {
14873 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14874 if (peer && peer != tp->pdev)
14875 break;
14876 pci_dev_put(peer);
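/* pci_dev_put() tolerates a NULL argument, so the put above is safe
 * both when the slot is empty and when the loop hit the device's own
 * function.
 */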
14878 /* 5704 can be configured in single-port mode, set peer to
14879 * tp->pdev in that case.
14880 */
14881 if (!peer) {
14882 peer = tp->pdev;
14883 return peer;
14886 /*
14887 * We don't need to keep the refcount elevated; there's no way
14888 * to remove one half of this device without removing the other
14889 */
14890 pci_dev_put(peer);
14892 return peer;
14895 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14897 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14898 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14899 u32 reg;
14901 /* All devices that use the alternate
14902 * ASIC REV location have a CPMU.
14903 */
14904 tg3_flag_set(tp, CPMU_PRESENT);
14906 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14907 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
14908 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14909 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14910 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
14911 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
14912 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
14913 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
14914 reg = TG3PCI_GEN2_PRODID_ASICREV;
14915 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14916 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14917 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14918 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14919 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14920 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14921 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14922 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14923 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14924 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14925 reg = TG3PCI_GEN15_PRODID_ASICREV;
14926 else
14927 reg = TG3PCI_PRODID_ASICREV;
14929 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14932 /* Wrong chip ID in 5752 A0. This code can be removed later
14933 * as A0 is not in production.
14934 */
14935 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14936 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14938 if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
14939 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
14941 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14942 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14943 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14944 tg3_flag_set(tp, 5717_PLUS);
14946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14947 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14948 tg3_flag_set(tp, 57765_CLASS);
14950 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
14951 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
14952 tg3_flag_set(tp, 57765_PLUS);
14954 /* Intentionally exclude ASIC_REV_5906 */
14955 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14956 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14957 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14958 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14959 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14960 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14961 tg3_flag(tp, 57765_PLUS))
14962 tg3_flag_set(tp, 5755_PLUS);
14964 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14965 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14966 tg3_flag_set(tp, 5780_CLASS);
14968 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14969 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14970 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14971 tg3_flag(tp, 5755_PLUS) ||
14972 tg3_flag(tp, 5780_CLASS))
14973 tg3_flag_set(tp, 5750_PLUS);
14975 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14976 tg3_flag(tp, 5750_PLUS))
14977 tg3_flag_set(tp, 5705_PLUS);
14980 static bool tg3_10_100_only_device(struct tg3 *tp,
14981 const struct pci_device_id *ent)
14983 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
14985 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14986 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14987 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14988 return true;
14990 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
14991 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
14992 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
14993 return true;
14994 } else {
14995 return true;
14999 return false;
15002 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15004 u32 misc_ctrl_reg;
15005 u32 pci_state_reg, grc_misc_cfg;
15006 u32 val;
15007 u16 pci_cmd;
15008 int err;
15010 /* Force memory write invalidate off. If we leave it on,
15011 * then on 5700_BX chips we have to enable a workaround.
15012 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15013 * to match the cacheline size. The Broadcom driver has this
15014 * workaround but turns MWI off all the time and so never uses
15015 * it. This seems to suggest that the workaround is insufficient.
15016 */
15017 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15018 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15019 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15021 /* Important! -- Make sure register accesses are byteswapped
15022 * correctly. Also, for those chips that require it, make
15023 * sure that indirect register accesses are enabled before
15024 * the first operation.
15025 */
15026 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15027 &misc_ctrl_reg);
15028 tp->misc_host_ctrl |= (misc_ctrl_reg &
15029 MISC_HOST_CTRL_CHIPREV);
15030 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15031 tp->misc_host_ctrl);
15033 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15035 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15036 * we need to disable memory and use config. cycles
15037 * only to access all registers. The 5702/03 chips
15038 * can mistakenly decode the special cycles from the
15039 * ICH chipsets as memory write cycles, causing corruption
15040 * of register and memory space. Only certain ICH bridges
15041 * will drive special cycles with non-zero data during the
15042 * address phase which can fall within the 5703's address
15043 * range. This is not an ICH bug as the PCI spec allows
15044 * non-zero address during special cycles. However, only
15045 * these ICH bridges are known to drive non-zero addresses
15046 * during special cycles.
15048 * Since special cycles do not cross PCI bridges, we only
15049 * enable this workaround if the 5703 is on the secondary
15050 * bus of these ICH bridges.
15051 */
15052 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
15053 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
15054 static struct tg3_dev_id {
15055 u32 vendor;
15056 u32 device;
15057 u32 rev;
15058 } ich_chipsets[] = {
15059 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15060 PCI_ANY_ID },
15061 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15062 PCI_ANY_ID },
15063 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15064 0xa },
15065 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15066 PCI_ANY_ID },
15067 { },
15069 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15070 struct pci_dev *bridge = NULL;
15072 while (pci_id->vendor != 0) {
15073 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15074 bridge);
15075 if (!bridge) {
15076 pci_id++;
15077 continue;
15079 if (pci_id->rev != PCI_ANY_ID) {
15080 if (bridge->revision > pci_id->rev)
15081 continue;
15083 if (bridge->subordinate &&
15084 (bridge->subordinate->number ==
15085 tp->pdev->bus->number)) {
15086 tg3_flag_set(tp, ICH_WORKAROUND);
15087 pci_dev_put(bridge);
15088 break;
15093 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15094 static struct tg3_dev_id {
15095 u32 vendor;
15096 u32 device;
15097 } bridge_chipsets[] = {
15098 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15099 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15100 { },
15102 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15103 struct pci_dev *bridge = NULL;
15105 while (pci_id->vendor != 0) {
15106 bridge = pci_get_device(pci_id->vendor,
15107 pci_id->device,
15108 bridge);
15109 if (!bridge) {
15110 pci_id++;
15111 continue;
15113 if (bridge->subordinate &&
15114 (bridge->subordinate->number <=
15115 tp->pdev->bus->number) &&
15116 (bridge->subordinate->busn_res.end >=
15117 tp->pdev->bus->number)) {
15118 tg3_flag_set(tp, 5701_DMA_BUG);
15119 pci_dev_put(bridge);
15120 break;
15125 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15126 * DMA addresses > 40-bit. This bridge may have other additional
15127 * 57xx devices behind it in some 4-port NIC designs for example.
15128 * Any tg3 device found behind the bridge will also need the 40-bit
15129 * DMA workaround.
15130 */
15131 if (tg3_flag(tp, 5780_CLASS)) {
15132 tg3_flag_set(tp, 40BIT_DMA_BUG);
15133 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15134 } else {
15135 struct pci_dev *bridge = NULL;
15137 do {
15138 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15139 PCI_DEVICE_ID_SERVERWORKS_EPB,
15140 bridge);
15141 if (bridge && bridge->subordinate &&
15142 (bridge->subordinate->number <=
15143 tp->pdev->bus->number) &&
15144 (bridge->subordinate->busn_res.end >=
15145 tp->pdev->bus->number)) {
15146 tg3_flag_set(tp, 40BIT_DMA_BUG);
15147 pci_dev_put(bridge);
15148 break;
15150 } while (bridge);
15153 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15154 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
15155 tp->pdev_peer = tg3_find_peer(tp);
15157 /* Determine TSO capabilities */
15158 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
15159 ; /* Do nothing. HW bug. */
15160 else if (tg3_flag(tp, 57765_PLUS))
15161 tg3_flag_set(tp, HW_TSO_3);
15162 else if (tg3_flag(tp, 5755_PLUS) ||
15163 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15164 tg3_flag_set(tp, HW_TSO_2);
15165 else if (tg3_flag(tp, 5750_PLUS)) {
15166 tg3_flag_set(tp, HW_TSO_1);
15167 tg3_flag_set(tp, TSO_BUG);
15168 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
15169 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
15170 tg3_flag_clear(tp, TSO_BUG);
15171 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15172 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15173 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
15174 tg3_flag_set(tp, TSO_BUG);
15175 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
15176 tp->fw_needed = FIRMWARE_TG3TSO5;
15177 else
15178 tp->fw_needed = FIRMWARE_TG3TSO;
15181 /* Selectively allow TSO based on operating conditions */
15182 if (tg3_flag(tp, HW_TSO_1) ||
15183 tg3_flag(tp, HW_TSO_2) ||
15184 tg3_flag(tp, HW_TSO_3) ||
15185 tp->fw_needed) {
15186 /* For firmware TSO, assume ASF is disabled.
15187 * We'll disable TSO later if we discover ASF
15188 * is enabled in tg3_get_eeprom_hw_cfg().
15189 */
15190 tg3_flag_set(tp, TSO_CAPABLE);
15191 } else {
15192 tg3_flag_clear(tp, TSO_CAPABLE);
15193 tg3_flag_clear(tp, TSO_BUG);
15194 tp->fw_needed = NULL;
15197 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15198 tp->fw_needed = FIRMWARE_TG3;
15200 tp->irq_max = 1;
15202 if (tg3_flag(tp, 5750_PLUS)) {
15203 tg3_flag_set(tp, SUPPORT_MSI);
15204 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
15205 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
15206 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
15207 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
15208 tp->pdev_peer == tp->pdev))
15209 tg3_flag_clear(tp, SUPPORT_MSI);
15211 if (tg3_flag(tp, 5755_PLUS) ||
15212 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15213 tg3_flag_set(tp, 1SHOT_MSI);
15216 if (tg3_flag(tp, 57765_PLUS)) {
15217 tg3_flag_set(tp, SUPPORT_MSIX);
15218 tp->irq_max = TG3_IRQ_MAX_VECS;
15222 tp->txq_max = 1;
15223 tp->rxq_max = 1;
15224 if (tp->irq_max > 1) {
15225 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15226 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15228 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15229 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15230 tp->txq_max = tp->irq_max - 1;
15233 if (tg3_flag(tp, 5755_PLUS) ||
15234 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15235 tg3_flag_set(tp, SHORT_DMA_BUG);
15237 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
15238 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15240 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15241 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15242 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15243 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15244 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15246 if (tg3_flag(tp, 57765_PLUS) &&
15247 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
15248 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15250 if (!tg3_flag(tp, 5705_PLUS) ||
15251 tg3_flag(tp, 5780_CLASS) ||
15252 tg3_flag(tp, USE_JUMBO_BDFLAG))
15253 tg3_flag_set(tp, JUMBO_CAPABLE);
15255 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15256 &pci_state_reg);
15258 if (pci_is_pcie(tp->pdev)) {
15259 u16 lnkctl;
15261 tg3_flag_set(tp, PCI_EXPRESS);
15263 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15264 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15265 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
15266 ASIC_REV_5906) {
15267 tg3_flag_clear(tp, HW_TSO_2);
15268 tg3_flag_clear(tp, TSO_CAPABLE);
15270 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15271 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15272 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
15273 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
15274 tg3_flag_set(tp, CLKREQ_BUG);
15275 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
15276 tg3_flag_set(tp, L1PLLPD_EN);
15278 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
15279 /* BCM5785 devices are effectively PCIe devices, and should
15280 * follow PCIe codepaths, but do not have a PCIe capabilities
15281 * section.
15282 */
15283 tg3_flag_set(tp, PCI_EXPRESS);
15284 } else if (!tg3_flag(tp, 5705_PLUS) ||
15285 tg3_flag(tp, 5780_CLASS)) {
15286 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15287 if (!tp->pcix_cap) {
15288 dev_err(&tp->pdev->dev,
15289 "Cannot find PCI-X capability, aborting\n");
15290 return -EIO;
15293 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15294 tg3_flag_set(tp, PCIX_MODE);
15297 /* If we have an AMD 762 or VIA K8T800 chipset, write
15298 * reordering to the mailbox registers done by the host
15299 * controller can cause major troubles. We read back from
15300 * every mailbox register write to force the writes to be
15301 * posted to the chip in order.
15302 */
15303 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15304 !tg3_flag(tp, PCI_EXPRESS))
15305 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15307 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15308 &tp->pci_cacheline_sz);
15309 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15310 &tp->pci_lat_timer);
15311 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15312 tp->pci_lat_timer < 64) {
15313 tp->pci_lat_timer = 64;
15314 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15315 tp->pci_lat_timer);
15318 /* Important! -- It is critical that the PCI-X hw workaround
15319 * situation is decided before the first MMIO register access.
15320 */
15321 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
15322 /* 5700 BX chips need to have their TX producer index
15323 * mailboxes written twice to work around a bug.
15324 */
15325 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15327 /* If we are in PCI-X mode, enable register write workaround.
15329 * The workaround is to use indirect register accesses
15330 * for all chip writes except those to mailbox registers.
15331 */
15332 if (tg3_flag(tp, PCIX_MODE)) {
15333 u32 pm_reg;
15335 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15337 /* The chip can have its power management PCI config
15338 * space registers clobbered due to this bug.
15339 * So explicitly force the chip into D0 here.
15340 */
15341 pci_read_config_dword(tp->pdev,
15342 tp->pm_cap + PCI_PM_CTRL,
15343 &pm_reg);
15344 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15345 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15346 pci_write_config_dword(tp->pdev,
15347 tp->pm_cap + PCI_PM_CTRL,
15348 pm_reg);
15350 /* Also, force SERR#/PERR# in PCI command. */
15351 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15352 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15353 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15357 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15358 tg3_flag_set(tp, PCI_HIGH_SPEED);
15359 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15360 tg3_flag_set(tp, PCI_32BIT);
15362 /* Chip-specific fixup from Broadcom driver */
15363 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
15364 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15365 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15366 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15369 /* Default fast path register access methods */
15370 tp->read32 = tg3_read32;
15371 tp->write32 = tg3_write32;
15372 tp->read32_mbox = tg3_read32;
15373 tp->write32_mbox = tg3_write32;
15374 tp->write32_tx_mbox = tg3_write32;
15375 tp->write32_rx_mbox = tg3_write32;
15377 /* Various workaround register access methods */
15378 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15379 tp->write32 = tg3_write_indirect_reg32;
15380 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
15381 (tg3_flag(tp, PCI_EXPRESS) &&
15382 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
15383 /*
15384 * Back-to-back register writes can cause problems on these
15385 * chips; the workaround is to read back all reg writes
15386 * except those to mailbox regs.
15387 *
15388 * See tg3_write_indirect_reg32().
15389 */
15390 tp->write32 = tg3_write_flush_reg32;
15393 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15394 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15395 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15396 tp->write32_rx_mbox = tg3_write_flush_reg32;
15399 if (tg3_flag(tp, ICH_WORKAROUND)) {
15400 tp->read32 = tg3_read_indirect_reg32;
15401 tp->write32 = tg3_write_indirect_reg32;
15402 tp->read32_mbox = tg3_read_indirect_mbox;
15403 tp->write32_mbox = tg3_write_indirect_mbox;
15404 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15405 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15407 iounmap(tp->regs);
15408 tp->regs = NULL;
15410 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15411 pci_cmd &= ~PCI_COMMAND_MEMORY;
15412 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15414 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15415 tp->read32_mbox = tg3_read32_mbox_5906;
15416 tp->write32_mbox = tg3_write32_mbox_5906;
15417 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15418 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15421 if (tp->write32 == tg3_write_indirect_reg32 ||
15422 (tg3_flag(tp, PCIX_MODE) &&
15423 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15424 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
15425 tg3_flag_set(tp, SRAM_USE_CONFIG);
15427 /* The memory arbiter has to be enabled in order for SRAM accesses
15428 * to succeed. Normally on powerup the tg3 chip firmware will make
15429 * sure it is enabled, but other entities such as system netboot
15430 * code might disable it.
15431 */
15432 val = tr32(MEMARB_MODE);
15433 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15435 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15437 tg3_flag(tp, 5780_CLASS)) {
15438 if (tg3_flag(tp, PCIX_MODE)) {
15439 pci_read_config_dword(tp->pdev,
15440 tp->pcix_cap + PCI_X_STATUS,
15441 &val);
15442 tp->pci_fn = val & 0x7;
15444 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
15445 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15446 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
15447 NIC_SRAM_CPMUSTAT_SIG) {
15448 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
15449 tp->pci_fn = tp->pci_fn ? 1 : 0;
15451 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15452 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
15453 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15454 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
15455 NIC_SRAM_CPMUSTAT_SIG) {
15456 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15457 TG3_CPMU_STATUS_FSHFT_5719;
15461 /* Get eeprom hw config before calling tg3_set_power_state().
15462 * In particular, the TG3_FLAG_IS_NIC flag must be
15463 * determined before calling tg3_set_power_state() so that
15464 * we know whether or not to switch out of Vaux power.
15465 * When the flag is set, it means that GPIO1 is used for eeprom
15466 * write protect and also implies that it is a LOM where GPIOs
15467 * are not used to switch power.
15468 */
15469 tg3_get_eeprom_hw_cfg(tp);
15471 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
15472 tg3_flag_clear(tp, TSO_CAPABLE);
15473 tg3_flag_clear(tp, TSO_BUG);
15474 tp->fw_needed = NULL;
15477 if (tg3_flag(tp, ENABLE_APE)) {
15478 /* Allow reads and writes to the
15479 * APE register and memory space.
15480 */
15481 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15482 PCISTATE_ALLOW_APE_SHMEM_WR |
15483 PCISTATE_ALLOW_APE_PSPACE_WR;
15484 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15485 pci_state_reg);
15487 tg3_ape_lock_init(tp);
15490 /* Set up tp->grc_local_ctrl before calling
15491 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
15492 * will bring 5700's external PHY out of reset.
15493 * It is also used as eeprom write protect on LOMs.
15494 */
15495 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15496 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15497 tg3_flag(tp, EEPROM_WRITE_PROT))
15498 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15499 GRC_LCLCTRL_GPIO_OUTPUT1);
15500 /* Unused GPIO3 must be driven as output on 5752 because there
15501 * are no pull-up resistors on unused GPIO pins.
15502 */
15503 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
15504 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15506 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15507 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
15508 tg3_flag(tp, 57765_CLASS))
15509 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15511 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15512 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15513 /* Turn off the debug UART. */
15514 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15515 if (tg3_flag(tp, IS_NIC))
15516 /* Keep VMain power. */
15517 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15518 GRC_LCLCTRL_GPIO_OUTPUT0;
15521 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15522 tp->grc_local_ctrl |=
15523 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15525 /* Switch out of Vaux if it is a NIC */
15526 tg3_pwrsrc_switch_to_vmain(tp);
15528 /* Derive initial jumbo mode from MTU assigned in
15529 * ether_setup() via the alloc_etherdev() call
15530 */
15531 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15532 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15534 /* Determine WakeOnLan speed to use. */
15535 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15536 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
15537 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15538 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
15539 tg3_flag_clear(tp, WOL_SPEED_100MB);
15540 } else {
15541 tg3_flag_set(tp, WOL_SPEED_100MB);
15544 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15545 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15547 /* A few boards don't want Ethernet@WireSpeed phy feature */
15548 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15549 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15550 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
15551 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
15552 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15553 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15554 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15556 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15557 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
15558 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15559 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
15560 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15562 if (tg3_flag(tp, 5705_PLUS) &&
15563 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15564 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
15565 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
15566 !tg3_flag(tp, 57765_PLUS)) {
15567 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15568 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15569 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15570 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
15571 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15572 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15573 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15574 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15575 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15576 } else
15577 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15580 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15581 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15582 tp->phy_otp = tg3_read_otp_phycfg(tp);
15583 if (tp->phy_otp == 0)
15584 tp->phy_otp = TG3_OTP_DEFAULT;
15587 if (tg3_flag(tp, CPMU_PRESENT))
15588 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15589 else
15590 tp->mi_mode = MAC_MI_MODE_BASE;
15592 tp->coalesce_mode = 0;
15593 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15594 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15595 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15597 /* Set these bits to enable statistics workaround. */
15598 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15599 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15600 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15601 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15602 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15605 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15606 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15607 tg3_flag_set(tp, USE_PHYLIB);
15609 err = tg3_mdio_init(tp);
15610 if (err)
15611 return err;
15613 /* Initialize data/descriptor byte/word swapping. */
15614 val = tr32(GRC_MODE);
15615 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15616 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15617 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15618 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15619 GRC_MODE_B2HRX_ENABLE |
15620 GRC_MODE_HTX2B_ENABLE |
15621 GRC_MODE_HOST_STACKUP);
15622 else
15623 val &= GRC_MODE_HOST_STACKUP;
15625 tw32(GRC_MODE, val | tp->grc_mode);
15627 tg3_switch_clocks(tp);
15629 /* Clear this out for sanity. */
15630 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15632 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15633 &pci_state_reg);
15634 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15635 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15636 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15638 if (chiprevid == CHIPREV_ID_5701_A0 ||
15639 chiprevid == CHIPREV_ID_5701_B0 ||
15640 chiprevid == CHIPREV_ID_5701_B2 ||
15641 chiprevid == CHIPREV_ID_5701_B5) {
15642 void __iomem *sram_base;
15644 /* Write some dummy words into the SRAM status block
15645 * area, see if it reads back correctly. If the return
15646 * value is bad, force enable the PCIX workaround.
15647 */
15648 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15650 writel(0x00000000, sram_base);
15651 writel(0x00000000, sram_base + 4);
15652 writel(0xffffffff, sram_base + 4);
15653 if (readl(sram_base) != 0x00000000)
15654 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15658 udelay(50);
15659 tg3_nvram_init(tp);
15661 grc_misc_cfg = tr32(GRC_MISC_CFG);
15662 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15664 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15665 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15666 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15667 tg3_flag_set(tp, IS_5788);
15669 if (!tg3_flag(tp, IS_5788) &&
15670 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15671 tg3_flag_set(tp, TAGGED_STATUS);
15672 if (tg3_flag(tp, TAGGED_STATUS)) {
15673 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15674 HOSTCC_MODE_CLRTICK_TXBD);
15676 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15677 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15678 tp->misc_host_ctrl);
15681 /* Preserve the APE MAC_MODE bits */
15682 if (tg3_flag(tp, ENABLE_APE))
15683 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15684 else
15685 tp->mac_mode = 0;
15687 if (tg3_10_100_only_device(tp, ent))
15688 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15690 err = tg3_phy_probe(tp);
15691 if (err) {
15692 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15693 /* ... but do not return immediately ... */
15694 tg3_mdio_fini(tp);
15697 tg3_read_vpd(tp);
15698 tg3_read_fw_ver(tp);
15700 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15701 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15702 } else {
15703 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15704 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15705 else
15706 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15709 /* 5700 {AX,BX} chips have a broken status block link
15710 * change bit implementation, so we must use the
15711 * status register in those cases.
15712 */
15713 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15714 tg3_flag_set(tp, USE_LINKCHG_REG);
15715 else
15716 tg3_flag_clear(tp, USE_LINKCHG_REG);
15718 /* The led_ctrl is set during tg3_phy_probe; here we might
15719 * have to force the link status polling mechanism based
15720 * upon subsystem IDs.
15721 */
15722 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15723 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15724 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15725 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15726 tg3_flag_set(tp, USE_LINKCHG_REG);
15729 /* For all SERDES we poll the MAC status register. */
15730 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15731 tg3_flag_set(tp, POLL_SERDES);
15732 else
15733 tg3_flag_clear(tp, POLL_SERDES);
15735 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15736 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15737 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15738 tg3_flag(tp, PCIX_MODE)) {
15739 tp->rx_offset = NET_SKB_PAD;
15740 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15741 tp->rx_copy_thresh = ~(u16)0;
15742 #endif
15745 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15746 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15747 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15749 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15751 /* Increment the rx prod index on the rx std ring by at most
15752 * 8 for these chips to work around hw errata.
15753 */
15754 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15755 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15756 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15757 tp->rx_std_max_post = 8;
15759 if (tg3_flag(tp, ASPM_WORKAROUND))
15760 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15761 PCIE_PWR_MGMT_L1_THRESH_MSK;
15763 return err;
15766 #ifdef CONFIG_SPARC
15767 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15769 struct net_device *dev = tp->dev;
15770 struct pci_dev *pdev = tp->pdev;
15771 struct device_node *dp = pci_device_to_OF_node(pdev);
15772 const unsigned char *addr;
15773 int len;
15775 addr = of_get_property(dp, "local-mac-address", &len);
15776 if (addr && len == 6) {
15777 memcpy(dev->dev_addr, addr, 6);
15778 memcpy(dev->perm_addr, dev->dev_addr, 6);
15779 return 0;
15781 return -ENODEV;
15784 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15786 struct net_device *dev = tp->dev;
15788 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15789 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15790 return 0;
15792 #endif
15794 static int tg3_get_device_address(struct tg3 *tp)
15796 struct net_device *dev = tp->dev;
15797 u32 hi, lo, mac_offset;
15798 int addr_ok = 0;
15800 #ifdef CONFIG_SPARC
15801 if (!tg3_get_macaddr_sparc(tp))
15802 return 0;
15803 #endif
15805 mac_offset = 0x7c;
15806 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15807 tg3_flag(tp, 5780_CLASS)) {
15808 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15809 mac_offset = 0xcc;
15810 if (tg3_nvram_lock(tp))
15811 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15812 else
15813 tg3_nvram_unlock(tp);
15814 } else if (tg3_flag(tp, 5717_PLUS)) {
15815 if (tp->pci_fn & 1)
15816 mac_offset = 0xcc;
15817 if (tp->pci_fn > 1)
15818 mac_offset += 0x18c;
15819 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15820 mac_offset = 0x10;
15822 /* First try to get it from MAC address mailbox. */
15823 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
15824 if ((hi >> 16) == 0x484b) {
15825 dev->dev_addr[0] = (hi >> 8) & 0xff;
15826 dev->dev_addr[1] = (hi >> 0) & 0xff;
15828 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15829 dev->dev_addr[2] = (lo >> 24) & 0xff;
15830 dev->dev_addr[3] = (lo >> 16) & 0xff;
15831 dev->dev_addr[4] = (lo >> 8) & 0xff;
15832 dev->dev_addr[5] = (lo >> 0) & 0xff;
15834 /* Some old bootcode may report a 0 MAC address in SRAM */
15835 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
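/* is_valid_ether_addr() rejects the all-zeros address as well as
 * multicast addresses, which is what catches the bogus 0 MAC case
 * mentioned above.
 */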
15837 if (!addr_ok) {
15838 /* Next, try NVRAM. */
15839 if (!tg3_flag(tp, NO_NVRAM) &&
15840 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15841 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15842 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15843 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15845 /* Finally just fetch it out of the MAC control regs. */
15846 else {
15847 hi = tr32(MAC_ADDR_0_HIGH);
15848 lo = tr32(MAC_ADDR_0_LOW);
15850 dev->dev_addr[5] = lo & 0xff;
15851 dev->dev_addr[4] = (lo >> 8) & 0xff;
15852 dev->dev_addr[3] = (lo >> 16) & 0xff;
15853 dev->dev_addr[2] = (lo >> 24) & 0xff;
15854 dev->dev_addr[1] = hi & 0xff;
15855 dev->dev_addr[0] = (hi >> 8) & 0xff;
15859 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15860 #ifdef CONFIG_SPARC
15861 if (!tg3_get_default_macaddr_sparc(tp))
15862 return 0;
15863 #endif
15864 return -EINVAL;
15866 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15867 return 0;
15870 #define BOUNDARY_SINGLE_CACHELINE 1
15871 #define BOUNDARY_MULTI_CACHELINE 2
15873 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15875 int cacheline_size;
15876 u8 byte;
15877 int goal;
15879 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15880 if (byte == 0)
15881 cacheline_size = 1024;
15882 else
15883 cacheline_size = (int) byte * 4;
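/* PCI_CACHE_LINE_SIZE is expressed in 32-bit dwords, hence the
 * multiply by four to convert to bytes; a read-back of zero is
 * treated as the 1024-byte worst case above.
 */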
15885 /* On 5703 and later chips, the boundary bits have no
15886 * effect.
15887 */
15888 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15889 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15890 !tg3_flag(tp, PCI_EXPRESS))
15891 goto out;
15893 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15894 goal = BOUNDARY_MULTI_CACHELINE;
15895 #else
15896 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15897 goal = BOUNDARY_SINGLE_CACHELINE;
15898 #else
15899 goal = 0;
15900 #endif
15901 #endif
15903 if (tg3_flag(tp, 57765_PLUS)) {
15904 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15905 goto out;
15908 if (!goal)
15909 goto out;
15911 /* PCI controllers on most RISC systems tend to disconnect
15912 * when a device tries to burst across a cache-line boundary.
15913 * Therefore, letting tg3 do so just wastes PCI bandwidth.
15915 * Unfortunately, for PCI-E there are only limited
15916 * write-side controls for this, and thus for reads
15917 * we will still get the disconnects. We'll also waste
15918 * these PCI cycles for both read and write for chips
15919 * other than 5700 and 5701 which do not implement the
15920 * boundary bits.
15921 */
15922 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15923 switch (cacheline_size) {
15924 case 16:
15925 case 32:
15926 case 64:
15927 case 128:
15928 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15929 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15930 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15931 } else {
15932 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15933 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15935 break;
15937 case 256:
15938 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15939 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15940 break;
15942 default:
15943 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15944 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15945 break;
15947 } else if (tg3_flag(tp, PCI_EXPRESS)) {
15948 switch (cacheline_size) {
15949 case 16:
15950 case 32:
15951 case 64:
15952 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15953 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15954 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15955 break;
15957 /* fallthrough */
15958 case 128:
15959 default:
15960 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15961 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15962 break;
15964 } else {
15965 switch (cacheline_size) {
15966 case 16:
15967 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15968 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15969 DMA_RWCTRL_WRITE_BNDRY_16);
15970 break;
15972 /* fallthrough */
15973 case 32:
15974 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15975 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15976 DMA_RWCTRL_WRITE_BNDRY_32);
15977 break;
15979 /* fallthrough */
15980 case 64:
15981 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15982 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15983 DMA_RWCTRL_WRITE_BNDRY_64);
15984 break;
15986 /* fallthrough */
15987 case 128:
15988 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15989 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15990 DMA_RWCTRL_WRITE_BNDRY_128);
15991 break;
15993 /* fallthrough */
15994 case 256:
15995 val |= (DMA_RWCTRL_READ_BNDRY_256 |
15996 DMA_RWCTRL_WRITE_BNDRY_256);
15997 break;
15998 case 512:
15999 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16000 DMA_RWCTRL_WRITE_BNDRY_512);
16001 break;
16002 case 1024:
16003 default:
16004 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16005 DMA_RWCTRL_WRITE_BNDRY_1024);
16006 break;
16010 out:
16011 return val;
16014 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16015 int size, int to_device)
16017 struct tg3_internal_buffer_desc test_desc;
16018 u32 sram_dma_descs;
16019 int i, ret;
16021 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16023 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16024 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16025 tw32(RDMAC_STATUS, 0);
16026 tw32(WDMAC_STATUS, 0);
16028 tw32(BUFMGR_MODE, 0);
16029 tw32(FTQ_RESET, 0);
16031 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16032 test_desc.addr_lo = buf_dma & 0xffffffff;
16033 test_desc.nic_mbuf = 0x00002100;
16034 test_desc.len = size;
16036 /*
16037 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
16038 * the *second* time the tg3 driver was getting loaded after an
16039 * initial scan.
16041 * Broadcom tells me:
16042 * ...the DMA engine is connected to the GRC block and a DMA
16043 * reset may affect the GRC block in some unpredictable way...
16044 * The behavior of resets to individual blocks has not been tested.
16046 * Broadcom noted the GRC reset will also reset all sub-components.
16047 */
16048 if (to_device) {
16049 test_desc.cqid_sqid = (13 << 8) | 2;
16051 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16052 udelay(40);
16053 } else {
16054 test_desc.cqid_sqid = (16 << 8) | 7;
16056 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16057 udelay(40);
16059 test_desc.flags = 0x00000005;
16061 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16062 u32 val;
16064 val = *(((u32 *)&test_desc) + i);
16065 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16066 sram_dma_descs + (i * sizeof(u32)));
16067 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16069 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
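/* The writes above go through the PCI memory window in config space,
 * depositing the test descriptor into NIC SRAM one 32-bit word at a
 * time before the window base is reset to zero.
 */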
16071 if (to_device)
16072 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16073 else
16074 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16076 ret = -ENODEV;
16077 for (i = 0; i < 40; i++) {
16078 u32 val;
16080 if (to_device)
16081 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16082 else
16083 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16084 if ((val & 0xffff) == sram_dma_descs) {
16085 ret = 0;
16086 break;
16089 udelay(100);
16092 return ret;
16095 #define TEST_BUFFER_SIZE 0x2000
16097 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16098 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16099 { },
16102 static int tg3_test_dma(struct tg3 *tp)
16104 dma_addr_t buf_dma;
16105 u32 *buf, saved_dma_rwctrl;
16106 int ret = 0;
16108 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16109 &buf_dma, GFP_KERNEL);
16110 if (!buf) {
16111 ret = -ENOMEM;
16112 goto out_nofree;
16115 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16116 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16118 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16120 if (tg3_flag(tp, 57765_PLUS))
16121 goto out;
16123 if (tg3_flag(tp, PCI_EXPRESS)) {
16124 /* DMA read watermark not used on PCIE */
16125 tp->dma_rwctrl |= 0x00180000;
16126 } else if (!tg3_flag(tp, PCIX_MODE)) {
16127 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
16128 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
16129 tp->dma_rwctrl |= 0x003f0000;
16130 else
16131 tp->dma_rwctrl |= 0x003f000f;
16132 } else {
16133 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16134 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
16135 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16136 u32 read_water = 0x7;
16138 /* If the 5704 is behind the EPB bridge, we can
16139 * do the less restrictive ONE_DMA workaround for
16140 * better performance.
16141 */
16142 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16143 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16144 tp->dma_rwctrl |= 0x8000;
16145 else if (ccval == 0x6 || ccval == 0x7)
16146 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16148 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
16149 read_water = 4;
16150 /* Set bit 23 to enable PCIX hw bug fix */
16151 tp->dma_rwctrl |=
16152 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16153 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16154 (1 << 23);
16155 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
16156 /* 5780 always in PCIX mode */
16157 tp->dma_rwctrl |= 0x00144000;
16158 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
16159 /* 5714 always in PCIX mode */
16160 tp->dma_rwctrl |= 0x00148000;
16161 } else {
16162 tp->dma_rwctrl |= 0x001b000f;
16166 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16167 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16168 tp->dma_rwctrl &= 0xfffffff0;
16170 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
16171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
16172 /* Remove this if it causes problems for some boards. */
16173 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16175 /* On 5700/5701 chips, we need to set this bit.
16176 * Otherwise the chip will issue cacheline transactions
16177 * to streamable DMA memory without all of the byte
16178 * enables turned on. This is an error on several
16179 * RISC PCI controllers, in particular sparc64.
16181 * On 5703/5704 chips, this bit has been reassigned
16182 * a different meaning. In particular, it is used
16183 * on those chips to enable a PCI-X workaround.
16185 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16188 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16190 #if 0
16191 /* Unneeded, already done by tg3_get_invariants. */
16192 tg3_switch_clocks(tp);
16193 #endif
16195 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
16196 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
16197 goto out;
16199 /* It is best to perform the DMA test with the maximum write burst size
16200 * to expose the 5700/5701 write DMA bug.
16202 saved_dma_rwctrl = tp->dma_rwctrl;
16203 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16204 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16206 while (1) {
16207 u32 *p = buf, i;
16209 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16210 p[i] = i;
16212 /* Send the buffer to the chip. */
16213 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16214 if (ret) {
16215 dev_err(&tp->pdev->dev,
16216 "%s: Buffer write failed. err = %d\n",
16217 __func__, ret);
16218 break;
16221 #if 0
16222 /* validate data reached card RAM correctly. */
16223 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16224 u32 val;
16225 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16226 if (le32_to_cpu(val) != p[i]) {
16227 dev_err(&tp->pdev->dev,
16228 "%s: Buffer corrupted on device! "
16229 "(%d != %d)\n", __func__, val, i);
16230 /* ret = -ENODEV here? */
16232 p[i] = 0;
16234 #endif
16235 /* Now read it back. */
16236 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16237 if (ret) {
16238 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16239 "err = %d\n", __func__, ret);
16240 break;
16243 /* Verify it. */
16244 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16245 if (p[i] == i)
16246 continue;
16248 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16249 DMA_RWCTRL_WRITE_BNDRY_16) {
16250 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16251 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16252 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16253 break;
16254 } else {
16255 dev_err(&tp->pdev->dev,
16256 "%s: Buffer corrupted on read back! "
16257 "(%d != %d)\n", __func__, p[i], i);
16258 ret = -ENODEV;
16259 goto out;
16263 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16264 /* Success. */
16265 ret = 0;
16266 break;
16269 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16270 DMA_RWCTRL_WRITE_BNDRY_16) {
16271 /* DMA test passed without adjusting the DMA boundary;
16272 * now look for chipsets that are known to expose the
16273 * DMA bug without failing the test.
16275 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16276 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16277 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16278 } else {
16279 /* Safe to use the calculated DMA boundary. */
16280 tp->dma_rwctrl = saved_dma_rwctrl;
16283 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16286 out:
16287 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16288 out_nofree:
16289 return ret;
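/*
 * Illustrative sketch (not part of the driver): tg3_test_dma() exercises
 * the write-DMA path by filling a buffer with a known ramp pattern,
 * pushing it through the chip, reading it back, and comparing word by
 * word.  The same fill-and-verify core, reduced to a standalone userspace
 * form with a trivial loopback() standing in for the two
 * tg3_do_test_dma() round trips:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TEST_WORDS (0x2000 / sizeof(uint32_t))	/* mirrors TEST_BUFFER_SIZE */

/* Stand-in for the device round trip; the real test hits hardware. */
static void loopback(uint32_t *dst, const uint32_t *src, size_t n)
{
	memcpy(dst, src, n * sizeof(*src));
}

int main(void)
{
	static uint32_t out[TEST_WORDS], in[TEST_WORDS];
	size_t i;

	for (i = 0; i < TEST_WORDS; i++)
		out[i] = (uint32_t)i;	/* ramp pattern, as in the driver */

	loopback(in, out, TEST_WORDS);

	for (i = 0; i < TEST_WORDS; i++) {
		if (in[i] != (uint32_t)i) {
			fprintf(stderr, "corrupted at %zu: %u\n",
				i, (unsigned int)in[i]);
			return 1;
		}
	}
	printf("pattern verified\n");
	return 0;
}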
16292 static void tg3_init_bufmgr_config(struct tg3 *tp)
16294 if (tg3_flag(tp, 57765_PLUS)) {
16295 tp->bufmgr_config.mbuf_read_dma_low_water =
16296 DEFAULT_MB_RDMA_LOW_WATER_5705;
16297 tp->bufmgr_config.mbuf_mac_rx_low_water =
16298 DEFAULT_MB_MACRX_LOW_WATER_57765;
16299 tp->bufmgr_config.mbuf_high_water =
16300 DEFAULT_MB_HIGH_WATER_57765;
16302 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16303 DEFAULT_MB_RDMA_LOW_WATER_5705;
16304 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16305 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16306 tp->bufmgr_config.mbuf_high_water_jumbo =
16307 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16308 } else if (tg3_flag(tp, 5705_PLUS)) {
16309 tp->bufmgr_config.mbuf_read_dma_low_water =
16310 DEFAULT_MB_RDMA_LOW_WATER_5705;
16311 tp->bufmgr_config.mbuf_mac_rx_low_water =
16312 DEFAULT_MB_MACRX_LOW_WATER_5705;
16313 tp->bufmgr_config.mbuf_high_water =
16314 DEFAULT_MB_HIGH_WATER_5705;
16315 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
16316 tp->bufmgr_config.mbuf_mac_rx_low_water =
16317 DEFAULT_MB_MACRX_LOW_WATER_5906;
16318 tp->bufmgr_config.mbuf_high_water =
16319 DEFAULT_MB_HIGH_WATER_5906;
16322 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16323 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16324 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16325 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16326 tp->bufmgr_config.mbuf_high_water_jumbo =
16327 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16328 } else {
16329 tp->bufmgr_config.mbuf_read_dma_low_water =
16330 DEFAULT_MB_RDMA_LOW_WATER;
16331 tp->bufmgr_config.mbuf_mac_rx_low_water =
16332 DEFAULT_MB_MACRX_LOW_WATER;
16333 tp->bufmgr_config.mbuf_high_water =
16334 DEFAULT_MB_HIGH_WATER;
16336 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16337 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16338 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16339 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16340 tp->bufmgr_config.mbuf_high_water_jumbo =
16341 DEFAULT_MB_HIGH_WATER_JUMBO;
16344 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16345 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16348 static char *tg3_phy_string(struct tg3 *tp)
16350 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16351 case TG3_PHY_ID_BCM5400: return "5400";
16352 case TG3_PHY_ID_BCM5401: return "5401";
16353 case TG3_PHY_ID_BCM5411: return "5411";
16354 case TG3_PHY_ID_BCM5701: return "5701";
16355 case TG3_PHY_ID_BCM5703: return "5703";
16356 case TG3_PHY_ID_BCM5704: return "5704";
16357 case TG3_PHY_ID_BCM5705: return "5705";
16358 case TG3_PHY_ID_BCM5750: return "5750";
16359 case TG3_PHY_ID_BCM5752: return "5752";
16360 case TG3_PHY_ID_BCM5714: return "5714";
16361 case TG3_PHY_ID_BCM5780: return "5780";
16362 case TG3_PHY_ID_BCM5755: return "5755";
16363 case TG3_PHY_ID_BCM5787: return "5787";
16364 case TG3_PHY_ID_BCM5784: return "5784";
16365 case TG3_PHY_ID_BCM5756: return "5722/5756";
16366 case TG3_PHY_ID_BCM5906: return "5906";
16367 case TG3_PHY_ID_BCM5761: return "5761";
16368 case TG3_PHY_ID_BCM5718C: return "5718C";
16369 case TG3_PHY_ID_BCM5718S: return "5718S";
16370 case TG3_PHY_ID_BCM57765: return "57765";
16371 case TG3_PHY_ID_BCM5719C: return "5719C";
16372 case TG3_PHY_ID_BCM5720C: return "5720C";
16373 case TG3_PHY_ID_BCM5762: return "5762C";
16374 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16375 case 0: return "serdes";
16376 default: return "unknown";
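/*
 * Design note (illustrative, not part of the driver): the switch above
 * could equally be written as a table-driven lookup, keeping the
 * ID-to-name mapping in data.  A minimal sketch with hypothetical ID
 * values (the real TG3_PHY_ID_* constants live in tg3.h):
 */
#include <stddef.h>
#include <stdint.h>

struct phy_name {
	uint32_t id;
	const char *name;
};

static const struct phy_name phy_names[] = {
	{ 0x60008160, "5400" },	/* hypothetical IDs, for illustration only */
	{ 0x60008180, "5401" },
};

static const char *phy_string(uint32_t id)
{
	size_t i;

	for (i = 0; i < sizeof(phy_names) / sizeof(phy_names[0]); i++)
		if (phy_names[i].id == id)
			return phy_names[i].name;
	return id ? "unknown" : "serdes";
}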
16380 static char *tg3_bus_string(struct tg3 *tp, char *str)
16382 if (tg3_flag(tp, PCI_EXPRESS)) {
16383 strcpy(str, "PCI Express");
16384 return str;
16385 } else if (tg3_flag(tp, PCIX_MODE)) {
16386 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16388 strcpy(str, "PCIX:");
16390 if ((clock_ctrl == 7) ||
16391 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16392 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16393 strcat(str, "133MHz");
16394 else if (clock_ctrl == 0)
16395 strcat(str, "33MHz");
16396 else if (clock_ctrl == 2)
16397 strcat(str, "50MHz");
16398 else if (clock_ctrl == 4)
16399 strcat(str, "66MHz");
16400 else if (clock_ctrl == 6)
16401 strcat(str, "100MHz");
16402 } else {
16403 strcpy(str, "PCI:");
16404 if (tg3_flag(tp, PCI_HIGH_SPEED))
16405 strcat(str, "66MHz");
16406 else
16407 strcat(str, "33MHz");
16409 if (tg3_flag(tp, PCI_32BIT))
16410 strcat(str, ":32-bit");
16411 else
16412 strcat(str, ":64-bit");
16413 return str;
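/*
 * Design note (illustrative): tg3_bus_string() builds into a
 * caller-supplied char str[40] with strcpy()/strcat(), which is safe only
 * because every combination of fragments is known to fit.  A sketch of
 * the same idea with an explicit bound via snprintf(), collapsing the
 * many branches above into parameters for brevity:
 */
#include <stdio.h>

static char *bus_string(char *str, size_t len, const char *bus,
			int mhz, int width)
{
	snprintf(str, len, "%s:%dMHz:%d-bit", bus, mhz, width);
	return str;
}

/* e.g. bus_string(buf, sizeof(buf), "PCIX", 133, 64) -> "PCIX:133MHz:64-bit" */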
16416 static void tg3_init_coal(struct tg3 *tp)
16418 struct ethtool_coalesce *ec = &tp->coal;
16420 memset(ec, 0, sizeof(*ec));
16421 ec->cmd = ETHTOOL_GCOALESCE;
16422 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16423 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16424 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16425 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16426 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16427 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16428 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16429 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16430 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16432 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16433 HOSTCC_MODE_CLRTICK_TXBD)) {
16434 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16435 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16436 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16437 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16440 if (tg3_flag(tp, 5705_PLUS)) {
16441 ec->rx_coalesce_usecs_irq = 0;
16442 ec->tx_coalesce_usecs_irq = 0;
16443 ec->stats_block_coalesce_usecs = 0;
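/*
 * Note (illustrative): the defaults programmed here are what
 * `ethtool -c <iface>` reports for a tg3 device before any tuning, and
 * `ethtool -C` changes flow back in through the same ethtool_coalesce
 * structure that tg3_init_coal() seeds above.
 */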
16447 static int tg3_init_one(struct pci_dev *pdev,
16448 const struct pci_device_id *ent)
16450 struct net_device *dev;
16451 struct tg3 *tp;
16452 int i, err, pm_cap;
16453 u32 sndmbx, rcvmbx, intmbx;
16454 char str[40];
16455 u64 dma_mask, persist_dma_mask;
16456 netdev_features_t features = 0;
16458 printk_once(KERN_INFO "%s\n", version);
16460 err = pci_enable_device(pdev);
16461 if (err) {
16462 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16463 return err;
16466 err = pci_request_regions(pdev, DRV_MODULE_NAME);
16467 if (err) {
16468 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16469 goto err_out_disable_pdev;
16472 pci_set_master(pdev);
16474 /* Find power-management capability. */
16475 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16476 if (pm_cap == 0) {
16477 dev_err(&pdev->dev,
16478 "Cannot find Power Management capability, aborting\n");
16479 err = -EIO;
16480 goto err_out_free_res;
16483 err = pci_set_power_state(pdev, PCI_D0);
16484 if (err) {
16485 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16486 goto err_out_free_res;
16489 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16490 if (!dev) {
16491 err = -ENOMEM;
16492 goto err_out_power_down;
16495 SET_NETDEV_DEV(dev, &pdev->dev);
16497 tp = netdev_priv(dev);
16498 tp->pdev = pdev;
16499 tp->dev = dev;
16500 tp->pm_cap = pm_cap;
16501 tp->rx_mode = TG3_DEF_RX_MODE;
16502 tp->tx_mode = TG3_DEF_TX_MODE;
16504 if (tg3_debug > 0)
16505 tp->msg_enable = tg3_debug;
16506 else
16507 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16509 /* The word/byte swap controls here control register access byte
16510 * swapping. DMA data byte swapping is controlled in the GRC_MODE
16511 * setting below.
16513 tp->misc_host_ctrl =
16514 MISC_HOST_CTRL_MASK_PCI_INT |
16515 MISC_HOST_CTRL_WORD_SWAP |
16516 MISC_HOST_CTRL_INDIR_ACCESS |
16517 MISC_HOST_CTRL_PCISTATE_RW;
16519 /* The NONFRM (non-frame) byte/word swap controls take effect
16520 * on descriptor entries, i.e. anything that isn't packet data.
16522 * The StrongARM chips on the board (one for tx, one for rx)
16523 * are running in big-endian mode.
16525 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16526 GRC_MODE_WSWAP_NONFRM_DATA);
16527 #ifdef __BIG_ENDIAN
16528 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16529 #endif
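/*
 * Worked example (illustrative, using the conventional meaning of these
 * swap controls): a byte swap turns a host word 0x11223344 into
 * 0x44332211, while a word swap exchanges the 32-bit halves of a 64-bit
 * quantity.  The #ifdef above adds the extra non-frame byte swap only
 * when the host itself is big-endian, so descriptor fields arrive in the
 * order the big-endian on-board processors expect.
 */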
16530 spin_lock_init(&tp->lock);
16531 spin_lock_init(&tp->indirect_lock);
16532 INIT_WORK(&tp->reset_task, tg3_reset_task);
16534 tp->regs = pci_ioremap_bar(pdev, BAR_0);
16535 if (!tp->regs) {
16536 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16537 err = -ENOMEM;
16538 goto err_out_free_dev;
16541 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16542 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16543 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16544 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16545 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16546 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16547 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16548 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16549 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16550 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16551 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16552 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16553 tg3_flag_set(tp, ENABLE_APE);
16554 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16555 if (!tp->aperegs) {
16556 dev_err(&pdev->dev,
16557 "Cannot map APE registers, aborting\n");
16558 err = -ENOMEM;
16559 goto err_out_iounmap;
16563 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16564 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16566 dev->ethtool_ops = &tg3_ethtool_ops;
16567 dev->watchdog_timeo = TG3_TX_TIMEOUT;
16568 dev->netdev_ops = &tg3_netdev_ops;
16569 dev->irq = pdev->irq;
16571 err = tg3_get_invariants(tp, ent);
16572 if (err) {
16573 dev_err(&pdev->dev,
16574 "Problem fetching invariants of chip, aborting\n");
16575 goto err_out_apeunmap;
16578 /* The EPB bridge inside 5714, 5715, and 5780 and any
16579 * device behind the EPB cannot support DMA addresses > 40-bit.
16580 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16581 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16582 * do DMA address check in tg3_start_xmit().
16584 if (tg3_flag(tp, IS_5788))
16585 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16586 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16587 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16588 #ifdef CONFIG_HIGHMEM
16589 dma_mask = DMA_BIT_MASK(64);
16590 #endif
16591 } else
16592 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16594 /* Configure DMA attributes. */
16595 if (dma_mask > DMA_BIT_MASK(32)) {
16596 err = pci_set_dma_mask(pdev, dma_mask);
16597 if (!err) {
16598 features |= NETIF_F_HIGHDMA;
16599 err = pci_set_consistent_dma_mask(pdev,
16600 persist_dma_mask);
16601 if (err < 0) {
16602 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16603 "DMA for consistent allocations\n");
16604 goto err_out_apeunmap;
16608 if (err || dma_mask == DMA_BIT_MASK(32)) {
16609 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16610 if (err) {
16611 dev_err(&pdev->dev,
16612 "No usable DMA configuration, aborting\n");
16613 goto err_out_apeunmap;
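/*
 * Illustrative summary of the DMA mask selection above:
 *
 *	IS_5788 devices      -> 32-bit streaming and coherent masks
 *	40BIT_DMA_BUG (EPB)  -> 40-bit persistent mask; the streaming mask
 *	                        is widened to 64-bit only on CONFIG_HIGHMEM,
 *	                        with tg3_start_xmit() checking addresses
 *	everything else      -> full 64-bit masks
 *
 * If the wide mask is rejected, the code falls back to a plain 32-bit
 * configuration and only aborts the probe if even that fails.
 */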
16617 tg3_init_bufmgr_config(tp);
16619 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16621 /* 5700 B0 chips do not support checksumming correctly due
16622 * to hardware bugs.
16624 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16625 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16627 if (tg3_flag(tp, 5755_PLUS))
16628 features |= NETIF_F_IPV6_CSUM;
16631 /* TSO is on by default on chips that support hardware TSO.
16632 * Firmware TSO on older chips gives lower performance, so it
16633 * is off by default, but can be enabled using ethtool.
16635 if ((tg3_flag(tp, HW_TSO_1) ||
16636 tg3_flag(tp, HW_TSO_2) ||
16637 tg3_flag(tp, HW_TSO_3)) &&
16638 (features & NETIF_F_IP_CSUM))
16639 features |= NETIF_F_TSO;
16640 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16641 if (features & NETIF_F_IPV6_CSUM)
16642 features |= NETIF_F_TSO6;
16643 if (tg3_flag(tp, HW_TSO_3) ||
16644 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
16645 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16646 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
16647 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
16648 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
16649 features |= NETIF_F_TSO_ECN;
16652 dev->features |= features;
16653 dev->vlan_features |= features;
16656 * Add loopback capability only for a subset of devices that support
16657 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16658 * loopback for the remaining devices.
16660 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16661 !tg3_flag(tp, CPMU_PRESENT))
16662 /* Add the loopback capability */
16663 features |= NETIF_F_LOOPBACK;
16665 dev->hw_features |= features;
16667 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
16668 !tg3_flag(tp, TSO_CAPABLE) &&
16669 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16670 tg3_flag_set(tp, MAX_RXPEND_64);
16671 tp->rx_pending = 63;
16674 err = tg3_get_device_address(tp);
16675 if (err) {
16676 dev_err(&pdev->dev,
16677 "Could not obtain valid ethernet address, aborting\n");
16678 goto err_out_apeunmap;
16682 * Reset chip in case UNDI or EFI driver did not shut down DMA.
16683 * The DMA self test will enable WDMAC and we'll see (spurious)
16684 * pending DMA on the PCI bus at that point.
16686 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16687 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16688 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16689 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16692 err = tg3_test_dma(tp);
16693 if (err) {
16694 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16695 goto err_out_apeunmap;
16698 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16699 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16700 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16701 for (i = 0; i < tp->irq_max; i++) {
16702 struct tg3_napi *tnapi = &tp->napi[i];
16704 tnapi->tp = tp;
16705 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16707 tnapi->int_mbox = intmbx;
16708 if (i <= 4)
16709 intmbx += 0x8;
16710 else
16711 intmbx += 0x4;
16713 tnapi->consmbox = rcvmbx;
16714 tnapi->prodmbox = sndmbx;
16716 if (i)
16717 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16718 else
16719 tnapi->coal_now = HOSTCC_MODE_NOW;
16721 if (!tg3_flag(tp, SUPPORT_MSIX))
16722 break;
16725 * If we support MSIX, we'll be using RSS. If we're using
16726 * RSS, the first vector only handles link interrupts and the
16727 * remaining vectors handle rx and tx interrupts. Reuse the
16728 * mailbox values for the next iteration. The values we set up
16729 * above are still useful for the single vectored mode.
16731 if (!i)
16732 continue;
16734 rcvmbx += 0x8;
16736 if (sndmbx & 0x4)
16737 sndmbx -= 0x4;
16738 else
16739 sndmbx += 0xc;
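/*
 * Illustrative sketch (not part of the driver): the mailbox arithmetic in
 * the loop above is easiest to see by replaying it.  This standalone
 * program applies the same update rules with a symbolic base of 0 for
 * each mailbox (the real bases are the MAILBOX_* register addresses, and
 * the starting value of sndmbx decides which branch of the alternation
 * runs first):
 */
#include <stdio.h>

int main(void)
{
	unsigned int intmbx = 0, rcvmbx = 0, sndmbx = 0, i;

	for (i = 0; i < 5; i++) {
		printf("vec %u: int +0x%02x rcv +0x%02x snd +0x%02x\n",
		       i, intmbx, rcvmbx, sndmbx);

		intmbx += (i <= 4) ? 0x8 : 0x4;

		if (!i)		/* vector 1 reuses vector 0's rcv/snd values */
			continue;

		rcvmbx += 0x8;
		sndmbx = (sndmbx & 0x4) ? sndmbx - 0x4 : sndmbx + 0xc;
	}
	return 0;
}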
16742 tg3_init_coal(tp);
16744 pci_set_drvdata(pdev, dev);
16746 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
16747 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
16748 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
16749 tg3_flag_set(tp, PTP_CAPABLE);
16751 if (tg3_flag(tp, 5717_PLUS)) {
16752 /* Resume from a low-power mode */
16753 tg3_frob_aux_power(tp, false);
16756 tg3_timer_init(tp);
16758 err = register_netdev(dev);
16759 if (err) {
16760 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16761 goto err_out_apeunmap;
16764 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16765 tp->board_part_number,
16766 tp->pci_chip_rev_id,
16767 tg3_bus_string(tp, str),
16768 dev->dev_addr);
16770 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16771 struct phy_device *phydev;
16772 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16773 netdev_info(dev,
16774 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16775 phydev->drv->name, dev_name(&phydev->dev));
16776 } else {
16777 char *ethtype;
16779 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16780 ethtype = "10/100Base-TX";
16781 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16782 ethtype = "1000Base-SX";
16783 else
16784 ethtype = "10/100/1000Base-T";
16786 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16787 "(WireSpeed[%d], EEE[%d])\n",
16788 tg3_phy_string(tp), ethtype,
16789 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16790 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16793 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16794 (dev->features & NETIF_F_RXCSUM) != 0,
16795 tg3_flag(tp, USE_LINKCHG_REG) != 0,
16796 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16797 tg3_flag(tp, ENABLE_ASF) != 0,
16798 tg3_flag(tp, TSO_CAPABLE) != 0);
16799 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16800 tp->dma_rwctrl,
16801 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16802 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16804 pci_save_state(pdev);
16806 return 0;
16808 err_out_apeunmap:
16809 if (tp->aperegs) {
16810 iounmap(tp->aperegs);
16811 tp->aperegs = NULL;
16814 err_out_iounmap:
16815 if (tp->regs) {
16816 iounmap(tp->regs);
16817 tp->regs = NULL;
16820 err_out_free_dev:
16821 free_netdev(dev);
16823 err_out_power_down:
16824 pci_set_power_state(pdev, PCI_D3hot);
16826 err_out_free_res:
16827 pci_release_regions(pdev);
16829 err_out_disable_pdev:
16830 pci_disable_device(pdev);
16831 pci_set_drvdata(pdev, NULL);
16832 return err;
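/*
 * Illustrative sketch (not part of the driver): tg3_init_one() unwinds
 * failures through the classic goto ladder above, where each label
 * releases exactly what was acquired before the jump, in reverse order.
 * A standalone skeleton of the pattern with hypothetical acquire steps:
 */
#include <stdio.h>
#include <stdlib.h>

static int probe(void)
{
	void *bar0, *bar2;
	int err = -1;

	bar0 = malloc(16);		/* like pci_ioremap_bar(pdev, BAR_0) */
	if (!bar0)
		goto out;

	bar2 = malloc(16);		/* like pci_ioremap_bar(pdev, BAR_2) */
	if (!bar2)
		goto err_free_bar0;

	/* ... later setup failures would jump to deeper labels ... */

	free(bar2);			/* freed here only because this   */
	free(bar0);			/* sketch has no remove() path    */
	return 0;

err_free_bar0:
	free(bar0);
out:
	return err;
}

int main(void)
{
	return probe() ? EXIT_FAILURE : EXIT_SUCCESS;
}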
16835 static void tg3_remove_one(struct pci_dev *pdev)
16837 struct net_device *dev = pci_get_drvdata(pdev);
16839 if (dev) {
16840 struct tg3 *tp = netdev_priv(dev);
16842 release_firmware(tp->fw);
16844 tg3_reset_task_cancel(tp);
16846 if (tg3_flag(tp, USE_PHYLIB)) {
16847 tg3_phy_fini(tp);
16848 tg3_mdio_fini(tp);
16851 unregister_netdev(dev);
16852 if (tp->aperegs) {
16853 iounmap(tp->aperegs);
16854 tp->aperegs = NULL;
16856 if (tp->regs) {
16857 iounmap(tp->regs);
16858 tp->regs = NULL;
16860 free_netdev(dev);
16861 pci_release_regions(pdev);
16862 pci_disable_device(pdev);
16863 pci_set_drvdata(pdev, NULL);
16867 #ifdef CONFIG_PM_SLEEP
16868 static int tg3_suspend(struct device *device)
16870 struct pci_dev *pdev = to_pci_dev(device);
16871 struct net_device *dev = pci_get_drvdata(pdev);
16872 struct tg3 *tp = netdev_priv(dev);
16873 int err;
16875 if (!netif_running(dev))
16876 return 0;
16878 tg3_reset_task_cancel(tp);
16879 tg3_phy_stop(tp);
16880 tg3_netif_stop(tp);
16882 tg3_timer_stop(tp);
16884 tg3_full_lock(tp, 1);
16885 tg3_disable_ints(tp);
16886 tg3_full_unlock(tp);
16888 netif_device_detach(dev);
16890 tg3_full_lock(tp, 0);
16891 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16892 tg3_flag_clear(tp, INIT_COMPLETE);
16893 tg3_full_unlock(tp);
16895 err = tg3_power_down_prepare(tp);
16896 if (err) {
16897 int err2;
16899 tg3_full_lock(tp, 0);
16901 tg3_flag_set(tp, INIT_COMPLETE);
16902 err2 = tg3_restart_hw(tp, 1);
16903 if (err2)
16904 goto out;
16906 tg3_timer_start(tp);
16908 netif_device_attach(dev);
16909 tg3_netif_start(tp);
16911 out:
16912 tg3_full_unlock(tp);
16914 if (!err2)
16915 tg3_phy_start(tp);
16918 return err;
16921 static int tg3_resume(struct device *device)
16923 struct pci_dev *pdev = to_pci_dev(device);
16924 struct net_device *dev = pci_get_drvdata(pdev);
16925 struct tg3 *tp = netdev_priv(dev);
16926 int err;
16928 if (!netif_running(dev))
16929 return 0;
16931 netif_device_attach(dev);
16933 tg3_full_lock(tp, 0);
16935 tg3_flag_set(tp, INIT_COMPLETE);
16936 err = tg3_restart_hw(tp, 1);
16937 if (err)
16938 goto out;
16940 tg3_timer_start(tp);
16942 tg3_netif_start(tp);
16944 out:
16945 tg3_full_unlock(tp);
16947 if (!err)
16948 tg3_phy_start(tp);
16950 return err;
16953 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16954 #define TG3_PM_OPS (&tg3_pm_ops)
16956 #else
16958 #define TG3_PM_OPS NULL
16960 #endif /* CONFIG_PM_SLEEP */
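/*
 * Note (illustrative): SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend,
 * tg3_resume) expands to a struct dev_pm_ops whose system-sleep
 * callbacks (suspend/resume and, via the same pair, freeze/thaw/
 * poweroff/restore) all route to the two functions, so hibernation is
 * handled by the same code as suspend-to-RAM.
 */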
16963 * tg3_io_error_detected - called when PCI error is detected
16964 * @pdev: Pointer to PCI device
16965 * @state: The current pci connection state
16967 * This function is called after a PCI bus error affecting
16968 * this device has been detected.
16970 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16971 pci_channel_state_t state)
16973 struct net_device *netdev = pci_get_drvdata(pdev);
16974 struct tg3 *tp = netdev_priv(netdev);
16975 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16977 netdev_info(netdev, "PCI I/O error detected\n");
16979 rtnl_lock();
16981 if (!netif_running(netdev))
16982 goto done;
16984 tg3_phy_stop(tp);
16986 tg3_netif_stop(tp);
16988 tg3_timer_stop(tp);
16990 /* Want to make sure that the reset task doesn't run */
16991 tg3_reset_task_cancel(tp);
16993 netif_device_detach(netdev);
16995 /* Clean up software state, even if MMIO is blocked */
16996 tg3_full_lock(tp, 0);
16997 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16998 tg3_full_unlock(tp);
17000 done:
17001 if (state == pci_channel_io_perm_failure)
17002 err = PCI_ERS_RESULT_DISCONNECT;
17003 else
17004 pci_disable_device(pdev);
17006 rtnl_unlock();
17008 return err;
17012 * tg3_io_slot_reset - called after the pci bus has been reset.
17013 * @pdev: Pointer to PCI device
17015 * Restart the card from scratch, as if from a cold-boot.
17016 * At this point, the card has experienced a hard reset,
17017 * followed by fixups by BIOS, and has its config space
17018 * set up identically to what it was at cold boot.
17020 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17022 struct net_device *netdev = pci_get_drvdata(pdev);
17023 struct tg3 *tp = netdev_priv(netdev);
17024 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17025 int err;
17027 rtnl_lock();
17029 if (pci_enable_device(pdev)) {
17030 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17031 goto done;
17034 pci_set_master(pdev);
17035 pci_restore_state(pdev);
17036 pci_save_state(pdev);
17038 if (!netif_running(netdev)) {
17039 rc = PCI_ERS_RESULT_RECOVERED;
17040 goto done;
17043 err = tg3_power_up(tp);
17044 if (err)
17045 goto done;
17047 rc = PCI_ERS_RESULT_RECOVERED;
17049 done:
17050 rtnl_unlock();
17052 return rc;
17056 * tg3_io_resume - called when traffic can start flowing again.
17057 * @pdev: Pointer to PCI device
17059 * This callback is called when the error recovery driver tells
17060 * us that it's OK to resume normal operation.
17062 static void tg3_io_resume(struct pci_dev *pdev)
17064 struct net_device *netdev = pci_get_drvdata(pdev);
17065 struct tg3 *tp = netdev_priv(netdev);
17066 int err;
17068 rtnl_lock();
17070 if (!netif_running(netdev))
17071 goto done;
17073 tg3_full_lock(tp, 0);
17074 tg3_flag_set(tp, INIT_COMPLETE);
17075 err = tg3_restart_hw(tp, 1);
17076 if (err) {
17077 tg3_full_unlock(tp);
17078 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17079 goto done;
17082 netif_device_attach(netdev);
17084 tg3_timer_start(tp);
17086 tg3_netif_start(tp);
17088 tg3_full_unlock(tp);
17090 tg3_phy_start(tp);
17092 done:
17093 rtnl_unlock();
17096 static const struct pci_error_handlers tg3_err_handler = {
17097 .error_detected = tg3_io_error_detected,
17098 .slot_reset = tg3_io_slot_reset,
17099 .resume = tg3_io_resume
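/*
 * Illustrative summary: during PCI error recovery the core invokes these
 * callbacks in order -- error_detected() quiesces the device and returns
 * PCI_ERS_RESULT_NEED_RESET or _DISCONNECT as implemented above,
 * slot_reset() re-initializes the device after the link reset, and
 * resume() restarts traffic -- matching the detach / re-enable / restart
 * split in the three handlers.
 */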
17102 static struct pci_driver tg3_driver = {
17103 .name = DRV_MODULE_NAME,
17104 .id_table = tg3_pci_tbl,
17105 .probe = tg3_init_one,
17106 .remove = tg3_remove_one,
17107 .err_handler = &tg3_err_handler,
17108 .driver.pm = TG3_PM_OPS,
17111 static int __init tg3_init(void)
17113 return pci_register_driver(&tg3_driver);
17116 static void __exit tg3_cleanup(void)
17118 pci_unregister_driver(&tg3_driver);
17121 module_init(tg3_init);
17122 module_exit(tg3_cleanup);