tg3: Add Macronix NVRAM support
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
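/* NEXT_TX is the '& (foo - 1)' form described in the comment above:
 * because TG3_TX_RING_SIZE is a power of two (512), the AND mask is
 * equivalent to '% TG3_TX_RING_SIZE' without a hardware modulo.
 */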
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
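/* Low-level register accessors.  struct tg3 carries function pointers
 * (tp->read32, tp->write32 and the mailbox variants), and probe-time setup
 * points them at either the direct MMIO helpers or the indirect
 * config-space helpers below, depending on which chip bugs and workarounds
 * apply to the device.
 */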
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND)) {
		/* Non-posted methods */
		tp->write32(tp, off, val);
	} else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}

	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
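/* Shorthand wrappers around the accessor function pointers.  tw32_f()
 * flushes the posted write immediately; tw32_wait_f() additionally delays
 * for the caller-supplied number of microseconds (see _tw32_flush() above).
 */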
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
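/* APE (Application Processing Engine) support.  On chips with management
 * firmware, resources such as the PHY and GRC registers are shared between
 * the host driver and the APE, so access is arbitrated through hardware
 * semaphore registers: an agent requests a lock by writing its bit to the
 * request register, owns it once that bit shows up in the grant register,
 * and releases it by writing the bit back to the grant register.
 */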
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through - GPIO uses the same per-function bit */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through - GPIO uses the same per-function bit */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
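/* Note that on success tg3_ape_event_lock() returns with TG3_APE_LOCK_MEM
 * still held; the caller posts its event and then drops the lock itself
 * (see tg3_ape_scratchpad_read() and tg3_ape_send_event() below).
 */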
#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
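/* PHY registers are reached through the MAC's MI (MDIO) interface: the PHY
 * address, register number, command and data are packed into one frame
 * written to MAC_MI_COM, and completion is detected by polling MI_COM_BUSY.
 * Auto-polling is temporarily disabled around each transaction so the
 * hardware poller cannot collide with the frame.
 */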
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
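/* Clause 45 (MMD) registers are accessed indirectly through the Clause 22
 * register pair: select the MMD device in MII_TG3_MMD_CTRL, write the
 * register address to MII_TG3_MMD_ADDRESS, switch MII_TG3_MMD_CTRL to the
 * no-post-increment data function, then transfer the data through
 * MII_TG3_MMD_ADDRESS.
 */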
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
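/* mii_bus read/write callbacks used when phylib manages the PHY (the
 * USE_PHYLIB flag); they bridge to the MI helpers above under tp->lock.
 * The bus itself is registered by tg3_mdio_init() below.
 */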
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK  |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500
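/* Driver-to-firmware event handshake: tg3_generate_fw_event() raises
 * GRC_RX_CPU_DRIVER_EVENT and the bootcode clears the bit once the event
 * has been serviced.  tg3_wait_for_event_ack() polls for that clear for at
 * most TG3_FW_EVENT_TIMEOUT_USEC before the next event is posted.
 */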
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}
1785 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1787 if (tg3_flag(tp, ENABLE_ASF)) {
1788 switch (kind) {
1789 case RESET_KIND_INIT:
1790 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1791 DRV_STATE_START);
1792 break;
1794 case RESET_KIND_SHUTDOWN:
1795 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1796 DRV_STATE_UNLOAD);
1797 break;
1799 case RESET_KIND_SUSPEND:
1800 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1801 DRV_STATE_SUSPEND);
1802 break;
1804 default:
1805 break;
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
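/* Resolve the TX/RX pause configuration for a 1000BASE-X link from the
 * local and remote advertisements, following the IEEE 802.3 pause
 * resolution rules as implemented below:
 *
 *	local PAUSE/ASYM    remote PAUSE/ASYM    result
 *	      1 / x               1 / x          TX + RX
 *	      0 / 1               1 / 1          TX only
 *	      1 / 1               0 / 1          RX only
 */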
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
1964 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1966 u8 autoneg;
1967 u8 flowctrl = 0;
1968 u32 old_rx_mode = tp->rx_mode;
1969 u32 old_tx_mode = tp->tx_mode;
1971 if (tg3_flag(tp, USE_PHYLIB))
1972 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1973 else
1974 autoneg = tp->link_config.autoneg;
1976 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1977 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1978 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1979 else
1980 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1981 } else
1982 flowctrl = tp->link_config.flowctrl;
1984 tp->link_config.active_flowctrl = flowctrl;
1986 if (flowctrl & FLOW_CTRL_RX)
1987 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1988 else
1989 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1991 if (old_rx_mode != tp->rx_mode)
1992 tw32_f(MAC_RX_MODE, tp->rx_mode);
1994 if (flowctrl & FLOW_CTRL_TX)
1995 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1996 else
1997 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1999 if (old_tx_mode != tp->tx_mode)
2000 tw32_f(MAC_TX_MODE, tp->tx_mode);
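/* Usage sketch: on a copper link the pause capability is resolved with
 * the generic MII helper, e.g.
 *
 *	flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
 *
 * while serdes links go through tg3_resolve_flowctrl_1000X() above.
 * Either way, MAC_RX_MODE / MAC_TX_MODE are only rewritten when a bit
 * actually changed, so repeated link events stay cheap.
 */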
2003 static void tg3_adjust_link(struct net_device *dev)
2005 u8 oldflowctrl, linkmesg = 0;
2006 u32 mac_mode, lcl_adv, rmt_adv;
2007 struct tg3 *tp = netdev_priv(dev);
2008 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2010 spin_lock_bh(&tp->lock);
2012 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2013 MAC_MODE_HALF_DUPLEX);
2015 oldflowctrl = tp->link_config.active_flowctrl;
2017 if (phydev->link) {
2018 lcl_adv = 0;
2019 rmt_adv = 0;
2021 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2022 mac_mode |= MAC_MODE_PORT_MODE_MII;
2023 else if (phydev->speed == SPEED_1000 ||
2024 tg3_asic_rev(tp) != ASIC_REV_5785)
2025 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2026 else
2027 mac_mode |= MAC_MODE_PORT_MODE_MII;
2029 if (phydev->duplex == DUPLEX_HALF)
2030 mac_mode |= MAC_MODE_HALF_DUPLEX;
2031 else {
2032 lcl_adv = mii_advertise_flowctrl(
2033 tp->link_config.flowctrl);
2035 if (phydev->pause)
2036 rmt_adv = LPA_PAUSE_CAP;
2037 if (phydev->asym_pause)
2038 rmt_adv |= LPA_PAUSE_ASYM;
2041 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2042 } else
2043 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2045 if (mac_mode != tp->mac_mode) {
2046 tp->mac_mode = mac_mode;
2047 tw32_f(MAC_MODE, tp->mac_mode);
2048 udelay(40);
2051 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2052 if (phydev->speed == SPEED_10)
2053 tw32(MAC_MI_STAT,
2054 MAC_MI_STAT_10MBPS_MODE |
2055 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2056 else
2057 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2060 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2061 tw32(MAC_TX_LENGTHS,
2062 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2063 (6 << TX_LENGTHS_IPG_SHIFT) |
2064 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2065 else
2066 tw32(MAC_TX_LENGTHS,
2067 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2068 (6 << TX_LENGTHS_IPG_SHIFT) |
2069 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2071 if (phydev->link != tp->old_link ||
2072 phydev->speed != tp->link_config.active_speed ||
2073 phydev->duplex != tp->link_config.active_duplex ||
2074 oldflowctrl != tp->link_config.active_flowctrl)
2075 linkmesg = 1;
2077 tp->old_link = phydev->link;
2078 tp->link_config.active_speed = phydev->speed;
2079 tp->link_config.active_duplex = phydev->duplex;
2081 spin_unlock_bh(&tp->lock);
2083 if (linkmesg)
2084 tg3_link_report(tp);
2087 static int tg3_phy_init(struct tg3 *tp)
2089 struct phy_device *phydev;
2091 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2092 return 0;
2094 /* Bring the PHY back to a known state. */
2095 tg3_bmcr_reset(tp);
2097 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2099 /* Attach the MAC to the PHY. */
2100 phydev = phy_connect(tp->dev, phydev_name(phydev),
2101 tg3_adjust_link, phydev->interface);
2102 if (IS_ERR(phydev)) {
2103 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2104 return PTR_ERR(phydev);
2107 /* Mask with MAC supported features. */
2108 switch (phydev->interface) {
2109 case PHY_INTERFACE_MODE_GMII:
2110 case PHY_INTERFACE_MODE_RGMII:
2111 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2112 phydev->supported &= (PHY_GBIT_FEATURES |
2113 SUPPORTED_Pause |
2114 SUPPORTED_Asym_Pause);
2115 break;
2117 /* fall through */
2118 case PHY_INTERFACE_MODE_MII:
2119 phydev->supported &= (PHY_BASIC_FEATURES |
2120 SUPPORTED_Pause |
2121 SUPPORTED_Asym_Pause);
2122 break;
2123 default:
2124 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2125 return -EINVAL;
2128 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2130 phydev->advertising = phydev->supported;
2132 phy_attached_info(phydev);
2134 return 0;
2137 static void tg3_phy_start(struct tg3 *tp)
2139 struct phy_device *phydev;
2141 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2142 return;
2144 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2146 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2147 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2148 phydev->speed = tp->link_config.speed;
2149 phydev->duplex = tp->link_config.duplex;
2150 phydev->autoneg = tp->link_config.autoneg;
2151 phydev->advertising = tp->link_config.advertising;
2154 phy_start(phydev);
2156 phy_start_aneg(phydev);
2159 static void tg3_phy_stop(struct tg3 *tp)
2161 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2162 return;
2164 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2167 static void tg3_phy_fini(struct tg3 *tp)
2169 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2170 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2171 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2175 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2177 int err;
2178 u32 val;
2180 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2181 return 0;
2183 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2184 /* Cannot do read-modify-write on 5401 */
2185 err = tg3_phy_auxctl_write(tp,
2186 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2187 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2188 0x4c20);
2189 goto done;
2192 err = tg3_phy_auxctl_read(tp,
2193 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2194 if (err)
2195 return err;
2197 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2198 err = tg3_phy_auxctl_write(tp,
2199 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2201 done:
2202 return err;
2205 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2207 u32 phytest;
2209 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2210 u32 phy;
2212 tg3_writephy(tp, MII_TG3_FET_TEST,
2213 phytest | MII_TG3_FET_SHADOW_EN);
2214 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2215 if (enable)
2216 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2217 else
2218 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2219 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2221 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2225 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2227 u32 reg;
2229 if (!tg3_flag(tp, 5705_PLUS) ||
2230 (tg3_flag(tp, 5717_PLUS) &&
2231 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2232 return;
2234 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2235 tg3_phy_fet_toggle_apd(tp, enable);
2236 return;
2239 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2240 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2241 MII_TG3_MISC_SHDW_SCR5_SDTL |
2242 MII_TG3_MISC_SHDW_SCR5_C125OE;
2243 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2244 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2246 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2249 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2250 if (enable)
2251 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2253 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2256 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2258 u32 phy;
2260 if (!tg3_flag(tp, 5705_PLUS) ||
2261 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2262 return;
2264 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2265 u32 ephy;
2267 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2268 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2270 tg3_writephy(tp, MII_TG3_FET_TEST,
2271 ephy | MII_TG3_FET_SHADOW_EN);
2272 if (!tg3_readphy(tp, reg, &phy)) {
2273 if (enable)
2274 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2275 else
2276 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2277 tg3_writephy(tp, reg, phy);
2279 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2281 } else {
2282 int ret;
2284 ret = tg3_phy_auxctl_read(tp,
2285 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2286 if (!ret) {
2287 if (enable)
2288 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2289 else
2290 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2291 tg3_phy_auxctl_write(tp,
2292 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
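/* Access-pattern note (illustrative): FET-class PHYs keep the MDIX
 * control behind a shadow register bank, so the sequence above is
 * always of the form
 *
 *	tg3_writephy(tp, MII_TG3_FET_TEST, ephy | MII_TG3_FET_SHADOW_EN);
 *	... read/modify/write the shadowed register ...
 *	tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
 *
 * i.e. the original TEST value is written back to leave shadow mode.
 */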
2297 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2299 int ret;
2300 u32 val;
2302 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2303 return;
2305 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2306 if (!ret)
2307 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2308 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2311 static void tg3_phy_apply_otp(struct tg3 *tp)
2313 u32 otp, phy;
2315 if (!tp->phy_otp)
2316 return;
2318 otp = tp->phy_otp;
2320 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2321 return;
2323 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2324 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2325 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2327 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2328 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2329 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2331 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2332 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2333 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2335 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2336 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2338 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2339 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2341 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2342 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2343 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2345 tg3_phy_toggle_auxctl_smdsp(tp, false);
2348 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2350 u32 val;
2351 struct ethtool_eee *dest = &tp->eee;
2353 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2354 return;
2356 if (eee)
2357 dest = eee;
2359 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2360 return;
2362 /* Pull eee_active */
2363 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2364 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2365 dest->eee_active = 1;
2366 } else
2367 dest->eee_active = 0;
2369 /* Pull lp advertised settings */
2370 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2371 return;
2372 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2374 /* Pull advertised and eee_enabled settings */
2375 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2376 return;
2377 dest->eee_enabled = !!val;
2378 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2380 /* Pull tx_lpi_enabled */
2381 val = tr32(TG3_CPMU_EEE_MODE);
2382 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2384 /* Pull lpi timer value */
2385 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2388 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2390 u32 val;
2392 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2393 return;
2395 tp->setlpicnt = 0;
2397 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2398 current_link_up &&
2399 tp->link_config.active_duplex == DUPLEX_FULL &&
2400 (tp->link_config.active_speed == SPEED_100 ||
2401 tp->link_config.active_speed == SPEED_1000)) {
2402 u32 eeectl;
2404 if (tp->link_config.active_speed == SPEED_1000)
2405 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2406 else
2407 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2409 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2411 tg3_eee_pull_config(tp, NULL);
2412 if (tp->eee.eee_active)
2413 tp->setlpicnt = 2;
2416 if (!tp->setlpicnt) {
2417 if (current_link_up &&
2418 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2419 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2420 tg3_phy_toggle_auxctl_smdsp(tp, false);
2423 val = tr32(TG3_CPMU_EEE_MODE);
2424 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2428 static void tg3_phy_eee_enable(struct tg3 *tp)
2430 u32 val;
2432 if (tp->link_config.active_speed == SPEED_1000 &&
2433 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2434 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2435 tg3_flag(tp, 57765_CLASS)) &&
2436 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2437 val = MII_TG3_DSP_TAP26_ALNOKO |
2438 MII_TG3_DSP_TAP26_RMRXSTO;
2439 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2440 tg3_phy_toggle_auxctl_smdsp(tp, false);
2443 val = tr32(TG3_CPMU_EEE_MODE);
2444 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2447 static int tg3_wait_macro_done(struct tg3 *tp)
2449 int limit = 100;
2451 while (limit--) {
2452 u32 tmp32;
2454 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2455 if ((tmp32 & 0x1000) == 0)
2456 break;
2459 if (limit < 0)
2460 return -EBUSY;
2462 return 0;
2465 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2467 static const u32 test_pat[4][6] = {
2468 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2469 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2470 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2471 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2473 int chan;
2475 for (chan = 0; chan < 4; chan++) {
2476 int i;
2478 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2479 (chan * 0x2000) | 0x0200);
2480 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2482 for (i = 0; i < 6; i++)
2483 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2484 test_pat[chan][i]);
2486 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2487 if (tg3_wait_macro_done(tp)) {
2488 *resetp = 1;
2489 return -EBUSY;
2492 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2493 (chan * 0x2000) | 0x0200);
2494 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2495 if (tg3_wait_macro_done(tp)) {
2496 *resetp = 1;
2497 return -EBUSY;
2500 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2501 if (tg3_wait_macro_done(tp)) {
2502 *resetp = 1;
2503 return -EBUSY;
2506 for (i = 0; i < 6; i += 2) {
2507 u32 low, high;
2509 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2510 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2511 tg3_wait_macro_done(tp)) {
2512 *resetp = 1;
2513 return -EBUSY;
2515 low &= 0x7fff;
2516 high &= 0x000f;
2517 if (low != test_pat[chan][i] ||
2518 high != test_pat[chan][i+1]) {
2519 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2520 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2521 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2523 return -EBUSY;
2528 return 0;
2531 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2533 int chan;
2535 for (chan = 0; chan < 4; chan++) {
2536 int i;
2538 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2539 (chan * 0x2000) | 0x0200);
2540 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2541 for (i = 0; i < 6; i++)
2542 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2543 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2544 if (tg3_wait_macro_done(tp))
2545 return -EBUSY;
2548 return 0;
2551 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2553 u32 reg32, phy9_orig;
2554 int retries, do_phy_reset, err;
2556 retries = 10;
2557 do_phy_reset = 1;
2558 do {
2559 if (do_phy_reset) {
2560 err = tg3_bmcr_reset(tp);
2561 if (err)
2562 return err;
2563 do_phy_reset = 0;
2566 /* Disable transmitter and interrupt. */
2567 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2568 continue;
2570 reg32 |= 0x3000;
2571 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2573 /* Set full-duplex, 1000 Mbps. */
2574 tg3_writephy(tp, MII_BMCR,
2575 BMCR_FULLDPLX | BMCR_SPEED1000);
2577 /* Set to master mode. */
2578 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2579 continue;
2581 tg3_writephy(tp, MII_CTRL1000,
2582 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2584 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2585 if (err)
2586 return err;
2588 /* Block the PHY control access. */
2589 tg3_phydsp_write(tp, 0x8005, 0x0800);
2591 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2592 if (!err)
2593 break;
2594 } while (--retries);
2596 err = tg3_phy_reset_chanpat(tp);
2597 if (err)
2598 return err;
2600 tg3_phydsp_write(tp, 0x8005, 0x0000);
2602 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2603 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2605 tg3_phy_toggle_auxctl_smdsp(tp, false);
2607 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2609 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2610 if (err)
2611 return err;
2613 reg32 &= ~0x3000;
2614 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2616 return 0;
2619 static void tg3_carrier_off(struct tg3 *tp)
2621 netif_carrier_off(tp->dev);
2622 tp->link_up = false;
2625 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2627 if (tg3_flag(tp, ENABLE_ASF))
2628 netdev_warn(tp->dev,
2629 "Management side-band traffic will be interrupted during phy settings change\n");
2632 /* This will reset the tigon3 PHY if there is no valid
2633 * link.
2634 */
2635 static int tg3_phy_reset(struct tg3 *tp)
2637 u32 val, cpmuctrl;
2638 int err;
2640 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2641 val = tr32(GRC_MISC_CFG);
2642 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2643 udelay(40);
2645 err = tg3_readphy(tp, MII_BMSR, &val);
2646 err |= tg3_readphy(tp, MII_BMSR, &val);
2647 if (err != 0)
2648 return -EBUSY;
2650 if (netif_running(tp->dev) && tp->link_up) {
2651 netif_carrier_off(tp->dev);
2652 tg3_link_report(tp);
2655 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2656 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2657 tg3_asic_rev(tp) == ASIC_REV_5705) {
2658 err = tg3_phy_reset_5703_4_5(tp);
2659 if (err)
2660 return err;
2661 goto out;
2664 cpmuctrl = 0;
2665 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2666 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2667 cpmuctrl = tr32(TG3_CPMU_CTRL);
2668 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2669 tw32(TG3_CPMU_CTRL,
2670 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2673 err = tg3_bmcr_reset(tp);
2674 if (err)
2675 return err;
2677 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2678 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2679 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2681 tw32(TG3_CPMU_CTRL, cpmuctrl);
2684 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2685 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2686 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2687 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2688 CPMU_LSPD_1000MB_MACCLK_12_5) {
2689 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2690 udelay(40);
2691 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2695 if (tg3_flag(tp, 5717_PLUS) &&
2696 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2697 return 0;
2699 tg3_phy_apply_otp(tp);
2701 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2702 tg3_phy_toggle_apd(tp, true);
2703 else
2704 tg3_phy_toggle_apd(tp, false);
2706 out:
2707 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2708 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2709 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2710 tg3_phydsp_write(tp, 0x000a, 0x0323);
2711 tg3_phy_toggle_auxctl_smdsp(tp, false);
2714 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2715 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2716 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2719 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2720 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2721 tg3_phydsp_write(tp, 0x000a, 0x310b);
2722 tg3_phydsp_write(tp, 0x201f, 0x9506);
2723 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2724 tg3_phy_toggle_auxctl_smdsp(tp, false);
2726 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2727 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2728 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2729 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2730 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2731 tg3_writephy(tp, MII_TG3_TEST1,
2732 MII_TG3_TEST1_TRIM_EN | 0x4);
2733 } else
2734 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2736 tg3_phy_toggle_auxctl_smdsp(tp, false);
2740 /* Set the extended packet length bit (bit 14) on all chips that
2741 * support jumbo frames. */
2742 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2743 /* Cannot do read-modify-write on 5401 */
2744 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2745 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2746 /* Set bit 14 with read-modify-write to preserve other bits */
2747 err = tg3_phy_auxctl_read(tp,
2748 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2749 if (!err)
2750 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2751 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2754 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2755 * jumbo frames transmission.
2756 */
2757 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2758 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2759 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2760 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2763 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2764 /* adjust output voltage */
2765 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2768 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2769 tg3_phydsp_write(tp, 0xffb, 0x4000);
2771 tg3_phy_toggle_automdix(tp, true);
2772 tg3_phy_set_wirespeed(tp);
2773 return 0;
2776 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2777 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2778 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2779 TG3_GPIO_MSG_NEED_VAUX)
2780 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2781 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2782 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2783 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2784 (TG3_GPIO_MSG_DRVR_PRES << 12))
2786 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2787 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2788 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2789 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2790 (TG3_GPIO_MSG_NEED_VAUX << 12))
2792 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2794 u32 status, shift;
2796 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2797 tg3_asic_rev(tp) == ASIC_REV_5719)
2798 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2799 else
2800 status = tr32(TG3_CPMU_DRV_STATUS);
2802 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2803 status &= ~(TG3_GPIO_MSG_MASK << shift);
2804 status |= (newstat << shift);
2806 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2807 tg3_asic_rev(tp) == ASIC_REV_5719)
2808 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2809 else
2810 tw32(TG3_CPMU_DRV_STATUS, status);
2812 return status >> TG3_APE_GPIO_MSG_SHIFT;
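/* Example (illustrative): each PCI function owns a 4-bit nibble of the
 * GPIO message word, so for function 2 the update above acts on
 *
 *	shift  = TG3_APE_GPIO_MSG_SHIFT + 4 * 2;
 *	status = (status & ~(TG3_GPIO_MSG_MASK << shift)) |
 *		 (newstat << shift);
 *
 * The return value drops TG3_APE_GPIO_MSG_SHIFT again so callers can
 * test the TG3_GPIO_MSG_ALL_*_MASK values directly.
 */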
2815 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2817 if (!tg3_flag(tp, IS_NIC))
2818 return 0;
2820 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2821 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2822 tg3_asic_rev(tp) == ASIC_REV_5720) {
2823 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2824 return -EIO;
2826 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2828 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2829 TG3_GRC_LCLCTL_PWRSW_DELAY);
2831 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2832 } else {
2833 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2834 TG3_GRC_LCLCTL_PWRSW_DELAY);
2837 return 0;
2840 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2842 u32 grc_local_ctrl;
2844 if (!tg3_flag(tp, IS_NIC) ||
2845 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2846 tg3_asic_rev(tp) == ASIC_REV_5701)
2847 return;
2849 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2851 tw32_wait_f(GRC_LOCAL_CTRL,
2852 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2853 TG3_GRC_LCLCTL_PWRSW_DELAY);
2855 tw32_wait_f(GRC_LOCAL_CTRL,
2856 grc_local_ctrl,
2857 TG3_GRC_LCLCTL_PWRSW_DELAY);
2859 tw32_wait_f(GRC_LOCAL_CTRL,
2860 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2861 TG3_GRC_LCLCTL_PWRSW_DELAY);
2864 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2866 if (!tg3_flag(tp, IS_NIC))
2867 return;
2869 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2870 tg3_asic_rev(tp) == ASIC_REV_5701) {
2871 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2872 (GRC_LCLCTRL_GPIO_OE0 |
2873 GRC_LCLCTRL_GPIO_OE1 |
2874 GRC_LCLCTRL_GPIO_OE2 |
2875 GRC_LCLCTRL_GPIO_OUTPUT0 |
2876 GRC_LCLCTRL_GPIO_OUTPUT1),
2877 TG3_GRC_LCLCTL_PWRSW_DELAY);
2878 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2879 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2880 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2881 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2882 GRC_LCLCTRL_GPIO_OE1 |
2883 GRC_LCLCTRL_GPIO_OE2 |
2884 GRC_LCLCTRL_GPIO_OUTPUT0 |
2885 GRC_LCLCTRL_GPIO_OUTPUT1 |
2886 tp->grc_local_ctrl;
2887 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2888 TG3_GRC_LCLCTL_PWRSW_DELAY);
2890 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2891 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2892 TG3_GRC_LCLCTL_PWRSW_DELAY);
2894 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2895 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2896 TG3_GRC_LCLCTL_PWRSW_DELAY);
2897 } else {
2898 u32 no_gpio2;
2899 u32 grc_local_ctrl = 0;
2901 /* Workaround to prevent overdrawing Amps. */
2902 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2903 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2904 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2905 grc_local_ctrl,
2906 TG3_GRC_LCLCTL_PWRSW_DELAY);
2909 /* On 5753 and variants, GPIO2 cannot be used. */
2910 no_gpio2 = tp->nic_sram_data_cfg &
2911 NIC_SRAM_DATA_CFG_NO_GPIO2;
2913 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2914 GRC_LCLCTRL_GPIO_OE1 |
2915 GRC_LCLCTRL_GPIO_OE2 |
2916 GRC_LCLCTRL_GPIO_OUTPUT1 |
2917 GRC_LCLCTRL_GPIO_OUTPUT2;
2918 if (no_gpio2) {
2919 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2920 GRC_LCLCTRL_GPIO_OUTPUT2);
2922 tw32_wait_f(GRC_LOCAL_CTRL,
2923 tp->grc_local_ctrl | grc_local_ctrl,
2924 TG3_GRC_LCLCTL_PWRSW_DELAY);
2926 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2928 tw32_wait_f(GRC_LOCAL_CTRL,
2929 tp->grc_local_ctrl | grc_local_ctrl,
2930 TG3_GRC_LCLCTL_PWRSW_DELAY);
2932 if (!no_gpio2) {
2933 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2934 tw32_wait_f(GRC_LOCAL_CTRL,
2935 tp->grc_local_ctrl | grc_local_ctrl,
2936 TG3_GRC_LCLCTL_PWRSW_DELAY);
2941 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2943 u32 msg = 0;
2945 /* Serialize power state transitions */
2946 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2947 return;
2949 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2950 msg = TG3_GPIO_MSG_NEED_VAUX;
2952 msg = tg3_set_function_status(tp, msg);
2954 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2955 goto done;
2957 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2958 tg3_pwrsrc_switch_to_vaux(tp);
2959 else
2960 tg3_pwrsrc_die_with_vmain(tp);
2962 done:
2963 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2966 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2968 bool need_vaux = false;
2970 /* The GPIOs do something completely different on 57765. */
2971 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2972 return;
2974 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2975 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2976 tg3_asic_rev(tp) == ASIC_REV_5720) {
2977 tg3_frob_aux_power_5717(tp, include_wol ?
2978 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2979 return;
2982 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2983 struct net_device *dev_peer;
2985 dev_peer = pci_get_drvdata(tp->pdev_peer);
2987 /* remove_one() may have been run on the peer. */
2988 if (dev_peer) {
2989 struct tg3 *tp_peer = netdev_priv(dev_peer);
2991 if (tg3_flag(tp_peer, INIT_COMPLETE))
2992 return;
2994 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2995 tg3_flag(tp_peer, ENABLE_ASF))
2996 need_vaux = true;
3000 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3001 tg3_flag(tp, ENABLE_ASF))
3002 need_vaux = true;
3004 if (need_vaux)
3005 tg3_pwrsrc_switch_to_vaux(tp);
3006 else
3007 tg3_pwrsrc_die_with_vmain(tp);
3010 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3012 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3013 return 1;
3014 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3015 if (speed != SPEED_10)
3016 return 1;
3017 } else if (speed == SPEED_10)
3018 return 1;
3020 return 0;
3023 static bool tg3_phy_power_bug(struct tg3 *tp)
3025 switch (tg3_asic_rev(tp)) {
3026 case ASIC_REV_5700:
3027 case ASIC_REV_5704:
3028 return true;
3029 case ASIC_REV_5780:
3030 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3031 return true;
3032 return false;
3033 case ASIC_REV_5717:
3034 if (!tp->pci_fn)
3035 return true;
3036 return false;
3037 case ASIC_REV_5719:
3038 case ASIC_REV_5720:
3039 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3040 !tp->pci_fn)
3041 return true;
3042 return false;
3045 return false;
3048 static bool tg3_phy_led_bug(struct tg3 *tp)
3050 switch (tg3_asic_rev(tp)) {
3051 case ASIC_REV_5719:
3052 case ASIC_REV_5720:
3053 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3054 !tp->pci_fn)
3055 return true;
3056 return false;
3059 return false;
3062 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3064 u32 val;
3066 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3067 return;
3069 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3070 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3071 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3072 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3074 sg_dig_ctrl |=
3075 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3076 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3077 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3079 return;
3082 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3083 tg3_bmcr_reset(tp);
3084 val = tr32(GRC_MISC_CFG);
3085 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3086 udelay(40);
3087 return;
3088 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3089 u32 phytest;
3090 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3091 u32 phy;
3093 tg3_writephy(tp, MII_ADVERTISE, 0);
3094 tg3_writephy(tp, MII_BMCR,
3095 BMCR_ANENABLE | BMCR_ANRESTART);
3097 tg3_writephy(tp, MII_TG3_FET_TEST,
3098 phytest | MII_TG3_FET_SHADOW_EN);
3099 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3100 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3101 tg3_writephy(tp,
3102 MII_TG3_FET_SHDW_AUXMODE4,
3103 phy);
3105 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3107 return;
3108 } else if (do_low_power) {
3109 if (!tg3_phy_led_bug(tp))
3110 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3111 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3113 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3114 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3115 MII_TG3_AUXCTL_PCTL_VREG_11V;
3116 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3119 /* The PHY should not be powered down on some chips because
3120 * of bugs.
3121 */
3122 if (tg3_phy_power_bug(tp))
3123 return;
3125 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3126 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3127 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3128 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3129 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3130 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3133 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3136 /* tp->lock is held. */
3137 static int tg3_nvram_lock(struct tg3 *tp)
3139 if (tg3_flag(tp, NVRAM)) {
3140 int i;
3142 if (tp->nvram_lock_cnt == 0) {
3143 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3144 for (i = 0; i < 8000; i++) {
3145 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3146 break;
3147 udelay(20);
3149 if (i == 8000) {
3150 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3151 return -ENODEV;
3154 tp->nvram_lock_cnt++;
3156 return 0;
3159 /* tp->lock is held. */
3160 static void tg3_nvram_unlock(struct tg3 *tp)
3162 if (tg3_flag(tp, NVRAM)) {
3163 if (tp->nvram_lock_cnt > 0)
3164 tp->nvram_lock_cnt--;
3165 if (tp->nvram_lock_cnt == 0)
3166 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
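/* Typical usage (illustrative sketch, mirroring tg3_nvram_read()
 * further below):
 *
 *	if (!tg3_nvram_lock(tp)) {
 *		tg3_enable_nvram_access(tp);
 *		... NVRAM_ADDR / NVRAM_CMD accesses ...
 *		tg3_disable_nvram_access(tp);
 *		tg3_nvram_unlock(tp);
 *	}
 *
 * The lock is a counted software-arbitration grant (SWARB_GNT1), so
 * nested lock/unlock pairs under tp->lock are safe.
 */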
3170 /* tp->lock is held. */
3171 static void tg3_enable_nvram_access(struct tg3 *tp)
3173 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3174 u32 nvaccess = tr32(NVRAM_ACCESS);
3176 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3180 /* tp->lock is held. */
3181 static void tg3_disable_nvram_access(struct tg3 *tp)
3183 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3184 u32 nvaccess = tr32(NVRAM_ACCESS);
3186 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3190 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3191 u32 offset, u32 *val)
3193 u32 tmp;
3194 int i;
3196 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3197 return -EINVAL;
3199 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3200 EEPROM_ADDR_DEVID_MASK |
3201 EEPROM_ADDR_READ);
3202 tw32(GRC_EEPROM_ADDR,
3203 tmp |
3204 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3205 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3206 EEPROM_ADDR_ADDR_MASK) |
3207 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3209 for (i = 0; i < 1000; i++) {
3210 tmp = tr32(GRC_EEPROM_ADDR);
3212 if (tmp & EEPROM_ADDR_COMPLETE)
3213 break;
3214 msleep(1);
3216 if (!(tmp & EEPROM_ADDR_COMPLETE))
3217 return -EBUSY;
3219 tmp = tr32(GRC_EEPROM_DATA);
3221 /*
3222 * The data will always be opposite the native endian
3223 * format. Perform a blind byteswap to compensate.
3224 */
3225 *val = swab32(tmp);
3227 return 0;
3230 #define NVRAM_CMD_TIMEOUT 10000
3232 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3234 int i;
3236 tw32(NVRAM_CMD, nvram_cmd);
3237 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3238 usleep_range(10, 40);
3239 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3240 udelay(10);
3241 break;
3245 if (i == NVRAM_CMD_TIMEOUT)
3246 return -EBUSY;
3248 return 0;
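/* Example (illustrative): a single-word read is issued as
 *
 *	tw32(NVRAM_ADDR, offset);
 *	tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
 *			   NVRAM_CMD_FIRST | NVRAM_CMD_LAST |
 *			   NVRAM_CMD_DONE);
 *
 * exactly as tg3_nvram_read() does below; the poll loop tolerates up
 * to NVRAM_CMD_TIMEOUT iterations of 10-40us before returning -EBUSY.
 */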
3251 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3253 if (tg3_flag(tp, NVRAM) &&
3254 tg3_flag(tp, NVRAM_BUFFERED) &&
3255 tg3_flag(tp, FLASH) &&
3256 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3257 (tp->nvram_jedecnum == JEDEC_ATMEL))
3259 addr = ((addr / tp->nvram_pagesize) <<
3260 ATMEL_AT45DB0X1B_PAGE_POS) +
3261 (addr % tp->nvram_pagesize);
3263 return addr;
3266 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3268 if (tg3_flag(tp, NVRAM) &&
3269 tg3_flag(tp, NVRAM_BUFFERED) &&
3270 tg3_flag(tp, FLASH) &&
3271 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3272 (tp->nvram_jedecnum == JEDEC_ATMEL))
3274 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3275 tp->nvram_pagesize) +
3276 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3278 return addr;
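/* Worked example (assuming the AT45DB0X1B geometry these helpers
 * target: 264-byte pages, ATMEL_AT45DB0X1B_PAGE_POS == 9): linear
 * offset 1000 is 208 bytes into page 3, so tg3_nvram_phys_addr()
 * yields
 *
 *	((1000 / 264) << 9) + (1000 % 264) == 1744 (0x6d0)
 *
 * and tg3_nvram_logical_addr() maps 1744 back to 1000.
 */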
3281 /* NOTE: Data read in from NVRAM is byteswapped according to
3282 * the byteswapping settings for all other register accesses.
3283 * tg3 devices are BE devices, so on a BE machine, the data
3284 * returned will be exactly as it is seen in NVRAM. On a LE
3285 * machine, the 32-bit value will be byteswapped.
3286 */
3287 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3289 int ret;
3291 if (!tg3_flag(tp, NVRAM))
3292 return tg3_nvram_read_using_eeprom(tp, offset, val);
3294 offset = tg3_nvram_phys_addr(tp, offset);
3296 if (offset > NVRAM_ADDR_MSK)
3297 return -EINVAL;
3299 ret = tg3_nvram_lock(tp);
3300 if (ret)
3301 return ret;
3303 tg3_enable_nvram_access(tp);
3305 tw32(NVRAM_ADDR, offset);
3306 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3307 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3309 if (ret == 0)
3310 *val = tr32(NVRAM_RDDATA);
3312 tg3_disable_nvram_access(tp);
3314 tg3_nvram_unlock(tp);
3316 return ret;
3319 /* Ensures NVRAM data is in bytestream format. */
3320 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3322 u32 v;
3323 int res = tg3_nvram_read(tp, offset, &v);
3324 if (!res)
3325 *val = cpu_to_be32(v);
3326 return res;
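/* Example (illustrative): callers that parse NVRAM contents as a byte
 * stream (MAC addresses, VPD and the like) use the _be32 variant so
 * the result is layout-independent:
 *
 *	__be32 word;
 *	if (!tg3_nvram_read_be32(tp, offset, &word))
 *		memcpy(buf, &word, sizeof(word));
 */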
3329 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3330 u32 offset, u32 len, u8 *buf)
3332 int i, j, rc = 0;
3333 u32 val;
3335 for (i = 0; i < len; i += 4) {
3336 u32 addr;
3337 __be32 data;
3339 addr = offset + i;
3341 memcpy(&data, buf + i, 4);
3343 /*
3344 * The SEEPROM interface expects the data to always be opposite
3345 * the native endian format. We accomplish this by reversing
3346 * all the operations that would have been performed on the
3347 * data from a call to tg3_nvram_read_be32().
3348 */
3349 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3351 val = tr32(GRC_EEPROM_ADDR);
3352 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3354 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3355 EEPROM_ADDR_READ);
3356 tw32(GRC_EEPROM_ADDR, val |
3357 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3358 (addr & EEPROM_ADDR_ADDR_MASK) |
3359 EEPROM_ADDR_START |
3360 EEPROM_ADDR_WRITE);
3362 for (j = 0; j < 1000; j++) {
3363 val = tr32(GRC_EEPROM_ADDR);
3365 if (val & EEPROM_ADDR_COMPLETE)
3366 break;
3367 msleep(1);
3369 if (!(val & EEPROM_ADDR_COMPLETE)) {
3370 rc = -EBUSY;
3371 break;
3375 return rc;
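/* Endianness sketch (assuming a little-endian host): for the
 * bytestream word holding bytes AA BB CC DD,
 *
 *	be32_to_cpu(data)         == 0xAABBCCDD   (native value)
 *	swab32(be32_to_cpu(data)) == 0xDDCCBBAA
 *
 * which is the "opposite the native endian" form the SEEPROM data
 * register expects; tg3_nvram_read_using_eeprom() undoes it with the
 * matching swab32() on the read side.
 */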
3378 /* offset and length are dword aligned */
3379 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3380 u8 *buf)
3382 int ret = 0;
3383 u32 pagesize = tp->nvram_pagesize;
3384 u32 pagemask = pagesize - 1;
3385 u32 nvram_cmd;
3386 u8 *tmp;
3388 tmp = kmalloc(pagesize, GFP_KERNEL);
3389 if (tmp == NULL)
3390 return -ENOMEM;
3392 while (len) {
3393 int j;
3394 u32 phy_addr, page_off, size;
3396 phy_addr = offset & ~pagemask;
3398 for (j = 0; j < pagesize; j += 4) {
3399 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3400 (__be32 *) (tmp + j));
3401 if (ret)
3402 break;
3404 if (ret)
3405 break;
3407 page_off = offset & pagemask;
3408 size = pagesize;
3409 if (len < size)
3410 size = len;
3412 len -= size;
3414 memcpy(tmp + page_off, buf, size);
3416 offset = offset + (pagesize - page_off);
3418 tg3_enable_nvram_access(tp);
3420 /*
3421 * Before we can erase the flash page, we need
3422 * to issue a special "write enable" command.
3423 */
3424 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3426 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3427 break;
3429 /* Erase the target page */
3430 tw32(NVRAM_ADDR, phy_addr);
3432 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3433 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3435 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3436 break;
3438 /* Issue another write enable to start the write. */
3439 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3441 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3442 break;
3444 for (j = 0; j < pagesize; j += 4) {
3445 __be32 data;
3447 data = *((__be32 *) (tmp + j));
3449 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3451 tw32(NVRAM_ADDR, phy_addr + j);
3453 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3454 NVRAM_CMD_WR;
3456 if (j == 0)
3457 nvram_cmd |= NVRAM_CMD_FIRST;
3458 else if (j == (pagesize - 4))
3459 nvram_cmd |= NVRAM_CMD_LAST;
3461 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3462 if (ret)
3463 break;
3465 if (ret)
3466 break;
3469 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3470 tg3_nvram_exec_cmd(tp, nvram_cmd);
3472 kfree(tmp);
3474 return ret;
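/* Flow summary (illustrative): unbuffered flash parts can only be
 * erased a page at a time, so each loop iteration above is effectively
 *
 *	read page -> merge caller data -> WREN -> page ERASE ->
 *	WREN -> program the page word by word (FIRST ... LAST)
 *
 * with a final WRDI to drop write enable once all pages are done.
 */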
3477 /* offset and length are dword aligned */
3478 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3479 u8 *buf)
3481 int i, ret = 0;
3483 for (i = 0; i < len; i += 4, offset += 4) {
3484 u32 page_off, phy_addr, nvram_cmd;
3485 __be32 data;
3487 memcpy(&data, buf + i, 4);
3488 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3490 page_off = offset % tp->nvram_pagesize;
3492 phy_addr = tg3_nvram_phys_addr(tp, offset);
3494 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3496 if (page_off == 0 || i == 0)
3497 nvram_cmd |= NVRAM_CMD_FIRST;
3498 if (page_off == (tp->nvram_pagesize - 4))
3499 nvram_cmd |= NVRAM_CMD_LAST;
3501 if (i == (len - 4))
3502 nvram_cmd |= NVRAM_CMD_LAST;
3504 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3505 !tg3_flag(tp, FLASH) ||
3506 !tg3_flag(tp, 57765_PLUS))
3507 tw32(NVRAM_ADDR, phy_addr);
3509 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3510 !tg3_flag(tp, 5755_PLUS) &&
3511 (tp->nvram_jedecnum == JEDEC_ST) &&
3512 (nvram_cmd & NVRAM_CMD_FIRST)) {
3513 u32 cmd;
3515 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3516 ret = tg3_nvram_exec_cmd(tp, cmd);
3517 if (ret)
3518 break;
3520 if (!tg3_flag(tp, FLASH)) {
3521 /* We always do complete word writes to eeprom. */
3522 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3525 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3526 if (ret)
3527 break;
3529 return ret;
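/* Example (illustrative): for a 264-byte page, a dword at page offset
 * 260 is the last word of its page, so the command word above becomes
 *
 *	nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
 *		    NVRAM_CMD_LAST;
 *
 * The first dword of a transfer (i == 0) always carries
 * NVRAM_CMD_FIRST, and plain EEPROMs get FIRST | LAST on every word.
 */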
3532 /* offset and length are dword aligned */
3533 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3535 int ret;
3537 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3538 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3539 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3540 udelay(40);
3543 if (!tg3_flag(tp, NVRAM)) {
3544 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3545 } else {
3546 u32 grc_mode;
3548 ret = tg3_nvram_lock(tp);
3549 if (ret)
3550 return ret;
3552 tg3_enable_nvram_access(tp);
3553 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3554 tw32(NVRAM_WRITE1, 0x406);
3556 grc_mode = tr32(GRC_MODE);
3557 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3559 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3560 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3561 buf);
3562 } else {
3563 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3564 buf);
3567 grc_mode = tr32(GRC_MODE);
3568 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3570 tg3_disable_nvram_access(tp);
3571 tg3_nvram_unlock(tp);
3574 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3575 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3576 udelay(40);
3579 return ret;
3582 #define RX_CPU_SCRATCH_BASE 0x30000
3583 #define RX_CPU_SCRATCH_SIZE 0x04000
3584 #define TX_CPU_SCRATCH_BASE 0x34000
3585 #define TX_CPU_SCRATCH_SIZE 0x04000
3587 /* tp->lock is held. */
3588 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3590 int i;
3591 const int iters = 10000;
3593 for (i = 0; i < iters; i++) {
3594 tw32(cpu_base + CPU_STATE, 0xffffffff);
3595 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3596 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3597 break;
3598 if (pci_channel_offline(tp->pdev))
3599 return -EBUSY;
3602 return (i == iters) ? -EBUSY : 0;
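/* Usage sketch: tg3_rxcpu_pause() and tg3_txcpu_pause() below are thin
 * wrappers around this poll loop, e.g.
 *
 *	rc = tg3_pause_cpu(tp, RX_CPU_BASE);
 *
 * Note the halt request is re-issued on every iteration rather than
 * written once and polled, and pci_channel_offline() is checked so the
 * loop cannot spin for long on a surprise-removed device.
 */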
3605 /* tp->lock is held. */
3606 static int tg3_rxcpu_pause(struct tg3 *tp)
3608 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3610 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3611 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3612 udelay(10);
3614 return rc;
3617 /* tp->lock is held. */
3618 static int tg3_txcpu_pause(struct tg3 *tp)
3620 return tg3_pause_cpu(tp, TX_CPU_BASE);
3623 /* tp->lock is held. */
3624 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3626 tw32(cpu_base + CPU_STATE, 0xffffffff);
3627 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3630 /* tp->lock is held. */
3631 static void tg3_rxcpu_resume(struct tg3 *tp)
3633 tg3_resume_cpu(tp, RX_CPU_BASE);
3636 /* tp->lock is held. */
3637 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3639 int rc;
3641 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3643 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3644 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3646 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3647 return 0;
3649 if (cpu_base == RX_CPU_BASE) {
3650 rc = tg3_rxcpu_pause(tp);
3651 } else {
3652 /*
3653 * There is only an Rx CPU for the 5750 derivative in the
3654 * BCM4785.
3655 */
3656 if (tg3_flag(tp, IS_SSB_CORE))
3657 return 0;
3659 rc = tg3_txcpu_pause(tp);
3662 if (rc) {
3663 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3664 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3665 return -ENODEV;
3668 /* Clear firmware's nvram arbitration. */
3669 if (tg3_flag(tp, NVRAM))
3670 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3671 return 0;
3674 static int tg3_fw_data_len(struct tg3 *tp,
3675 const struct tg3_firmware_hdr *fw_hdr)
3677 int fw_len;
3679 /* Non-fragmented firmware has one firmware header followed by a
3680 * contiguous chunk of data to be written. The length field in that
3681 * header is not the length of the data to be written but the
3682 * complete length of the bss. The data length is determined from
3683 * tp->fw->size minus the headers.
3684 *
3685 * Fragmented firmware has a main header followed by multiple
3686 * fragments. Each fragment is identical to non-fragmented firmware,
3687 * with a firmware header followed by a contiguous chunk of data. In
3688 * the main header, the length field is unused and set to 0xffffffff.
3689 * In each fragment header the length is the entire size of that
3690 * fragment, i.e. fragment data + header length. The data length is
3691 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3692 */
3693 if (tp->fw_len == 0xffffffff)
3694 fw_len = be32_to_cpu(fw_hdr->len);
3695 else
3696 fw_len = tp->fw->size;
3698 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
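/* Worked example (hypothetical sizes, assuming the 12-byte
 * version/base_addr/len header): a non-fragmented image with
 * tp->fw->size == 2120 gives
 *
 *	fw_len = 2120;
 *	data words = (2120 - TG3_FW_HDR_LEN) / sizeof(u32);  // == 527
 *
 * For fragmented images (tp->fw_len == 0xffffffff) the same math is
 * applied per fragment using that fragment header's length field.
 */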
3701 /* tp->lock is held. */
3702 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3703 u32 cpu_scratch_base, int cpu_scratch_size,
3704 const struct tg3_firmware_hdr *fw_hdr)
3706 int err, i;
3707 void (*write_op)(struct tg3 *, u32, u32);
3708 int total_len = tp->fw->size;
3710 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3711 netdev_err(tp->dev,
3712 "%s: Trying to load TX cpu firmware which is 5705\n",
3713 __func__);
3714 return -EINVAL;
3717 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3718 write_op = tg3_write_mem;
3719 else
3720 write_op = tg3_write_indirect_reg32;
3722 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3723 /* It is possible that bootcode is still loading at this point.
3724 * Get the nvram lock first before halting the cpu.
3725 */
3726 int lock_err = tg3_nvram_lock(tp);
3727 err = tg3_halt_cpu(tp, cpu_base);
3728 if (!lock_err)
3729 tg3_nvram_unlock(tp);
3730 if (err)
3731 goto out;
3733 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3734 write_op(tp, cpu_scratch_base + i, 0);
3735 tw32(cpu_base + CPU_STATE, 0xffffffff);
3736 tw32(cpu_base + CPU_MODE,
3737 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3738 } else {
3739 /* Subtract additional main header for fragmented firmware and
3740 * advance to the first fragment.
3741 */
3742 total_len -= TG3_FW_HDR_LEN;
3743 fw_hdr++;
3746 do {
3747 u32 *fw_data = (u32 *)(fw_hdr + 1);
3748 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3749 write_op(tp, cpu_scratch_base +
3750 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3751 (i * sizeof(u32)),
3752 be32_to_cpu(fw_data[i]));
3754 total_len -= be32_to_cpu(fw_hdr->len);
3756 /* Advance to next fragment */
3757 fw_hdr = (struct tg3_firmware_hdr *)
3758 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3759 } while (total_len > 0);
3761 err = 0;
3763 out:
3764 return err;
3767 /* tp->lock is held. */
3768 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3770 int i;
3771 const int iters = 5;
3773 tw32(cpu_base + CPU_STATE, 0xffffffff);
3774 tw32_f(cpu_base + CPU_PC, pc);
3776 for (i = 0; i < iters; i++) {
3777 if (tr32(cpu_base + CPU_PC) == pc)
3778 break;
3779 tw32(cpu_base + CPU_STATE, 0xffffffff);
3780 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3781 tw32_f(cpu_base + CPU_PC, pc);
3782 udelay(1000);
3785 return (i == iters) ? -EBUSY : 0;
3788 /* tp->lock is held. */
3789 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3791 const struct tg3_firmware_hdr *fw_hdr;
3792 int err;
3794 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3796 /* Firmware blob starts with version numbers, followed by
3797 * start address and length. We are setting complete length.
3798 * length = end_address_of_bss - start_address_of_text.
3799 * Remainder is the blob to be loaded contiguously
3800 * from start address. */
3802 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3803 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3804 fw_hdr);
3805 if (err)
3806 return err;
3808 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3809 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3810 fw_hdr);
3811 if (err)
3812 return err;
3814 /* Now startup only the RX cpu. */
3815 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3816 be32_to_cpu(fw_hdr->base_addr));
3817 if (err) {
3818 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3819 "should be %08x\n", __func__,
3820 tr32(RX_CPU_BASE + CPU_PC),
3821 be32_to_cpu(fw_hdr->base_addr));
3822 return -ENODEV;
3825 tg3_rxcpu_resume(tp);
3827 return 0;
3830 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3832 const int iters = 1000;
3833 int i;
3834 u32 val;
3836 /* Wait for boot code to complete initialization and enter service
3837 * loop. It is then safe to download service patches.
3838 */
3839 for (i = 0; i < iters; i++) {
3840 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3841 break;
3843 udelay(10);
3846 if (i == iters) {
3847 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3848 return -EBUSY;
3851 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3852 if (val & 0xff) {
3853 netdev_warn(tp->dev,
3854 "Other patches exist. Not downloading EEE patch\n");
3855 return -EEXIST;
3858 return 0;
3861 /* tp->lock is held. */
3862 static void tg3_load_57766_firmware(struct tg3 *tp)
3864 struct tg3_firmware_hdr *fw_hdr;
3866 if (!tg3_flag(tp, NO_NVRAM))
3867 return;
3869 if (tg3_validate_rxcpu_state(tp))
3870 return;
3872 if (!tp->fw)
3873 return;
3875 /* This firmware blob has a different format than older firmware
3876 * releases as given below. The main difference is we have fragmented
3877 * data to be written to non-contiguous locations.
3879 * In the beginning we have a firmware header identical to other
3880 * firmware which consists of version, base addr and length. The length
3881 * here is unused and set to 0xffffffff.
3883 * This is followed by a series of firmware fragments which are
3884 * individually identical to previous firmware, i.e. they have the
3885 * firmware header followed by data for that fragment. The version
3886 * field of the individual fragment header is unused.
3887 */
3889 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3890 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3891 return;
3893 if (tg3_rxcpu_pause(tp))
3894 return;
3896 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3897 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3899 tg3_rxcpu_resume(tp);
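/* Blob layout sketch for the fragmented format described above
 * (sizes illustrative; each header is version/base_addr/len):
 *
 *	main header        len = 0xffffffff (unused)
 *	fragment 1 header  len = L1 (header + data)
 *	fragment 1 data    L1 - TG3_FW_HDR_LEN bytes
 *	fragment 2 header  len = L2
 *	fragment 2 data    L2 - TG3_FW_HDR_LEN bytes
 *	...
 *
 * tg3_load_firmware_cpu() walks this list by advancing fw_hdr by
 * be32_to_cpu(fw_hdr->len) until total_len is consumed.
 */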
3902 /* tp->lock is held. */
3903 static int tg3_load_tso_firmware(struct tg3 *tp)
3905 const struct tg3_firmware_hdr *fw_hdr;
3906 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3907 int err;
3909 if (!tg3_flag(tp, FW_TSO))
3910 return 0;
3912 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3914 /* Firmware blob starts with version numbers, followed by
3915 * start address and length. We are setting complete length.
3916 * length = end_address_of_bss - start_address_of_text.
3917 * Remainder is the blob to be loaded contiguously
3918 * from start address. */
3920 cpu_scratch_size = tp->fw_len;
3922 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3923 cpu_base = RX_CPU_BASE;
3924 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3925 } else {
3926 cpu_base = TX_CPU_BASE;
3927 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3928 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3931 err = tg3_load_firmware_cpu(tp, cpu_base,
3932 cpu_scratch_base, cpu_scratch_size,
3933 fw_hdr);
3934 if (err)
3935 return err;
3937 /* Now startup the cpu. */
3938 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3939 be32_to_cpu(fw_hdr->base_addr));
3940 if (err) {
3941 netdev_err(tp->dev,
3942 "%s fails to set CPU PC, is %08x should be %08x\n",
3943 __func__, tr32(cpu_base + CPU_PC),
3944 be32_to_cpu(fw_hdr->base_addr));
3945 return -ENODEV;
3948 tg3_resume_cpu(tp, cpu_base);
3949 return 0;
3952 /* tp->lock is held. */
3953 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3955 u32 addr_high, addr_low;
3957 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3958 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3959 (mac_addr[4] << 8) | mac_addr[5]);
3961 if (index < 4) {
3962 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3963 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3964 } else {
3965 index -= 4;
3966 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3967 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3971 /* tp->lock is held. */
3972 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3974 u32 addr_high;
3975 int i;
3977 for (i = 0; i < 4; i++) {
3978 if (i == 1 && skip_mac_1)
3979 continue;
3980 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3983 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3984 tg3_asic_rev(tp) == ASIC_REV_5704) {
3985 for (i = 4; i < 16; i++)
3986 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3989 addr_high = (tp->dev->dev_addr[0] +
3990 tp->dev->dev_addr[1] +
3991 tp->dev->dev_addr[2] +
3992 tp->dev->dev_addr[3] +
3993 tp->dev->dev_addr[4] +
3994 tp->dev->dev_addr[5]) &
3995 TX_BACKOFF_SEED_MASK;
3996 tw32(MAC_TX_BACKOFF_SEED, addr_high);
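/* Example (illustrative): for MAC address 00:10:18:aa:bb:cc the layout
 * programmed above is
 *
 *	addr_high = 0x00000010;    // bytes 0-1
 *	addr_low  = 0x18aabbcc;    // bytes 2-5
 *
 * On 5703/5704 the same address is replicated into all 16 slots, and
 * the TX backoff seed is simply the sum of the six address bytes
 * masked with TX_BACKOFF_SEED_MASK.
 */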
3999 static void tg3_enable_register_access(struct tg3 *tp)
4001 /*
4002 * Make sure register accesses (indirect or otherwise) will function
4003 * correctly.
4004 */
4005 pci_write_config_dword(tp->pdev,
4006 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4009 static int tg3_power_up(struct tg3 *tp)
4011 int err;
4013 tg3_enable_register_access(tp);
4015 err = pci_set_power_state(tp->pdev, PCI_D0);
4016 if (!err) {
4017 /* Switch out of Vaux if it is a NIC */
4018 tg3_pwrsrc_switch_to_vmain(tp);
4019 } else {
4020 netdev_err(tp->dev, "Transition to D0 failed\n");
4023 return err;
4026 static int tg3_setup_phy(struct tg3 *, bool);
4028 static int tg3_power_down_prepare(struct tg3 *tp)
4030 u32 misc_host_ctrl;
4031 bool device_should_wake, do_low_power;
4033 tg3_enable_register_access(tp);
4035 /* Restore the CLKREQ setting. */
4036 if (tg3_flag(tp, CLKREQ_BUG))
4037 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4038 PCI_EXP_LNKCTL_CLKREQ_EN);
4040 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4041 tw32(TG3PCI_MISC_HOST_CTRL,
4042 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4044 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4045 tg3_flag(tp, WOL_ENABLE);
4047 if (tg3_flag(tp, USE_PHYLIB)) {
4048 do_low_power = false;
4049 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4050 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4051 struct phy_device *phydev;
4052 u32 phyid, advertising;
4054 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4056 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4058 tp->link_config.speed = phydev->speed;
4059 tp->link_config.duplex = phydev->duplex;
4060 tp->link_config.autoneg = phydev->autoneg;
4061 tp->link_config.advertising = phydev->advertising;
4063 advertising = ADVERTISED_TP |
4064 ADVERTISED_Pause |
4065 ADVERTISED_Autoneg |
4066 ADVERTISED_10baseT_Half;
4068 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4069 if (tg3_flag(tp, WOL_SPEED_100MB))
4070 advertising |=
4071 ADVERTISED_100baseT_Half |
4072 ADVERTISED_100baseT_Full |
4073 ADVERTISED_10baseT_Full;
4074 else
4075 advertising |= ADVERTISED_10baseT_Full;
4078 phydev->advertising = advertising;
4080 phy_start_aneg(phydev);
4082 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4083 if (phyid != PHY_ID_BCMAC131) {
4084 phyid &= PHY_BCM_OUI_MASK;
4085 if (phyid == PHY_BCM_OUI_1 ||
4086 phyid == PHY_BCM_OUI_2 ||
4087 phyid == PHY_BCM_OUI_3)
4088 do_low_power = true;
4091 } else {
4092 do_low_power = true;
4094 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4095 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4097 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4098 tg3_setup_phy(tp, false);
4101 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4102 u32 val;
4104 val = tr32(GRC_VCPU_EXT_CTRL);
4105 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4106 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4107 int i;
4108 u32 val;
4110 for (i = 0; i < 200; i++) {
4111 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4112 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4113 break;
4114 msleep(1);
4117 if (tg3_flag(tp, WOL_CAP))
4118 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4119 WOL_DRV_STATE_SHUTDOWN |
4120 WOL_DRV_WOL |
4121 WOL_SET_MAGIC_PKT);
4123 if (device_should_wake) {
4124 u32 mac_mode;
4126 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4127 if (do_low_power &&
4128 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4129 tg3_phy_auxctl_write(tp,
4130 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4131 MII_TG3_AUXCTL_PCTL_WOL_EN |
4132 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4133 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4134 udelay(40);
4137 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4138 mac_mode = MAC_MODE_PORT_MODE_GMII;
4139 else if (tp->phy_flags &
4140 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4141 if (tp->link_config.active_speed == SPEED_1000)
4142 mac_mode = MAC_MODE_PORT_MODE_GMII;
4143 else
4144 mac_mode = MAC_MODE_PORT_MODE_MII;
4145 } else
4146 mac_mode = MAC_MODE_PORT_MODE_MII;
4148 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4149 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4150 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4151 SPEED_100 : SPEED_10;
4152 if (tg3_5700_link_polarity(tp, speed))
4153 mac_mode |= MAC_MODE_LINK_POLARITY;
4154 else
4155 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4157 } else {
4158 mac_mode = MAC_MODE_PORT_MODE_TBI;
4161 if (!tg3_flag(tp, 5750_PLUS))
4162 tw32(MAC_LED_CTRL, tp->led_ctrl);
4164 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4165 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4166 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4167 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4169 if (tg3_flag(tp, ENABLE_APE))
4170 mac_mode |= MAC_MODE_APE_TX_EN |
4171 MAC_MODE_APE_RX_EN |
4172 MAC_MODE_TDE_ENABLE;
4174 tw32_f(MAC_MODE, mac_mode);
4175 udelay(100);
4177 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4178 udelay(10);
4181 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4182 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4183 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4184 u32 base_val;
4186 base_val = tp->pci_clock_ctrl;
4187 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4188 CLOCK_CTRL_TXCLK_DISABLE);
4190 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4191 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4192 } else if (tg3_flag(tp, 5780_CLASS) ||
4193 tg3_flag(tp, CPMU_PRESENT) ||
4194 tg3_asic_rev(tp) == ASIC_REV_5906) {
4195 /* do nothing */
4196 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4197 u32 newbits1, newbits2;
4199 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4200 tg3_asic_rev(tp) == ASIC_REV_5701) {
4201 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4202 CLOCK_CTRL_TXCLK_DISABLE |
4203 CLOCK_CTRL_ALTCLK);
4204 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4205 } else if (tg3_flag(tp, 5705_PLUS)) {
4206 newbits1 = CLOCK_CTRL_625_CORE;
4207 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4208 } else {
4209 newbits1 = CLOCK_CTRL_ALTCLK;
4210 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4213 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4214 40);
4216 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4217 40);
4219 if (!tg3_flag(tp, 5705_PLUS)) {
4220 u32 newbits3;
4222 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4223 tg3_asic_rev(tp) == ASIC_REV_5701) {
4224 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4225 CLOCK_CTRL_TXCLK_DISABLE |
4226 CLOCK_CTRL_44MHZ_CORE);
4227 } else {
4228 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4231 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4232 tp->pci_clock_ctrl | newbits3, 40);
4236 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4237 tg3_power_down_phy(tp, do_low_power);
4239 tg3_frob_aux_power(tp, true);
4241 /* Workaround for unstable PLL clock */
4242 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4243 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4244 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4245 u32 val = tr32(0x7d00);
4247 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4248 tw32(0x7d00, val);
4249 if (!tg3_flag(tp, ENABLE_ASF)) {
4250 int err;
4252 err = tg3_nvram_lock(tp);
4253 tg3_halt_cpu(tp, RX_CPU_BASE);
4254 if (!err)
4255 tg3_nvram_unlock(tp);
4259 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4261 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4263 return 0;
4266 static void tg3_power_down(struct tg3 *tp)
4268 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4269 pci_set_power_state(tp->pdev, PCI_D3hot);
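/* Decode the Broadcom auxiliary status register into a speed and
 * duplex pair. FET-style PHYs report the result in a compact
 * two-bit encoding instead, which the default case handles.
 */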
4272 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4274 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4275 case MII_TG3_AUX_STAT_10HALF:
4276 *speed = SPEED_10;
4277 *duplex = DUPLEX_HALF;
4278 break;
4280 case MII_TG3_AUX_STAT_10FULL:
4281 *speed = SPEED_10;
4282 *duplex = DUPLEX_FULL;
4283 break;
4285 case MII_TG3_AUX_STAT_100HALF:
4286 *speed = SPEED_100;
4287 *duplex = DUPLEX_HALF;
4288 break;
4290 case MII_TG3_AUX_STAT_100FULL:
4291 *speed = SPEED_100;
4292 *duplex = DUPLEX_FULL;
4293 break;
4295 case MII_TG3_AUX_STAT_1000HALF:
4296 *speed = SPEED_1000;
4297 *duplex = DUPLEX_HALF;
4298 break;
4300 case MII_TG3_AUX_STAT_1000FULL:
4301 *speed = SPEED_1000;
4302 *duplex = DUPLEX_FULL;
4303 break;
4305 default:
4306 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4307 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4308 SPEED_10;
4309 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4310 DUPLEX_HALF;
4311 break;
4313 *speed = SPEED_UNKNOWN;
4314 *duplex = DUPLEX_UNKNOWN;
4315 break;
4319 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4321 int err = 0;
4322 u32 val, new_adv;
4324 new_adv = ADVERTISE_CSMA;
4325 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4326 new_adv |= mii_advertise_flowctrl(flowctrl);
4328 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4329 if (err)
4330 goto done;
4332 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4333 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4335 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4336 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4337 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4339 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4340 if (err)
4341 goto done;
4344 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4345 goto done;
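/* Everything below configures the EEE advertisement: LPI is
 * masked in the CPMU while reconfiguring, and the abilities are
 * written to the clause 45 AN EEE advertisement register
 * (MDIO_AN_EEE_ADV, register 7.60 in 802.3az terms) through the
 * driver's indirect clause 45 write helper.
 */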
4347 tw32(TG3_CPMU_EEE_MODE,
4348 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4350 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4351 if (!err) {
4352 u32 err2;
4354 val = 0;
4355 /* Advertise 100-BaseTX EEE ability */
4356 if (advertise & ADVERTISED_100baseT_Full)
4357 val |= MDIO_AN_EEE_ADV_100TX;
4358 /* Advertise 1000-BaseT EEE ability */
4359 if (advertise & ADVERTISED_1000baseT_Full)
4360 val |= MDIO_AN_EEE_ADV_1000T;
4362 if (!tp->eee.eee_enabled) {
4363 val = 0;
4364 tp->eee.advertised = 0;
4365 } else {
4366 tp->eee.advertised = advertise &
4367 (ADVERTISED_100baseT_Full |
4368 ADVERTISED_1000baseT_Full);
4371 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4372 if (err)
4373 val = 0;
4375 switch (tg3_asic_rev(tp)) {
4376 case ASIC_REV_5717:
4377 case ASIC_REV_57765:
4378 case ASIC_REV_57766:
4379 case ASIC_REV_5719:
4380 /* If we advertised any EEE abilities above... */
4381 if (val)
4382 val = MII_TG3_DSP_TAP26_ALNOKO |
4383 MII_TG3_DSP_TAP26_RMRXSTO |
4384 MII_TG3_DSP_TAP26_OPCSINPT;
4385 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4386 /* Fall through */
4387 case ASIC_REV_5720:
4388 case ASIC_REV_5762:
4389 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4390 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4391 MII_TG3_DSP_CH34TP2_HIBW01);
4394 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4395 if (!err)
4396 err = err2;
4399 done:
4400 return err;
4403 static void tg3_phy_copper_begin(struct tg3 *tp)
4405 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4406 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4407 u32 adv, fc;
4409 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4410 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4411 adv = ADVERTISED_10baseT_Half |
4412 ADVERTISED_10baseT_Full;
4413 if (tg3_flag(tp, WOL_SPEED_100MB))
4414 adv |= ADVERTISED_100baseT_Half |
4415 ADVERTISED_100baseT_Full;
4416 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4417 if (!(tp->phy_flags &
4418 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4419 adv |= ADVERTISED_1000baseT_Half;
4420 adv |= ADVERTISED_1000baseT_Full;
4423 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4424 } else {
4425 adv = tp->link_config.advertising;
4426 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4427 adv &= ~(ADVERTISED_1000baseT_Half |
4428 ADVERTISED_1000baseT_Full);
4430 fc = tp->link_config.flowctrl;
4433 tg3_phy_autoneg_cfg(tp, adv, fc);
4435 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4436 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4437 /* Normally during power down we want to autonegotiate
4438 * the lowest possible speed for WOL. However, to avoid
4439 * link flap, we leave it untouched.
4440 */
4441 return;
4444 tg3_writephy(tp, MII_BMCR,
4445 BMCR_ANENABLE | BMCR_ANRESTART);
4446 } else {
4447 int i;
4448 u32 bmcr, orig_bmcr;
4450 tp->link_config.active_speed = tp->link_config.speed;
4451 tp->link_config.active_duplex = tp->link_config.duplex;
4453 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4454 /* With autoneg disabled, 5715 only links up when the
4455 * advertisement register has the configured speed
4456 * enabled.
4457 */
4458 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4461 bmcr = 0;
4462 switch (tp->link_config.speed) {
4463 default:
4464 case SPEED_10:
4465 break;
4467 case SPEED_100:
4468 bmcr |= BMCR_SPEED100;
4469 break;
4471 case SPEED_1000:
4472 bmcr |= BMCR_SPEED1000;
4473 break;
4476 if (tp->link_config.duplex == DUPLEX_FULL)
4477 bmcr |= BMCR_FULLDPLX;
4479 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4480 (bmcr != orig_bmcr)) {
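/* To force a new speed, first take the link down by putting the
 * PHY in loopback, poll BMSR for up to ~15 ms (1500 x 10 us)
 * until link drops, then program the real BMCR value.
 */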
4481 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4482 for (i = 0; i < 1500; i++) {
4483 u32 tmp;
4485 udelay(10);
4486 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4487 tg3_readphy(tp, MII_BMSR, &tmp))
4488 continue;
4489 if (!(tmp & BMSR_LSTATUS)) {
4490 udelay(40);
4491 break;
4494 tg3_writephy(tp, MII_BMCR, bmcr);
4495 udelay(40);
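/* Read the PHY's current configuration back into link_config so
 * the driver can adopt settings that firmware or a previous owner
 * already programmed (useful with link-flap avoidance) instead of
 * renegotiating from scratch.
 */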
4500 static int tg3_phy_pull_config(struct tg3 *tp)
4502 int err;
4503 u32 val;
4505 err = tg3_readphy(tp, MII_BMCR, &val);
4506 if (err)
4507 goto done;
4509 if (!(val & BMCR_ANENABLE)) {
4510 tp->link_config.autoneg = AUTONEG_DISABLE;
4511 tp->link_config.advertising = 0;
4512 tg3_flag_clear(tp, PAUSE_AUTONEG);
4514 err = -EIO;
4516 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4517 case 0:
4518 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4519 goto done;
4521 tp->link_config.speed = SPEED_10;
4522 break;
4523 case BMCR_SPEED100:
4524 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4525 goto done;
4527 tp->link_config.speed = SPEED_100;
4528 break;
4529 case BMCR_SPEED1000:
4530 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4531 tp->link_config.speed = SPEED_1000;
4532 break;
4534 /* Fall through */
4535 default:
4536 goto done;
4539 if (val & BMCR_FULLDPLX)
4540 tp->link_config.duplex = DUPLEX_FULL;
4541 else
4542 tp->link_config.duplex = DUPLEX_HALF;
4544 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4546 err = 0;
4547 goto done;
4550 tp->link_config.autoneg = AUTONEG_ENABLE;
4551 tp->link_config.advertising = ADVERTISED_Autoneg;
4552 tg3_flag_set(tp, PAUSE_AUTONEG);
4554 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4555 u32 adv;
4557 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4558 if (err)
4559 goto done;
4561 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4562 tp->link_config.advertising |= adv | ADVERTISED_TP;
4564 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4565 } else {
4566 tp->link_config.advertising |= ADVERTISED_FIBRE;
4569 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4570 u32 adv;
4572 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4573 err = tg3_readphy(tp, MII_CTRL1000, &val);
4574 if (err)
4575 goto done;
4577 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4578 } else {
4579 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4580 if (err)
4581 goto done;
4583 adv = tg3_decode_flowctrl_1000X(val);
4584 tp->link_config.flowctrl = adv;
4586 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4587 adv = mii_adv_to_ethtool_adv_x(val);
4590 tp->link_config.advertising |= adv;
4593 done:
4594 return err;
4597 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4599 int err;
4601 /* Turn off tap power management. */
4602 /* Set Extended packet length bit */
4603 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4605 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4606 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4607 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4608 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4609 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4611 udelay(40);
4613 return err;
4616 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4618 struct ethtool_eee eee;
4620 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4621 return true;
4623 tg3_eee_pull_config(tp, &eee);
4625 if (tp->eee.eee_enabled) {
4626 if (tp->eee.advertised != eee.advertised ||
4627 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4628 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4629 return false;
4630 } else {
4631 /* EEE is disabled but we're advertising */
4632 if (eee.advertised)
4633 return false;
4636 return true;
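/* Verify that what the PHY is actually advertising (MII_ADVERTISE,
 * plus MII_CTRL1000 on gigabit-capable parts) matches what we
 * intended to advertise; a mismatch is treated as a stale
 * configuration and forces a fresh autoneg pass.
 */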
4639 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4641 u32 advmsk, tgtadv, advertising;
4643 advertising = tp->link_config.advertising;
4644 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4646 advmsk = ADVERTISE_ALL;
4647 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4648 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4649 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4652 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4653 return false;
4655 if ((*lcladv & advmsk) != tgtadv)
4656 return false;
4658 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4659 u32 tg3_ctrl;
4661 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4663 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4664 return false;
4666 if (tgtadv &&
4667 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4668 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4669 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4670 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4671 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4672 } else {
4673 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4676 if (tg3_ctrl != tgtadv)
4677 return false;
4680 return true;
4683 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4685 u32 lpeth = 0;
4687 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4688 u32 val;
4690 if (tg3_readphy(tp, MII_STAT1000, &val))
4691 return false;
4693 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4696 if (tg3_readphy(tp, MII_LPA, rmtadv))
4697 return false;
4699 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4700 tp->link_config.rmt_adv = lpeth;
4702 return true;
4705 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4707 if (curr_link_up != tp->link_up) {
4708 if (curr_link_up) {
4709 netif_carrier_on(tp->dev);
4710 } else {
4711 netif_carrier_off(tp->dev);
4712 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4713 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4716 tg3_link_report(tp);
4717 return true;
4720 return false;
4723 static void tg3_clear_mac_status(struct tg3 *tp)
4725 tw32(MAC_EVENT, 0);
4727 tw32_f(MAC_STATUS,
4728 MAC_STATUS_SYNC_CHANGED |
4729 MAC_STATUS_CFG_CHANGED |
4730 MAC_STATUS_MI_COMPLETION |
4731 MAC_STATUS_LNKSTATE_CHANGED);
4732 udelay(40);
4735 static void tg3_setup_eee(struct tg3 *tp)
4737 u32 val;
4739 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4740 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4741 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4742 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4744 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4746 tw32_f(TG3_CPMU_EEE_CTRL,
4747 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4749 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4750 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4751 TG3_CPMU_EEEMD_LPI_IN_RX |
4752 TG3_CPMU_EEEMD_EEE_ENABLE;
4754 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4755 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4757 if (tg3_flag(tp, ENABLE_APE))
4758 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4760 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4762 tw32_f(TG3_CPMU_EEE_DBTMR1,
4763 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4764 (tp->eee.tx_lpi_timer & 0xffff));
4766 tw32_f(TG3_CPMU_EEE_DBTMR2,
4767 TG3_CPMU_DBTMR2_APE_TX_2047US |
4768 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4771 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4773 bool current_link_up;
4774 u32 bmsr, val;
4775 u32 lcl_adv, rmt_adv;
4776 u16 current_speed;
4777 u8 current_duplex;
4778 int i, err;
4780 tg3_clear_mac_status(tp);
4782 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4783 tw32_f(MAC_MI_MODE,
4784 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4785 udelay(80);
4788 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4790 /* Some third-party PHYs need to be reset on link going
4791 * down.
4792 */
4793 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4794 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4795 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4796 tp->link_up) {
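/* BMSR is read twice on purpose here and throughout this file:
 * the link-status bit is latched low per IEEE 802.3 clause 22, so
 * the first read clears any stale link-fail indication and the
 * second read returns the current state.
 */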
4797 tg3_readphy(tp, MII_BMSR, &bmsr);
4798 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4799 !(bmsr & BMSR_LSTATUS))
4800 force_reset = true;
4802 if (force_reset)
4803 tg3_phy_reset(tp);
4805 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4806 tg3_readphy(tp, MII_BMSR, &bmsr);
4807 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4808 !tg3_flag(tp, INIT_COMPLETE))
4809 bmsr = 0;
4811 if (!(bmsr & BMSR_LSTATUS)) {
4812 err = tg3_init_5401phy_dsp(tp);
4813 if (err)
4814 return err;
4816 tg3_readphy(tp, MII_BMSR, &bmsr);
4817 for (i = 0; i < 1000; i++) {
4818 udelay(10);
4819 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4820 (bmsr & BMSR_LSTATUS)) {
4821 udelay(40);
4822 break;
4826 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4827 TG3_PHY_REV_BCM5401_B0 &&
4828 !(bmsr & BMSR_LSTATUS) &&
4829 tp->link_config.active_speed == SPEED_1000) {
4830 err = tg3_phy_reset(tp);
4831 if (!err)
4832 err = tg3_init_5401phy_dsp(tp);
4833 if (err)
4834 return err;
4837 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4838 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4839 /* 5701 {A0,B0} CRC bug workaround */
4840 tg3_writephy(tp, 0x15, 0x0a75);
4841 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4842 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4843 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4846 /* Clear pending interrupts... */
4847 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4848 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4850 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4851 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4852 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4853 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4855 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4856 tg3_asic_rev(tp) == ASIC_REV_5701) {
4857 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4858 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4859 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4860 else
4861 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4864 current_link_up = false;
4865 current_speed = SPEED_UNKNOWN;
4866 current_duplex = DUPLEX_UNKNOWN;
4867 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4868 tp->link_config.rmt_adv = 0;
4870 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4871 err = tg3_phy_auxctl_read(tp,
4872 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4873 &val);
4874 if (!err && !(val & (1 << 10))) {
4875 tg3_phy_auxctl_write(tp,
4876 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4877 val | (1 << 10));
4878 goto relink;
4882 bmsr = 0;
4883 for (i = 0; i < 100; i++) {
4884 tg3_readphy(tp, MII_BMSR, &bmsr);
4885 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4886 (bmsr & BMSR_LSTATUS))
4887 break;
4888 udelay(40);
4891 if (bmsr & BMSR_LSTATUS) {
4892 u32 aux_stat, bmcr;
4894 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4895 for (i = 0; i < 2000; i++) {
4896 udelay(10);
4897 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4898 aux_stat)
4899 break;
4902 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4903 &current_speed,
4904 &current_duplex);
4906 bmcr = 0;
4907 for (i = 0; i < 200; i++) {
4908 tg3_readphy(tp, MII_BMCR, &bmcr);
4909 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4910 continue;
4911 if (bmcr && bmcr != 0x7fff)
4912 break;
4913 udelay(10);
4916 lcl_adv = 0;
4917 rmt_adv = 0;
4919 tp->link_config.active_speed = current_speed;
4920 tp->link_config.active_duplex = current_duplex;
4922 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4923 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4925 if ((bmcr & BMCR_ANENABLE) &&
4926 eee_config_ok &&
4927 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4928 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4929 current_link_up = true;
4931 /* EEE setting changes take effect only after a PHY
4932 * reset. If we have skipped a reset due to Link Flap
4933 * Avoidance being enabled, do it now.
4934 */
4935 if (!eee_config_ok &&
4936 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4937 !force_reset) {
4938 tg3_setup_eee(tp);
4939 tg3_phy_reset(tp);
4941 } else {
4942 if (!(bmcr & BMCR_ANENABLE) &&
4943 tp->link_config.speed == current_speed &&
4944 tp->link_config.duplex == current_duplex) {
4945 current_link_up = true;
4949 if (current_link_up &&
4950 tp->link_config.active_duplex == DUPLEX_FULL) {
4951 u32 reg, bit;
4953 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4954 reg = MII_TG3_FET_GEN_STAT;
4955 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4956 } else {
4957 reg = MII_TG3_EXT_STAT;
4958 bit = MII_TG3_EXT_STAT_MDIX;
4961 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4962 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4964 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4968 relink:
4969 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4970 tg3_phy_copper_begin(tp);
4972 if (tg3_flag(tp, ROBOSWITCH)) {
4973 current_link_up = true;
4974 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4975 current_speed = SPEED_1000;
4976 current_duplex = DUPLEX_FULL;
4977 tp->link_config.active_speed = current_speed;
4978 tp->link_config.active_duplex = current_duplex;
4981 tg3_readphy(tp, MII_BMSR, &bmsr);
4982 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4983 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4984 current_link_up = true;
4987 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4988 if (current_link_up) {
4989 if (tp->link_config.active_speed == SPEED_100 ||
4990 tp->link_config.active_speed == SPEED_10)
4991 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4992 else
4993 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4994 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4995 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4996 else
4997 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4999 /* In order for the 5750 core in BCM4785 chip to work properly
5000 * in RGMII mode, the Led Control Register must be set up.
5001 */
5002 if (tg3_flag(tp, RGMII_MODE)) {
5003 u32 led_ctrl = tr32(MAC_LED_CTRL);
5004 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5006 if (tp->link_config.active_speed == SPEED_10)
5007 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5008 else if (tp->link_config.active_speed == SPEED_100)
5009 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5010 LED_CTRL_100MBPS_ON);
5011 else if (tp->link_config.active_speed == SPEED_1000)
5012 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5013 LED_CTRL_1000MBPS_ON);
5015 tw32(MAC_LED_CTRL, led_ctrl);
5016 udelay(40);
5019 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5020 if (tp->link_config.active_duplex == DUPLEX_HALF)
5021 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5023 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5024 if (current_link_up &&
5025 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5026 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5027 else
5028 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5031 /* ??? Without this setting Netgear GA302T PHY does not
5032 * ??? send/receive packets...
5033 */
5034 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5035 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5036 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5037 tw32_f(MAC_MI_MODE, tp->mi_mode);
5038 udelay(80);
5041 tw32_f(MAC_MODE, tp->mac_mode);
5042 udelay(40);
5044 tg3_phy_eee_adjust(tp, current_link_up);
5046 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5047 /* Polled via timer. */
5048 tw32_f(MAC_EVENT, 0);
5049 } else {
5050 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5052 udelay(40);
5054 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5055 current_link_up &&
5056 tp->link_config.active_speed == SPEED_1000 &&
5057 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5058 udelay(120);
5059 tw32_f(MAC_STATUS,
5060 (MAC_STATUS_SYNC_CHANGED |
5061 MAC_STATUS_CFG_CHANGED));
5062 udelay(40);
5063 tg3_write_mem(tp,
5064 NIC_SRAM_FIRMWARE_MBOX,
5065 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5068 /* Prevent send BD corruption: with the CLKREQ bug, PCIe CLKREQ must stay disabled at 10/100 link speeds. */
5069 if (tg3_flag(tp, CLKREQ_BUG)) {
5070 if (tp->link_config.active_speed == SPEED_100 ||
5071 tp->link_config.active_speed == SPEED_10)
5072 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5073 PCI_EXP_LNKCTL_CLKREQ_EN);
5074 else
5075 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5076 PCI_EXP_LNKCTL_CLKREQ_EN);
5079 tg3_test_and_report_link_chg(tp, current_link_up);
5081 return 0;
5084 struct tg3_fiber_aneginfo {
5085 int state;
5086 #define ANEG_STATE_UNKNOWN 0
5087 #define ANEG_STATE_AN_ENABLE 1
5088 #define ANEG_STATE_RESTART_INIT 2
5089 #define ANEG_STATE_RESTART 3
5090 #define ANEG_STATE_DISABLE_LINK_OK 4
5091 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5092 #define ANEG_STATE_ABILITY_DETECT 6
5093 #define ANEG_STATE_ACK_DETECT_INIT 7
5094 #define ANEG_STATE_ACK_DETECT 8
5095 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5096 #define ANEG_STATE_COMPLETE_ACK 10
5097 #define ANEG_STATE_IDLE_DETECT_INIT 11
5098 #define ANEG_STATE_IDLE_DETECT 12
5099 #define ANEG_STATE_LINK_OK 13
5100 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5101 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5103 u32 flags;
5104 #define MR_AN_ENABLE 0x00000001
5105 #define MR_RESTART_AN 0x00000002
5106 #define MR_AN_COMPLETE 0x00000004
5107 #define MR_PAGE_RX 0x00000008
5108 #define MR_NP_LOADED 0x00000010
5109 #define MR_TOGGLE_TX 0x00000020
5110 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5111 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5112 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5113 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5114 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5115 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5116 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5117 #define MR_TOGGLE_RX 0x00002000
5118 #define MR_NP_RX 0x00004000
5120 #define MR_LINK_OK 0x80000000
5122 unsigned long link_time, cur_time;
5124 u32 ability_match_cfg;
5125 int ability_match_count;
5127 char ability_match, idle_match, ack_match;
5129 u32 txconfig, rxconfig;
5130 #define ANEG_CFG_NP 0x00000080
5131 #define ANEG_CFG_ACK 0x00000040
5132 #define ANEG_CFG_RF2 0x00000020
5133 #define ANEG_CFG_RF1 0x00000010
5134 #define ANEG_CFG_PS2 0x00000001
5135 #define ANEG_CFG_PS1 0x00008000
5136 #define ANEG_CFG_HD 0x00004000
5137 #define ANEG_CFG_FD 0x00002000
5138 #define ANEG_CFG_INVAL 0x00001f06
5141 #define ANEG_OK 0
5142 #define ANEG_DONE 1
5143 #define ANEG_TIMER_ENAB 2
5144 #define ANEG_FAILED -1
5146 #define ANEG_STATE_SETTLE_TIME 10000
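/* The state machine below is ticked from fiber_autoneg() roughly
 * once per microsecond, so ANEG_STATE_SETTLE_TIME corresponds to
 * about 10 ms of settle time (ignoring loop overhead).
 */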
5148 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5149 struct tg3_fiber_aneginfo *ap)
5151 u16 flowctrl;
5152 unsigned long delta;
5153 u32 rx_cfg_reg;
5154 int ret;
5156 if (ap->state == ANEG_STATE_UNKNOWN) {
5157 ap->rxconfig = 0;
5158 ap->link_time = 0;
5159 ap->cur_time = 0;
5160 ap->ability_match_cfg = 0;
5161 ap->ability_match_count = 0;
5162 ap->ability_match = 0;
5163 ap->idle_match = 0;
5164 ap->ack_match = 0;
5166 ap->cur_time++;
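/* Mirror clause 37's ability_match: only declare a match after
 * the same config word has been sampled three times in a row,
 * which is what the ability_match_count > 1 test below encodes.
 */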
5168 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5169 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5171 if (rx_cfg_reg != ap->ability_match_cfg) {
5172 ap->ability_match_cfg = rx_cfg_reg;
5173 ap->ability_match = 0;
5174 ap->ability_match_count = 0;
5175 } else {
5176 if (++ap->ability_match_count > 1) {
5177 ap->ability_match = 1;
5178 ap->ability_match_cfg = rx_cfg_reg;
5181 if (rx_cfg_reg & ANEG_CFG_ACK)
5182 ap->ack_match = 1;
5183 else
5184 ap->ack_match = 0;
5186 ap->idle_match = 0;
5187 } else {
5188 ap->idle_match = 1;
5189 ap->ability_match_cfg = 0;
5190 ap->ability_match_count = 0;
5191 ap->ability_match = 0;
5192 ap->ack_match = 0;
5194 rx_cfg_reg = 0;
5197 ap->rxconfig = rx_cfg_reg;
5198 ret = ANEG_OK;
5200 switch (ap->state) {
5201 case ANEG_STATE_UNKNOWN:
5202 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5203 ap->state = ANEG_STATE_AN_ENABLE;
5205 /* fallthru */
5206 case ANEG_STATE_AN_ENABLE:
5207 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5208 if (ap->flags & MR_AN_ENABLE) {
5209 ap->link_time = 0;
5210 ap->cur_time = 0;
5211 ap->ability_match_cfg = 0;
5212 ap->ability_match_count = 0;
5213 ap->ability_match = 0;
5214 ap->idle_match = 0;
5215 ap->ack_match = 0;
5217 ap->state = ANEG_STATE_RESTART_INIT;
5218 } else {
5219 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5221 break;
5223 case ANEG_STATE_RESTART_INIT:
5224 ap->link_time = ap->cur_time;
5225 ap->flags &= ~(MR_NP_LOADED);
5226 ap->txconfig = 0;
5227 tw32(MAC_TX_AUTO_NEG, 0);
5228 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5229 tw32_f(MAC_MODE, tp->mac_mode);
5230 udelay(40);
5232 ret = ANEG_TIMER_ENAB;
5233 ap->state = ANEG_STATE_RESTART;
5235 /* fallthru */
5236 case ANEG_STATE_RESTART:
5237 delta = ap->cur_time - ap->link_time;
5238 if (delta > ANEG_STATE_SETTLE_TIME)
5239 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5240 else
5241 ret = ANEG_TIMER_ENAB;
5242 break;
5244 case ANEG_STATE_DISABLE_LINK_OK:
5245 ret = ANEG_DONE;
5246 break;
5248 case ANEG_STATE_ABILITY_DETECT_INIT:
5249 ap->flags &= ~(MR_TOGGLE_TX);
5250 ap->txconfig = ANEG_CFG_FD;
5251 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5252 if (flowctrl & ADVERTISE_1000XPAUSE)
5253 ap->txconfig |= ANEG_CFG_PS1;
5254 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5255 ap->txconfig |= ANEG_CFG_PS2;
5256 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5257 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5258 tw32_f(MAC_MODE, tp->mac_mode);
5259 udelay(40);
5261 ap->state = ANEG_STATE_ABILITY_DETECT;
5262 break;
5264 case ANEG_STATE_ABILITY_DETECT:
5265 if (ap->ability_match != 0 && ap->rxconfig != 0)
5266 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5267 break;
5269 case ANEG_STATE_ACK_DETECT_INIT:
5270 ap->txconfig |= ANEG_CFG_ACK;
5271 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5272 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5273 tw32_f(MAC_MODE, tp->mac_mode);
5274 udelay(40);
5276 ap->state = ANEG_STATE_ACK_DETECT;
5278 /* fallthru */
5279 case ANEG_STATE_ACK_DETECT:
5280 if (ap->ack_match != 0) {
5281 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5282 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5283 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5284 } else {
5285 ap->state = ANEG_STATE_AN_ENABLE;
5287 } else if (ap->ability_match != 0 &&
5288 ap->rxconfig == 0) {
5289 ap->state = ANEG_STATE_AN_ENABLE;
5291 break;
5293 case ANEG_STATE_COMPLETE_ACK_INIT:
5294 if (ap->rxconfig & ANEG_CFG_INVAL) {
5295 ret = ANEG_FAILED;
5296 break;
5298 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5299 MR_LP_ADV_HALF_DUPLEX |
5300 MR_LP_ADV_SYM_PAUSE |
5301 MR_LP_ADV_ASYM_PAUSE |
5302 MR_LP_ADV_REMOTE_FAULT1 |
5303 MR_LP_ADV_REMOTE_FAULT2 |
5304 MR_LP_ADV_NEXT_PAGE |
5305 MR_TOGGLE_RX |
5306 MR_NP_RX);
5307 if (ap->rxconfig & ANEG_CFG_FD)
5308 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5309 if (ap->rxconfig & ANEG_CFG_HD)
5310 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5311 if (ap->rxconfig & ANEG_CFG_PS1)
5312 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5313 if (ap->rxconfig & ANEG_CFG_PS2)
5314 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5315 if (ap->rxconfig & ANEG_CFG_RF1)
5316 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5317 if (ap->rxconfig & ANEG_CFG_RF2)
5318 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5319 if (ap->rxconfig & ANEG_CFG_NP)
5320 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5322 ap->link_time = ap->cur_time;
5324 ap->flags ^= (MR_TOGGLE_TX);
5325 if (ap->rxconfig & 0x0008)
5326 ap->flags |= MR_TOGGLE_RX;
5327 if (ap->rxconfig & ANEG_CFG_NP)
5328 ap->flags |= MR_NP_RX;
5329 ap->flags |= MR_PAGE_RX;
5331 ap->state = ANEG_STATE_COMPLETE_ACK;
5332 ret = ANEG_TIMER_ENAB;
5333 break;
5335 case ANEG_STATE_COMPLETE_ACK:
5336 if (ap->ability_match != 0 &&
5337 ap->rxconfig == 0) {
5338 ap->state = ANEG_STATE_AN_ENABLE;
5339 break;
5341 delta = ap->cur_time - ap->link_time;
5342 if (delta > ANEG_STATE_SETTLE_TIME) {
5343 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5344 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5345 } else {
5346 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5347 !(ap->flags & MR_NP_RX)) {
5348 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5349 } else {
5350 ret = ANEG_FAILED;
5354 break;
5356 case ANEG_STATE_IDLE_DETECT_INIT:
5357 ap->link_time = ap->cur_time;
5358 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5359 tw32_f(MAC_MODE, tp->mac_mode);
5360 udelay(40);
5362 ap->state = ANEG_STATE_IDLE_DETECT;
5363 ret = ANEG_TIMER_ENAB;
5364 break;
5366 case ANEG_STATE_IDLE_DETECT:
5367 if (ap->ability_match != 0 &&
5368 ap->rxconfig == 0) {
5369 ap->state = ANEG_STATE_AN_ENABLE;
5370 break;
5372 delta = ap->cur_time - ap->link_time;
5373 if (delta > ANEG_STATE_SETTLE_TIME) {
5374 /* XXX another gem from the Broadcom driver :( */
5375 ap->state = ANEG_STATE_LINK_OK;
5377 break;
5379 case ANEG_STATE_LINK_OK:
5380 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5381 ret = ANEG_DONE;
5382 break;
5384 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5385 /* ??? unimplemented */
5386 break;
5388 case ANEG_STATE_NEXT_PAGE_WAIT:
5389 /* ??? unimplemented */
5390 break;
5392 default:
5393 ret = ANEG_FAILED;
5394 break;
5397 return ret;
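/* Drive the software autoneg state machine to completion. Each
 * loop pass below is one ~1 us tick, bounded at 195000 ticks, so
 * the whole negotiation gets roughly 195 ms before it is
 * abandoned.
 */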
5400 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5402 int res = 0;
5403 struct tg3_fiber_aneginfo aninfo;
5404 int status = ANEG_FAILED;
5405 unsigned int tick;
5406 u32 tmp;
5408 tw32_f(MAC_TX_AUTO_NEG, 0);
5410 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5411 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5412 udelay(40);
5414 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5415 udelay(40);
5417 memset(&aninfo, 0, sizeof(aninfo));
5418 aninfo.flags |= MR_AN_ENABLE;
5419 aninfo.state = ANEG_STATE_UNKNOWN;
5420 aninfo.cur_time = 0;
5421 tick = 0;
5422 while (++tick < 195000) {
5423 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5424 if (status == ANEG_DONE || status == ANEG_FAILED)
5425 break;
5427 udelay(1);
5430 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5431 tw32_f(MAC_MODE, tp->mac_mode);
5432 udelay(40);
5434 *txflags = aninfo.txconfig;
5435 *rxflags = aninfo.flags;
5437 if (status == ANEG_DONE &&
5438 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5439 MR_LP_ADV_FULL_DUPLEX)))
5440 res = 1;
5442 return res;
5445 static void tg3_init_bcm8002(struct tg3 *tp)
5447 u32 mac_status = tr32(MAC_STATUS);
5448 int i;
5450 /* Reset when initializing for the first time or when we have a link. */
5451 if (tg3_flag(tp, INIT_COMPLETE) &&
5452 !(mac_status & MAC_STATUS_PCS_SYNCED))
5453 return;
5455 /* Set PLL lock range. */
5456 tg3_writephy(tp, 0x16, 0x8007);
5458 /* SW reset */
5459 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5461 /* Wait for reset to complete. */
5462 /* XXX schedule_timeout() ... */
5463 for (i = 0; i < 500; i++)
5464 udelay(10);
5466 /* Config mode; select PMA/Ch 1 regs. */
5467 tg3_writephy(tp, 0x10, 0x8411);
5469 /* Enable auto-lock and comdet, select txclk for tx. */
5470 tg3_writephy(tp, 0x11, 0x0a10);
5472 tg3_writephy(tp, 0x18, 0x00a0);
5473 tg3_writephy(tp, 0x16, 0x41ff);
5475 /* Assert and deassert POR. */
5476 tg3_writephy(tp, 0x13, 0x0400);
5477 udelay(40);
5478 tg3_writephy(tp, 0x13, 0x0000);
5480 tg3_writephy(tp, 0x11, 0x0a50);
5481 udelay(40);
5482 tg3_writephy(tp, 0x11, 0x0a10);
5484 /* Wait for signal to stabilize */
5485 /* XXX schedule_timeout() ... */
5486 for (i = 0; i < 15000; i++)
5487 udelay(10);
5489 /* Deselect the channel register so we can read the PHYID
5490 * later.
5491 */
5492 tg3_writephy(tp, 0x10, 0x8011);
5495 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5497 u16 flowctrl;
5498 bool current_link_up;
5499 u32 sg_dig_ctrl, sg_dig_status;
5500 u32 serdes_cfg, expected_sg_dig_ctrl;
5501 int workaround, port_a;
5503 serdes_cfg = 0;
5504 expected_sg_dig_ctrl = 0;
5505 workaround = 0;
5506 port_a = 1;
5507 current_link_up = false;
5509 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5510 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5511 workaround = 1;
5512 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5513 port_a = 0;
5515 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5516 /* preserve bits 20-23 for voltage regulator */
5517 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5520 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5522 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5523 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5524 if (workaround) {
5525 u32 val = serdes_cfg;
5527 if (port_a)
5528 val |= 0xc010000;
5529 else
5530 val |= 0x4010000;
5531 tw32_f(MAC_SERDES_CFG, val);
5534 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5536 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5537 tg3_setup_flow_control(tp, 0, 0);
5538 current_link_up = true;
5540 goto out;
5543 /* Want auto-negotiation. */
5544 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5546 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5547 if (flowctrl & ADVERTISE_1000XPAUSE)
5548 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5549 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5550 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5552 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5553 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5554 tp->serdes_counter &&
5555 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5556 MAC_STATUS_RCVD_CFG)) ==
5557 MAC_STATUS_PCS_SYNCED)) {
5558 tp->serdes_counter--;
5559 current_link_up = true;
5560 goto out;
5562 restart_autoneg:
5563 if (workaround)
5564 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5565 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5566 udelay(5);
5567 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5569 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5570 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5571 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5572 MAC_STATUS_SIGNAL_DET)) {
5573 sg_dig_status = tr32(SG_DIG_STATUS);
5574 mac_status = tr32(MAC_STATUS);
5576 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5577 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5578 u32 local_adv = 0, remote_adv = 0;
5580 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5581 local_adv |= ADVERTISE_1000XPAUSE;
5582 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5583 local_adv |= ADVERTISE_1000XPSE_ASYM;
5585 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5586 remote_adv |= LPA_1000XPAUSE;
5587 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5588 remote_adv |= LPA_1000XPAUSE_ASYM;
5590 tp->link_config.rmt_adv =
5591 mii_adv_to_ethtool_adv_x(remote_adv);
5593 tg3_setup_flow_control(tp, local_adv, remote_adv);
5594 current_link_up = true;
5595 tp->serdes_counter = 0;
5596 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5597 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5598 if (tp->serdes_counter)
5599 tp->serdes_counter--;
5600 else {
5601 if (workaround) {
5602 u32 val = serdes_cfg;
5604 if (port_a)
5605 val |= 0xc010000;
5606 else
5607 val |= 0x4010000;
5609 tw32_f(MAC_SERDES_CFG, val);
5612 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5613 udelay(40);
5615 /* Link parallel detection: link is up only if we
5616 * have PCS_SYNC and are not receiving config code
5617 * words. */
5618 mac_status = tr32(MAC_STATUS);
5619 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5620 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5621 tg3_setup_flow_control(tp, 0, 0);
5622 current_link_up = true;
5623 tp->phy_flags |=
5624 TG3_PHYFLG_PARALLEL_DETECT;
5625 tp->serdes_counter =
5626 SERDES_PARALLEL_DET_TIMEOUT;
5627 } else
5628 goto restart_autoneg;
5631 } else {
5632 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5633 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5636 out:
5637 return current_link_up;
5640 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5642 bool current_link_up = false;
5644 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5645 goto out;
5647 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5648 u32 txflags, rxflags;
5649 int i;
5651 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5652 u32 local_adv = 0, remote_adv = 0;
5654 if (txflags & ANEG_CFG_PS1)
5655 local_adv |= ADVERTISE_1000XPAUSE;
5656 if (txflags & ANEG_CFG_PS2)
5657 local_adv |= ADVERTISE_1000XPSE_ASYM;
5659 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5660 remote_adv |= LPA_1000XPAUSE;
5661 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5662 remote_adv |= LPA_1000XPAUSE_ASYM;
5664 tp->link_config.rmt_adv =
5665 mii_adv_to_ethtool_adv_x(remote_adv);
5667 tg3_setup_flow_control(tp, local_adv, remote_adv);
5669 current_link_up = true;
5671 for (i = 0; i < 30; i++) {
5672 udelay(20);
5673 tw32_f(MAC_STATUS,
5674 (MAC_STATUS_SYNC_CHANGED |
5675 MAC_STATUS_CFG_CHANGED));
5676 udelay(40);
5677 if ((tr32(MAC_STATUS) &
5678 (MAC_STATUS_SYNC_CHANGED |
5679 MAC_STATUS_CFG_CHANGED)) == 0)
5680 break;
5683 mac_status = tr32(MAC_STATUS);
5684 if (!current_link_up &&
5685 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5686 !(mac_status & MAC_STATUS_RCVD_CFG))
5687 current_link_up = true;
5688 } else {
5689 tg3_setup_flow_control(tp, 0, 0);
5691 /* Forcing 1000FD link up. */
5692 current_link_up = true;
5694 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5695 udelay(40);
5697 tw32_f(MAC_MODE, tp->mac_mode);
5698 udelay(40);
5701 out:
5702 return current_link_up;
5705 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5707 u32 orig_pause_cfg;
5708 u16 orig_active_speed;
5709 u8 orig_active_duplex;
5710 u32 mac_status;
5711 bool current_link_up;
5712 int i;
5714 orig_pause_cfg = tp->link_config.active_flowctrl;
5715 orig_active_speed = tp->link_config.active_speed;
5716 orig_active_duplex = tp->link_config.active_duplex;
5718 if (!tg3_flag(tp, HW_AUTONEG) &&
5719 tp->link_up &&
5720 tg3_flag(tp, INIT_COMPLETE)) {
5721 mac_status = tr32(MAC_STATUS);
5722 mac_status &= (MAC_STATUS_PCS_SYNCED |
5723 MAC_STATUS_SIGNAL_DET |
5724 MAC_STATUS_CFG_CHANGED |
5725 MAC_STATUS_RCVD_CFG);
5726 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5727 MAC_STATUS_SIGNAL_DET)) {
5728 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5729 MAC_STATUS_CFG_CHANGED));
5730 return 0;
5734 tw32_f(MAC_TX_AUTO_NEG, 0);
5736 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5737 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5738 tw32_f(MAC_MODE, tp->mac_mode);
5739 udelay(40);
5741 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5742 tg3_init_bcm8002(tp);
5744 /* Enable link change event even when serdes polling. */
5745 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5746 udelay(40);
5748 current_link_up = false;
5749 tp->link_config.rmt_adv = 0;
5750 mac_status = tr32(MAC_STATUS);
5752 if (tg3_flag(tp, HW_AUTONEG))
5753 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5754 else
5755 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5757 tp->napi[0].hw_status->status =
5758 (SD_STATUS_UPDATED |
5759 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5761 for (i = 0; i < 100; i++) {
5762 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5763 MAC_STATUS_CFG_CHANGED));
5764 udelay(5);
5765 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5766 MAC_STATUS_CFG_CHANGED |
5767 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5768 break;
5771 mac_status = tr32(MAC_STATUS);
5772 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5773 current_link_up = false;
5774 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5775 tp->serdes_counter == 0) {
5776 tw32_f(MAC_MODE, (tp->mac_mode |
5777 MAC_MODE_SEND_CONFIGS));
5778 udelay(1);
5779 tw32_f(MAC_MODE, tp->mac_mode);
5783 if (current_link_up) {
5784 tp->link_config.active_speed = SPEED_1000;
5785 tp->link_config.active_duplex = DUPLEX_FULL;
5786 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5787 LED_CTRL_LNKLED_OVERRIDE |
5788 LED_CTRL_1000MBPS_ON));
5789 } else {
5790 tp->link_config.active_speed = SPEED_UNKNOWN;
5791 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5792 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5793 LED_CTRL_LNKLED_OVERRIDE |
5794 LED_CTRL_TRAFFIC_OVERRIDE));
5797 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5798 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5799 if (orig_pause_cfg != now_pause_cfg ||
5800 orig_active_speed != tp->link_config.active_speed ||
5801 orig_active_duplex != tp->link_config.active_duplex)
5802 tg3_link_report(tp);
5805 return 0;
5808 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5810 int err = 0;
5811 u32 bmsr, bmcr;
5812 u16 current_speed = SPEED_UNKNOWN;
5813 u8 current_duplex = DUPLEX_UNKNOWN;
5814 bool current_link_up = false;
5815 u32 local_adv, remote_adv, sgsr;
5817 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5818 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5819 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5820 (sgsr & SERDES_TG3_SGMII_MODE)) {
5822 if (force_reset)
5823 tg3_phy_reset(tp);
5825 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5827 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5828 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5829 } else {
5830 current_link_up = true;
5831 if (sgsr & SERDES_TG3_SPEED_1000) {
5832 current_speed = SPEED_1000;
5833 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5834 } else if (sgsr & SERDES_TG3_SPEED_100) {
5835 current_speed = SPEED_100;
5836 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5837 } else {
5838 current_speed = SPEED_10;
5839 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5842 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5843 current_duplex = DUPLEX_FULL;
5844 else
5845 current_duplex = DUPLEX_HALF;
5848 tw32_f(MAC_MODE, tp->mac_mode);
5849 udelay(40);
5851 tg3_clear_mac_status(tp);
5853 goto fiber_setup_done;
5856 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5857 tw32_f(MAC_MODE, tp->mac_mode);
5858 udelay(40);
5860 tg3_clear_mac_status(tp);
5862 if (force_reset)
5863 tg3_phy_reset(tp);
5865 tp->link_config.rmt_adv = 0;
5867 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5868 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5869 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5870 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5871 bmsr |= BMSR_LSTATUS;
5872 else
5873 bmsr &= ~BMSR_LSTATUS;
5876 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5878 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5879 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5880 /* do nothing, just check for link up at the end */
5881 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5882 u32 adv, newadv;
5884 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5885 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5886 ADVERTISE_1000XPAUSE |
5887 ADVERTISE_1000XPSE_ASYM |
5888 ADVERTISE_SLCT);
5890 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5891 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5893 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5894 tg3_writephy(tp, MII_ADVERTISE, newadv);
5895 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5896 tg3_writephy(tp, MII_BMCR, bmcr);
5898 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5899 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5900 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5902 return err;
5904 } else {
5905 u32 new_bmcr;
5907 bmcr &= ~BMCR_SPEED1000;
5908 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5910 if (tp->link_config.duplex == DUPLEX_FULL)
5911 new_bmcr |= BMCR_FULLDPLX;
5913 if (new_bmcr != bmcr) {
5914 /* BMCR_SPEED1000 is a reserved bit that needs
5915 * to be set on write.
5916 */
5917 new_bmcr |= BMCR_SPEED1000;
5919 /* Force a linkdown */
5920 if (tp->link_up) {
5921 u32 adv;
5923 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5924 adv &= ~(ADVERTISE_1000XFULL |
5925 ADVERTISE_1000XHALF |
5926 ADVERTISE_SLCT);
5927 tg3_writephy(tp, MII_ADVERTISE, adv);
5928 tg3_writephy(tp, MII_BMCR, bmcr |
5929 BMCR_ANRESTART |
5930 BMCR_ANENABLE);
5931 udelay(10);
5932 tg3_carrier_off(tp);
5934 tg3_writephy(tp, MII_BMCR, new_bmcr);
5935 bmcr = new_bmcr;
5936 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5937 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5938 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5939 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5940 bmsr |= BMSR_LSTATUS;
5941 else
5942 bmsr &= ~BMSR_LSTATUS;
5944 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5948 if (bmsr & BMSR_LSTATUS) {
5949 current_speed = SPEED_1000;
5950 current_link_up = true;
5951 if (bmcr & BMCR_FULLDPLX)
5952 current_duplex = DUPLEX_FULL;
5953 else
5954 current_duplex = DUPLEX_HALF;
5956 local_adv = 0;
5957 remote_adv = 0;
5959 if (bmcr & BMCR_ANENABLE) {
5960 u32 common;
5962 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5963 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5964 common = local_adv & remote_adv;
5965 if (common & (ADVERTISE_1000XHALF |
5966 ADVERTISE_1000XFULL)) {
5967 if (common & ADVERTISE_1000XFULL)
5968 current_duplex = DUPLEX_FULL;
5969 else
5970 current_duplex = DUPLEX_HALF;
5972 tp->link_config.rmt_adv =
5973 mii_adv_to_ethtool_adv_x(remote_adv);
5974 } else if (!tg3_flag(tp, 5780_CLASS)) {
5975 /* Link is up via parallel detect */
5976 } else {
5977 current_link_up = false;
5982 fiber_setup_done:
5983 if (current_link_up && current_duplex == DUPLEX_FULL)
5984 tg3_setup_flow_control(tp, local_adv, remote_adv);
5986 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5987 if (tp->link_config.active_duplex == DUPLEX_HALF)
5988 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5990 tw32_f(MAC_MODE, tp->mac_mode);
5991 udelay(40);
5993 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5995 tp->link_config.active_speed = current_speed;
5996 tp->link_config.active_duplex = current_duplex;
5998 tg3_test_and_report_link_chg(tp, current_link_up);
5999 return err;
6002 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6004 if (tp->serdes_counter) {
6005 /* Give autoneg time to complete. */
6006 tp->serdes_counter--;
6007 return;
6010 if (!tp->link_up &&
6011 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6012 u32 bmcr;
6014 tg3_readphy(tp, MII_BMCR, &bmcr);
6015 if (bmcr & BMCR_ANENABLE) {
6016 u32 phy1, phy2;
6018 /* Select shadow register 0x1f */
6019 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6020 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6022 /* Select expansion interrupt status register */
6023 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6024 MII_TG3_DSP_EXP1_INT_STAT);
6025 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6026 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6028 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6029 /* We have signal detect and not receiving
6030 * config code words, link is up by parallel
6031 * detection.
6032 */
6034 bmcr &= ~BMCR_ANENABLE;
6035 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6036 tg3_writephy(tp, MII_BMCR, bmcr);
6037 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6040 } else if (tp->link_up &&
6041 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6042 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6043 u32 phy2;
6045 /* Select expansion interrupt status register */
6046 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6047 MII_TG3_DSP_EXP1_INT_STAT);
6048 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6049 if (phy2 & 0x20) {
6050 u32 bmcr;
6052 /* Config code words received, turn on autoneg. */
6053 tg3_readphy(tp, MII_BMCR, &bmcr);
6054 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6056 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6062 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6064 u32 val;
6065 int err;
6067 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6068 err = tg3_setup_fiber_phy(tp, force_reset);
6069 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6070 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6071 else
6072 err = tg3_setup_copper_phy(tp, force_reset);
6074 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6075 u32 scale;
6077 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6078 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6079 scale = 65;
6080 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6081 scale = 6;
6082 else
6083 scale = 12;
6085 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6086 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6087 tw32(GRC_MISC_CFG, val);
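/* Program inter-packet gap and slot time. The slot time widens
 * from 32 to 0xff for half-duplex gigabit, presumably to cover
 * the carrier-extended slot that half-duplex gigabit requires;
 * 5720/5762 also preserve their jumbo-frame and countdown fields.
 */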
6090 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6091 (6 << TX_LENGTHS_IPG_SHIFT);
6092 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6093 tg3_asic_rev(tp) == ASIC_REV_5762)
6094 val |= tr32(MAC_TX_LENGTHS) &
6095 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6096 TX_LENGTHS_CNT_DWN_VAL_MSK);
6098 if (tp->link_config.active_speed == SPEED_1000 &&
6099 tp->link_config.active_duplex == DUPLEX_HALF)
6100 tw32(MAC_TX_LENGTHS, val |
6101 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6102 else
6103 tw32(MAC_TX_LENGTHS, val |
6104 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6106 if (!tg3_flag(tp, 5705_PLUS)) {
6107 if (tp->link_up) {
6108 tw32(HOSTCC_STAT_COAL_TICKS,
6109 tp->coal.stats_block_coalesce_usecs);
6110 } else {
6111 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6115 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6116 val = tr32(PCIE_PWR_MGMT_THRESH);
6117 if (!tp->link_up)
6118 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6119 tp->pwrmgmt_thresh;
6120 else
6121 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6122 tw32(PCIE_PWR_MGMT_THRESH, val);
6125 return err;
6128 /* tp->lock must be held */
6129 static u64 tg3_refclk_read(struct tg3 *tp)
6131 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6132 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
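/* The writer below stops the free-running reference clock, loads
 * the 64-bit count as two 32-bit halves, then resumes counting,
 * so a torn (half-written) value is never visible.
 */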
6135 /* tp->lock must be held */
6136 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6138 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6140 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6141 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6142 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6143 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6146 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6147 static inline void tg3_full_unlock(struct tg3 *tp);
6148 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6150 struct tg3 *tp = netdev_priv(dev);
6152 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6153 SOF_TIMESTAMPING_RX_SOFTWARE |
6154 SOF_TIMESTAMPING_SOFTWARE;
6156 if (tg3_flag(tp, PTP_CAPABLE)) {
6157 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6158 SOF_TIMESTAMPING_RX_HARDWARE |
6159 SOF_TIMESTAMPING_RAW_HARDWARE;
6162 if (tp->ptp_clock)
6163 info->phc_index = ptp_clock_index(tp->ptp_clock);
6164 else
6165 info->phc_index = -1;
6167 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6169 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6170 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6171 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6172 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6173 return 0;
6176 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6178 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6179 bool neg_adj = false;
6180 u32 correction = 0;
6182 if (ppb < 0) {
6183 neg_adj = true;
6184 ppb = -ppb;
6187 /* Frequency adjustment is performed using hardware with a 24 bit
6188 * accumulator and a programmable correction value. On each clk, the
6189 * correction value gets added to the accumulator and when it
6190 * overflows, the time counter is incremented/decremented.
6192 * So conversion from ppb to correction value is
6193 * ppb * (1 << 24) / 1000000000
6194 */
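/* Illustrative example: requesting ppb = 1000000 (+1000 ppm)
 * yields correction = 1000000 * 16777216 / 1000000000 = 16777
 * (truncated from 16777.216), an applied adjustment of
 * 16777 / 2^24 ~= 999987 ppb. One correction LSB is therefore
 * about 10^9 / 2^24 ~= 59.6 ppb of resolution.
 */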
6195 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6196 TG3_EAV_REF_CLK_CORRECT_MASK;
6198 tg3_full_lock(tp, 0);
6200 if (correction)
6201 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6202 TG3_EAV_REF_CLK_CORRECT_EN |
6203 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6204 else
6205 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6207 tg3_full_unlock(tp);
6209 return 0;
6212 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6214 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6216 tg3_full_lock(tp, 0);
6217 tp->ptp_adjust += delta;
6218 tg3_full_unlock(tp);
6220 return 0;
6223 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6225 u64 ns;
6226 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6228 tg3_full_lock(tp, 0);
6229 ns = tg3_refclk_read(tp);
6230 ns += tp->ptp_adjust;
6231 tg3_full_unlock(tp);
6233 *ts = ns_to_timespec64(ns);
6235 return 0;
6238 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6239 const struct timespec64 *ts)
6241 u64 ns;
6242 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6244 ns = timespec64_to_ns(ts);
6246 tg3_full_lock(tp, 0);
6247 tg3_refclk_write(tp, ns);
6248 tp->ptp_adjust = 0;
6249 tg3_full_unlock(tp);
6251 return 0;
6254 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6255 struct ptp_clock_request *rq, int on)
6257 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6258 u32 clock_ctl;
6259 int rval = 0;
6261 switch (rq->type) {
6262 case PTP_CLK_REQ_PEROUT:
6263 if (rq->perout.index != 0)
6264 return -EINVAL;
6266 tg3_full_lock(tp, 0);
6267 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6268 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6270 if (on) {
6271 u64 nsec;
6273 nsec = rq->perout.start.sec * 1000000000ULL +
6274 rq->perout.start.nsec;
6276 if (rq->perout.period.sec || rq->perout.period.nsec) {
6277 netdev_warn(tp->dev,
6278 "Device supports only a one-shot timesync output, period must be 0\n");
6279 rval = -EINVAL;
6280 goto err_out;
6283 if (nsec & (1ULL << 63)) {
6284 netdev_warn(tp->dev,
6285 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6286 rval = -EINVAL;
6287 goto err_out;
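/* Arm the one-shot: the low 32 bits of the target time go into
 * the watchdog LSB register and the remaining high bits, plus the
 * enable flag, into the MSB register; that split is why the start
 * value checked above must fit in 63 bits.
 */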
6290 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6291 tw32(TG3_EAV_WATCHDOG0_MSB,
6292 TG3_EAV_WATCHDOG0_EN |
6293 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6295 tw32(TG3_EAV_REF_CLCK_CTL,
6296 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6297 } else {
6298 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6299 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6302 err_out:
6303 tg3_full_unlock(tp);
6304 return rval;
6306 default:
6307 break;
6310 return -EOPNOTSUPP;
6313 static const struct ptp_clock_info tg3_ptp_caps = {
6314 .owner = THIS_MODULE,
6315 .name = "tg3 clock",
6316 .max_adj = 250000000,
6317 .n_alarm = 0,
6318 .n_ext_ts = 0,
6319 .n_per_out = 1,
6320 .n_pins = 0,
6321 .pps = 0,
6322 .adjfreq = tg3_ptp_adjfreq,
6323 .adjtime = tg3_ptp_adjtime,
6324 .gettime64 = tg3_ptp_gettime,
6325 .settime64 = tg3_ptp_settime,
6326 .enable = tg3_ptp_enable,
6329 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6330 struct skb_shared_hwtstamps *timestamp)
6332 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6333 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6334 tp->ptp_adjust);
6337 /* tp->lock must be held */
6338 static void tg3_ptp_init(struct tg3 *tp)
6340 if (!tg3_flag(tp, PTP_CAPABLE))
6341 return;
6343 /* Initialize the hardware clock to the system time. */
6344 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6345 tp->ptp_adjust = 0;
6346 tp->ptp_info = tg3_ptp_caps;
6349 /* tp->lock must be held */
6350 static void tg3_ptp_resume(struct tg3 *tp)
6352 if (!tg3_flag(tp, PTP_CAPABLE))
6353 return;
6355 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6356 tp->ptp_adjust = 0;
6359 static void tg3_ptp_fini(struct tg3 *tp)
6361 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6362 return;
6364 ptp_clock_unregister(tp->ptp_clock);
6365 tp->ptp_clock = NULL;
6366 tp->ptp_adjust = 0;
6369 static inline int tg3_irq_sync(struct tg3 *tp)
6371 return tp->irq_sync;
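/* tg3_rd32_loop() below first advances dst by off bytes so that
 * each register value lands at the buffer index matching its
 * register offset, letting tg3_dump_state() print address/value
 * pairs directly.
 */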
6374 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6376 int i;
6378 dst = (u32 *)((u8 *)dst + off);
6379 for (i = 0; i < len; i += sizeof(u32))
6380 *dst++ = tr32(off + i);
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}

static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}

/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	tg3_flag_set(tp, TX_RECOVERY_PENDING);
}

static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}

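/* Worked example (illustrative numbers, not from the original source):
 * with TG3_TX_RING_SIZE == 512, tx_pending == 511, tx_prod == 5 and
 * tx_cons == 510, the unsigned difference 5 - 510 wraps, and masking
 * with (512 - 1) yields 7 in-flight descriptors, so 511 - 7 = 504
 * slots remain. The mask makes the producer/consumer subtraction
 * correct across ring wraparound.
 */
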
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_consume_skb_any(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

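/* Design note (illustrative, not from the original source): the
 * stop/wake dance above pairs with tg3_start_xmit(), which stops the
 * queue once fewer than MAX_SKB_FRAGS + 1 descriptors remain; this
 * completion path re-wakes it only after tg3_tx_avail() climbs back
 * above TG3_TX_WAKEUP_THRESH(), a hysteresis that keeps the queue
 * from bouncing between the stopped and awake states.
 */
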
static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		skb_free_frag(data);
	else
		kfree(data);
}

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}

/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}

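/* Sizing note (illustrative, not from the original source): on a
 * 4 KiB-page system with a standard MTU, data_size plus the RX offset
 * and the skb_shared_info overhead still fits in one page, so the
 * page-fragment allocator path is taken; jumbo-sized buffers exceed
 * PAGE_SIZE and fall back to kmalloc(), which is why tg3_frag_free()
 * and tg3_rx_data_free() re-derive the same skb_size <= PAGE_SIZE test.
 */
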
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}

/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if (desc->err_vlan & RXD_ERR_MASK) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) {
			dev_kfree_skb_any(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}

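/* Design note (illustrative, not from the original source): the
 * TG3_RX_COPY_THRESH() split above trades allocation cost against
 * copy cost. Large packets donate their DMA buffer to the stack via
 * build_skb() and a fresh buffer is posted in its place, while small
 * packets are memcpy()'d into a compact skb so the original buffer
 * can be recycled in place without remapping.
 */
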
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}

static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}

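/* Worked example (illustrative numbers, not from the original source):
 * with a 512-entry standard ring (mask 511), rx_std_cons_idx == 500
 * and src_prod_idx == 10, the consumer is numerically "ahead", so the
 * first pass copies 512 - 500 = 12 entries up to the ring end; the
 * loop then comes around and copies the remaining 10 from index 0.
 * Splitting at the wrap point keeps each memcpy() contiguous.
 */
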
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}

static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}

static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete_done(napi, work_done);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}

static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}

static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete_done(napi, work_done);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}

static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_trans_update(tp->dev);	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_irq_quiesce(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	spin_unlock_bh(&tp->lock);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	spin_lock_bh(&tp->lock);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}

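/* Usage sketch (illustrative, not from the original source): paths
 * that reconfigure the chip take the full lock with irq_sync == 1,
 * e.g.
 *
 *	tg3_full_lock(tp, 1);	// also waits out in-flight ISRs
 *	...reprogram hardware...
 *	tg3_full_unlock(tp);
 *
 * while fast paths that only touch software state pass 0 and skip
 * the synchronize_irq() round trip in tg3_irq_quiesce().
 */
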
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}

static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}

static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}

/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}

/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return base + len + 8 < base;
}

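/* Worked example (illustrative numbers, not from the original source):
 * a buffer mapped at 0xfffff000 with len == 0x2000 gives
 * base + len + 8 == 0x1008 after 32-bit truncation, which is less
 * than base, so the test reports that the buffer would straddle a
 * 4GB boundary and the driver must split or bounce it.
 */
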
/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
 * of any 4GB boundaries: 4G, 8G, etc
 */
static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					   u32 len, u32 mss)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
		u32 base = (u32) mapping & 0xffffffff;

		return ((base + len + (mss & 0x3fff)) < base);
	}
	return 0;
}

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}

static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}

static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}

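/* Worked example (illustrative numbers, not from the original source):
 * with dma_limit == 4096 and a 4100-byte fragment, a full-size first
 * BD would leave only 4 bytes, tripping the 8-byte DMA bug, so the
 * loop instead emits a 2048-byte BD (dma_limit / 2) and leaves 2052
 * bytes for the final BD. Every intermediate BD is marked fragmented
 * so tg3_tx() and tg3_tx_skb_unmap() can walk the chain when
 * unmapping.
 */
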
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}

/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb_any(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb_any(new_skb);
				ret = -1;
			}
		}
	}

	dev_consume_skb_any(skb);
	*pskb = new_skb;
	return ret;
}

static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
{
	/* Check if we will never have enough descriptors,
	 * as gso_segs can be more than current ring size
	 */
	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
}

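/* Sizing note (illustrative, not from the original source): the
 * divide-by-three mirrors the frag_cnt_est computation in
 * tg3_tso_bug() below, which budgets roughly three descriptors per
 * GSO segment in the worst case. With tx_pending == 511, packets of
 * 170 or more segments are rejected up front rather than segmented.
 */
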
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set()
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
		       struct netdev_queue *txq, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_tx_wake_queue(txq);
	}

	segs = skb_gso_segment(skb, tp->dev->features &
				    ~(NETIF_F_TSO | NETIF_F_TSO6));
	if (IS_ERR(segs) || !segs)
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}

/* hard_start_xmit for all devices */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;
	struct iphdr *iph = NULL;
	struct tcphdr *tcph = NULL;
	__sum16 tcp_csum = 0, ip_csum = 0;
	__be16 ip_tot_len = 0;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		u32 tcp_opt_len, hdr_len;

		if (skb_cow_head(skb, 0))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		/* HW/FW can not correctly segment packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (tg3_tso_bug_gso_check(tnapi, skb))
				return tg3_tso_bug(tp, tnapi, txq, skb);
			goto drop;
		}

		if (!skb_is_gso_v6(skb)) {
			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			    tg3_flag(tp, TSO_BUG)) {
				if (tg3_tso_bug_gso_check(tnapi, skb))
					return tg3_tso_bug(tp, tnapi, txq, skb);
				goto drop;
			}
			ip_csum = iph->check;
			ip_tot_len = iph->tot_len;
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcph = tcp_hdr(skb);
		tcp_csum = tcph->check;

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcph->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else {
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		}

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* HW/FW can not correctly checksum packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (skb_checksum_help(skb))
				goto drop;
		} else {
			base_flags |= TXD_FLAG_TCPUDP_CSUM;
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (skb_vlan_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = skb_vlan_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
			/* If it's a TSO packet, do GSO instead of
			 * allocating and copying to a large linear SKB
			 */
			if (ip_tot_len) {
				iph->check = ip_csum;
				iph->tot_len = ip_tot_len;
			}
			tcph->check = tcp_csum;
			return tg3_tso_bug(tp, tnapi, txq, skb);
		}

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
		/* Packets are ready, update Tx producer idx on card. */
		tw32_tx_mbox(tnapi->prodmbox, entry);
		mmiowb();
	}

	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb_any(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}

static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}

static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}

static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, true);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}

static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}

static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}

8356 /* Initialize rx rings for packet processing.
8358 * The chip has been shut down and the driver detached from
8359 * the networking, so no interrupts or new tx packets will
8360 * end up in the driver. tp->{tx,}lock are held and thus
8361 * we may not sleep.
8363 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8364 struct tg3_rx_prodring_set *tpr)
8366 u32 i, rx_pkt_dma_sz;
8368 tpr->rx_std_cons_idx = 0;
8369 tpr->rx_std_prod_idx = 0;
8370 tpr->rx_jmb_cons_idx = 0;
8371 tpr->rx_jmb_prod_idx = 0;
8373 if (tpr != &tp->napi[0].prodring) {
8374 memset(&tpr->rx_std_buffers[0], 0,
8375 TG3_RX_STD_BUFF_RING_SIZE(tp));
8376 if (tpr->rx_jmb_buffers)
8377 memset(&tpr->rx_jmb_buffers[0], 0,
8378 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8379 goto done;
8382 /* Zero out all descriptors. */
8383 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8385 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8386 if (tg3_flag(tp, 5780_CLASS) &&
8387 tp->dev->mtu > ETH_DATA_LEN)
8388 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8389 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8391 /* Initialize invariants of the rings, we only set this
8392 * stuff once. This works because the card does not
8393 * write into the rx buffer posting rings.
8395 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8396 struct tg3_rx_buffer_desc *rxd;
8398 rxd = &tpr->rx_std[i];
8399 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8400 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8401 rxd->opaque = (RXD_OPAQUE_RING_STD |
8402 (i << RXD_OPAQUE_INDEX_SHIFT));
8405 /* Now allocate fresh SKBs for each rx ring. */
8406 for (i = 0; i < tp->rx_pending; i++) {
8407 unsigned int frag_size;
8409 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8410 &frag_size) < 0) {
8411 netdev_warn(tp->dev,
8412 "Using a smaller RX standard ring. Only "
8413 "%d out of %d buffers were allocated "
8414 "successfully\n", i, tp->rx_pending);
8415 if (i == 0)
8416 goto initfail;
8417 tp->rx_pending = i;
8418 break;
8422 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8423 goto done;
8425 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8427 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8428 goto done;
8430 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8431 struct tg3_rx_buffer_desc *rxd;
8433 rxd = &tpr->rx_jmb[i].std;
8434 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8435 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8436 RXD_FLAG_JUMBO;
8437 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8438 (i << RXD_OPAQUE_INDEX_SHIFT));
8441 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8442 unsigned int frag_size;
8444 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8445 &frag_size) < 0) {
8446 netdev_warn(tp->dev,
8447 "Using a smaller RX jumbo ring. Only %d "
8448 "out of %d buffers were allocated "
8449 "successfully\n", i, tp->rx_jumbo_pending);
8450 if (i == 0)
8451 goto initfail;
8452 tp->rx_jumbo_pending = i;
8453 break;
8457 done:
8458 return 0;
8460 initfail:
8461 tg3_rx_prodring_free(tp, tpr);
8462 return -ENOMEM;
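/* For illustration: each descriptor's opaque cookie packs the ring type
 * together with the slot index, e.g.
 *
 *	rxd->opaque = RXD_OPAQUE_RING_STD | (i << RXD_OPAQUE_INDEX_SHIFT);
 *
 * The chip echoes the cookie back in the receive return ring, so the
 * completion path can recover the originating ring and buffer slot
 * without searching.
 */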
8465 static void tg3_rx_prodring_fini(struct tg3 *tp,
8466 struct tg3_rx_prodring_set *tpr)
8468 kfree(tpr->rx_std_buffers);
8469 tpr->rx_std_buffers = NULL;
8470 kfree(tpr->rx_jmb_buffers);
8471 tpr->rx_jmb_buffers = NULL;
8472 if (tpr->rx_std) {
8473 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8474 tpr->rx_std, tpr->rx_std_mapping);
8475 tpr->rx_std = NULL;
8477 if (tpr->rx_jmb) {
8478 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8479 tpr->rx_jmb, tpr->rx_jmb_mapping);
8480 tpr->rx_jmb = NULL;
8484 static int tg3_rx_prodring_init(struct tg3 *tp,
8485 struct tg3_rx_prodring_set *tpr)
8487 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8488 GFP_KERNEL);
8489 if (!tpr->rx_std_buffers)
8490 return -ENOMEM;
8492 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8493 TG3_RX_STD_RING_BYTES(tp),
8494 &tpr->rx_std_mapping,
8495 GFP_KERNEL);
8496 if (!tpr->rx_std)
8497 goto err_out;
8499 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8500 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8501 GFP_KERNEL);
8502 if (!tpr->rx_jmb_buffers)
8503 goto err_out;
8505 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8506 TG3_RX_JMB_RING_BYTES(tp),
8507 &tpr->rx_jmb_mapping,
8508 GFP_KERNEL);
8509 if (!tpr->rx_jmb)
8510 goto err_out;
8513 return 0;
8515 err_out:
8516 tg3_rx_prodring_fini(tp, tpr);
8517 return -ENOMEM;
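/* The allocation split above is deliberate: the *_buffers arrays are
 * ordinary kzalloc()ed host bookkeeping (data pointer plus DMA handle
 * per slot), while the descriptor rings come from dma_alloc_coherent()
 * because the NIC reads those entries directly.
 */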
8520 /* Free up pending packets in all rx/tx rings.
8521 *
8522 * The chip has been shut down and the driver detached from
8523 * the networking, so no interrupts or new tx packets will
8524 * end up in the driver. tp->{tx,}lock is not held and we are not
8525 * in an interrupt context and thus may sleep.
8526 */
8527 static void tg3_free_rings(struct tg3 *tp)
8529 int i, j;
8531 for (j = 0; j < tp->irq_cnt; j++) {
8532 struct tg3_napi *tnapi = &tp->napi[j];
8534 tg3_rx_prodring_free(tp, &tnapi->prodring);
8536 if (!tnapi->tx_buffers)
8537 continue;
8539 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8540 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8542 if (!skb)
8543 continue;
8545 tg3_tx_skb_unmap(tnapi, i,
8546 skb_shinfo(skb)->nr_frags - 1);
8548 dev_consume_skb_any(skb);
8550 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8554 /* Initialize tx/rx rings for packet processing.
8555 *
8556 * The chip has been shut down and the driver detached from
8557 * the networking, so no interrupts or new tx packets will
8558 * end up in the driver. tp->{tx,}lock are held and thus
8559 * we may not sleep.
8560 */
8561 static int tg3_init_rings(struct tg3 *tp)
8563 int i;
8565 /* Free up all the SKBs. */
8566 tg3_free_rings(tp);
8568 for (i = 0; i < tp->irq_cnt; i++) {
8569 struct tg3_napi *tnapi = &tp->napi[i];
8571 tnapi->last_tag = 0;
8572 tnapi->last_irq_tag = 0;
8573 tnapi->hw_status->status = 0;
8574 tnapi->hw_status->status_tag = 0;
8575 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8577 tnapi->tx_prod = 0;
8578 tnapi->tx_cons = 0;
8579 if (tnapi->tx_ring)
8580 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8582 tnapi->rx_rcb_ptr = 0;
8583 if (tnapi->rx_rcb)
8584 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8586 if (tnapi->prodring.rx_std &&
8587 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8588 tg3_free_rings(tp);
8589 return -ENOMEM;
8593 return 0;
8596 static void tg3_mem_tx_release(struct tg3 *tp)
8598 int i;
8600 for (i = 0; i < tp->irq_max; i++) {
8601 struct tg3_napi *tnapi = &tp->napi[i];
8603 if (tnapi->tx_ring) {
8604 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8605 tnapi->tx_ring, tnapi->tx_desc_mapping);
8606 tnapi->tx_ring = NULL;
8609 kfree(tnapi->tx_buffers);
8610 tnapi->tx_buffers = NULL;
8614 static int tg3_mem_tx_acquire(struct tg3 *tp)
8616 int i;
8617 struct tg3_napi *tnapi = &tp->napi[0];
8619 /* If multivector TSS is enabled, vector 0 does not handle
8620 * tx interrupts. Don't allocate any resources for it.
8621 */
8622 if (tg3_flag(tp, ENABLE_TSS))
8623 tnapi++;
8625 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8626 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8627 TG3_TX_RING_SIZE, GFP_KERNEL);
8628 if (!tnapi->tx_buffers)
8629 goto err_out;
8631 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8632 TG3_TX_RING_BYTES,
8633 &tnapi->tx_desc_mapping,
8634 GFP_KERNEL);
8635 if (!tnapi->tx_ring)
8636 goto err_out;
8639 return 0;
8641 err_out:
8642 tg3_mem_tx_release(tp);
8643 return -ENOMEM;
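/* With TSS enabled the loop above starts at tp->napi[1], so the tx
 * rings occupy vectors 1..tp->txq_cnt and vector 0 keeps no tx state;
 * without TSS, vector 0 carries the single tx ring.
 */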
8646 static void tg3_mem_rx_release(struct tg3 *tp)
8648 int i;
8650 for (i = 0; i < tp->irq_max; i++) {
8651 struct tg3_napi *tnapi = &tp->napi[i];
8653 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8655 if (!tnapi->rx_rcb)
8656 continue;
8658 dma_free_coherent(&tp->pdev->dev,
8659 TG3_RX_RCB_RING_BYTES(tp),
8660 tnapi->rx_rcb,
8661 tnapi->rx_rcb_mapping);
8662 tnapi->rx_rcb = NULL;
8666 static int tg3_mem_rx_acquire(struct tg3 *tp)
8668 unsigned int i, limit;
8670 limit = tp->rxq_cnt;
8672 /* If RSS is enabled, we need a (dummy) producer ring
8673 * set on vector zero. This is the true hw prodring.
8674 */
8675 if (tg3_flag(tp, ENABLE_RSS))
8676 limit++;
8678 for (i = 0; i < limit; i++) {
8679 struct tg3_napi *tnapi = &tp->napi[i];
8681 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8682 goto err_out;
8684 /* If multivector RSS is enabled, vector 0
8685 * does not handle rx or tx interrupts.
8686 * Don't allocate any resources for it.
8687 */
8688 if (!i && tg3_flag(tp, ENABLE_RSS))
8689 continue;
8691 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8692 TG3_RX_RCB_RING_BYTES(tp),
8693 &tnapi->rx_rcb_mapping,
8694 GFP_KERNEL);
8695 if (!tnapi->rx_rcb)
8696 goto err_out;
8699 return 0;
8701 err_out:
8702 tg3_mem_rx_release(tp);
8703 return -ENOMEM;
8706 /*
8707 * Must not be invoked with interrupt sources disabled and
8708 * the hardware shut down.
8709 */
8710 static void tg3_free_consistent(struct tg3 *tp)
8712 int i;
8714 for (i = 0; i < tp->irq_cnt; i++) {
8715 struct tg3_napi *tnapi = &tp->napi[i];
8717 if (tnapi->hw_status) {
8718 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8719 tnapi->hw_status,
8720 tnapi->status_mapping);
8721 tnapi->hw_status = NULL;
8725 tg3_mem_rx_release(tp);
8726 tg3_mem_tx_release(tp);
8728 /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
8729 tg3_full_lock(tp, 0);
8730 if (tp->hw_stats) {
8731 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8732 tp->hw_stats, tp->stats_mapping);
8733 tp->hw_stats = NULL;
8735 tg3_full_unlock(tp);
8738 /*
8739 * Must not be invoked with interrupt sources disabled and
8740 * the hardware shut down. Can sleep.
8741 */
8742 static int tg3_alloc_consistent(struct tg3 *tp)
8744 int i;
8746 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8747 sizeof(struct tg3_hw_stats),
8748 &tp->stats_mapping, GFP_KERNEL);
8749 if (!tp->hw_stats)
8750 goto err_out;
8752 for (i = 0; i < tp->irq_cnt; i++) {
8753 struct tg3_napi *tnapi = &tp->napi[i];
8754 struct tg3_hw_status *sblk;
8756 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8757 TG3_HW_STATUS_SIZE,
8758 &tnapi->status_mapping,
8759 GFP_KERNEL);
8760 if (!tnapi->hw_status)
8761 goto err_out;
8763 sblk = tnapi->hw_status;
8765 if (tg3_flag(tp, ENABLE_RSS)) {
8766 u16 *prodptr = NULL;
8768 /*
8769 * When RSS is enabled, the status block format changes
8770 * slightly. The "rx_jumbo_consumer", "reserved",
8771 * and "rx_mini_consumer" members get mapped to the
8772 * other three rx return ring producer indexes.
8773 */
8774 switch (i) {
8775 case 1:
8776 prodptr = &sblk->idx[0].rx_producer;
8777 break;
8778 case 2:
8779 prodptr = &sblk->rx_jumbo_consumer;
8780 break;
8781 case 3:
8782 prodptr = &sblk->reserved;
8783 break;
8784 case 4:
8785 prodptr = &sblk->rx_mini_consumer;
8786 break;
8788 tnapi->rx_rcb_prod_idx = prodptr;
8789 } else {
8790 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8794 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8795 goto err_out;
8797 return 0;
8799 err_out:
8800 tg3_free_consistent(tp);
8801 return -ENOMEM;
8804 #define MAX_WAIT_CNT 1000
8806 /* To stop a block, clear the enable bit and poll till it
8807 * clears. tp->lock is held.
8808 */
8809 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8811 unsigned int i;
8812 u32 val;
8814 if (tg3_flag(tp, 5705_PLUS)) {
8815 switch (ofs) {
8816 case RCVLSC_MODE:
8817 case DMAC_MODE:
8818 case MBFREE_MODE:
8819 case BUFMGR_MODE:
8820 case MEMARB_MODE:
8821 /* We can't enable/disable these bits of the
8822 * 5705/5750, just say success.
8823 */
8824 return 0;
8826 default:
8827 break;
8831 val = tr32(ofs);
8832 val &= ~enable_bit;
8833 tw32_f(ofs, val);
8835 for (i = 0; i < MAX_WAIT_CNT; i++) {
8836 if (pci_channel_offline(tp->pdev)) {
8837 dev_err(&tp->pdev->dev,
8838 "tg3_stop_block device offline, "
8839 "ofs=%lx enable_bit=%x\n",
8840 ofs, enable_bit);
8841 return -ENODEV;
8844 udelay(100);
8845 val = tr32(ofs);
8846 if ((val & enable_bit) == 0)
8847 break;
8850 if (i == MAX_WAIT_CNT && !silent) {
8851 dev_err(&tp->pdev->dev,
8852 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8853 ofs, enable_bit);
8854 return -ENODEV;
8857 return 0;
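/* Worst case, the poll loop above spins MAX_WAIT_CNT * 100 us, i.e.
 * roughly 100 ms per block, before giving up on the enable bit.
 */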
8860 /* tp->lock is held. */
8861 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8863 int i, err;
8865 tg3_disable_ints(tp);
8867 if (pci_channel_offline(tp->pdev)) {
8868 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8869 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8870 err = -ENODEV;
8871 goto err_no_dev;
8874 tp->rx_mode &= ~RX_MODE_ENABLE;
8875 tw32_f(MAC_RX_MODE, tp->rx_mode);
8876 udelay(10);
8878 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8879 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8880 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8881 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8882 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8883 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8885 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8886 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8887 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8888 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8889 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8890 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8891 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8893 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8894 tw32_f(MAC_MODE, tp->mac_mode);
8895 udelay(40);
8897 tp->tx_mode &= ~TX_MODE_ENABLE;
8898 tw32_f(MAC_TX_MODE, tp->tx_mode);
8900 for (i = 0; i < MAX_WAIT_CNT; i++) {
8901 udelay(100);
8902 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8903 break;
8905 if (i >= MAX_WAIT_CNT) {
8906 dev_err(&tp->pdev->dev,
8907 "%s timed out, TX_MODE_ENABLE will not clear "
8908 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8909 err |= -ENODEV;
8912 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8913 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8914 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8916 tw32(FTQ_RESET, 0xffffffff);
8917 tw32(FTQ_RESET, 0x00000000);
8919 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8920 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8922 err_no_dev:
8923 for (i = 0; i < tp->irq_cnt; i++) {
8924 struct tg3_napi *tnapi = &tp->napi[i];
8925 if (tnapi->hw_status)
8926 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8929 return err;
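/* The teardown order above follows the data path: the receive MAC and
 * receive blocks are quiesced first, then the send-side blocks and DMA
 * engines, then the transmit MAC and host coalescing, and finally the
 * buffer manager and memory arbiter that every other block depends on.
 */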
8932 /* Save PCI command register before chip reset */
8933 static void tg3_save_pci_state(struct tg3 *tp)
8935 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8938 /* Restore PCI state after chip reset */
8939 static void tg3_restore_pci_state(struct tg3 *tp)
8941 u32 val;
8943 /* Re-enable indirect register accesses. */
8944 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8945 tp->misc_host_ctrl);
8947 /* Set MAX PCI retry to zero. */
8948 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8949 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8950 tg3_flag(tp, PCIX_MODE))
8951 val |= PCISTATE_RETRY_SAME_DMA;
8952 /* Allow reads and writes to the APE register and memory space. */
8953 if (tg3_flag(tp, ENABLE_APE))
8954 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8955 PCISTATE_ALLOW_APE_SHMEM_WR |
8956 PCISTATE_ALLOW_APE_PSPACE_WR;
8957 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8959 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8961 if (!tg3_flag(tp, PCI_EXPRESS)) {
8962 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8963 tp->pci_cacheline_sz);
8964 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8965 tp->pci_lat_timer);
8968 /* Make sure PCI-X relaxed ordering bit is clear. */
8969 if (tg3_flag(tp, PCIX_MODE)) {
8970 u16 pcix_cmd;
8972 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8973 &pcix_cmd);
8974 pcix_cmd &= ~PCI_X_CMD_ERO;
8975 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8976 pcix_cmd);
8979 if (tg3_flag(tp, 5780_CLASS)) {
8981 /* Chip reset on 5780 will reset the MSI enable bit,
8982 * so it needs to be restored.
8983 */
8984 if (tg3_flag(tp, USING_MSI)) {
8985 u16 ctrl;
8987 pci_read_config_word(tp->pdev,
8988 tp->msi_cap + PCI_MSI_FLAGS,
8989 &ctrl);
8990 pci_write_config_word(tp->pdev,
8991 tp->msi_cap + PCI_MSI_FLAGS,
8992 ctrl | PCI_MSI_FLAGS_ENABLE);
8993 val = tr32(MSGINT_MODE);
8994 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8999 static void tg3_override_clk(struct tg3 *tp)
9001 u32 val;
9003 switch (tg3_asic_rev(tp)) {
9004 case ASIC_REV_5717:
9005 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9006 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9007 TG3_CPMU_MAC_ORIDE_ENABLE);
9008 break;
9010 case ASIC_REV_5719:
9011 case ASIC_REV_5720:
9012 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9013 break;
9015 default:
9016 return;
9020 static void tg3_restore_clk(struct tg3 *tp)
9022 u32 val;
9024 switch (tg3_asic_rev(tp)) {
9025 case ASIC_REV_5717:
9026 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9027 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9028 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9029 break;
9031 case ASIC_REV_5719:
9032 case ASIC_REV_5720:
9033 val = tr32(TG3_CPMU_CLCK_ORIDE);
9034 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9035 break;
9037 default:
9038 return;
9042 /* tp->lock is held. */
9043 static int tg3_chip_reset(struct tg3 *tp)
9044 __releases(tp->lock)
9045 __acquires(tp->lock)
9047 u32 val;
9048 void (*write_op)(struct tg3 *, u32, u32);
9049 int i, err;
9051 if (!pci_device_is_present(tp->pdev))
9052 return -ENODEV;
9054 tg3_nvram_lock(tp);
9056 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9058 /* No matching tg3_nvram_unlock() after this because
9059 * chip reset below will undo the nvram lock.
9060 */
9061 tp->nvram_lock_cnt = 0;
9063 /* GRC_MISC_CFG core clock reset will clear the memory
9064 * enable bit in PCI register 4 and the MSI enable bit
9065 * on some chips, so we save relevant registers here.
9066 */
9067 tg3_save_pci_state(tp);
9069 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9070 tg3_flag(tp, 5755_PLUS))
9071 tw32(GRC_FASTBOOT_PC, 0);
9073 /*
9074 * We must avoid the readl() that normally takes place.
9075 * It locks machines, causes machine checks, and other
9076 * fun things. So, temporarily disable the 5701
9077 * hardware workaround, while we do the reset.
9078 */
9079 write_op = tp->write32;
9080 if (write_op == tg3_write_flush_reg32)
9081 tp->write32 = tg3_write32;
9083 /* Prevent the irq handler from reading or writing PCI registers
9084 * during chip reset when the memory enable bit in the PCI command
9085 * register may be cleared. The chip does not generate interrupts
9086 * at this time, but the irq handler may still be called due to irq
9087 * sharing or irqpoll.
9088 */
9089 tg3_flag_set(tp, CHIP_RESETTING);
9090 for (i = 0; i < tp->irq_cnt; i++) {
9091 struct tg3_napi *tnapi = &tp->napi[i];
9092 if (tnapi->hw_status) {
9093 tnapi->hw_status->status = 0;
9094 tnapi->hw_status->status_tag = 0;
9096 tnapi->last_tag = 0;
9097 tnapi->last_irq_tag = 0;
9099 smp_mb();
9101 tg3_full_unlock(tp);
9103 for (i = 0; i < tp->irq_cnt; i++)
9104 synchronize_irq(tp->napi[i].irq_vec);
9106 tg3_full_lock(tp, 0);
9108 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9109 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9110 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9113 /* do the reset */
9114 val = GRC_MISC_CFG_CORECLK_RESET;
9116 if (tg3_flag(tp, PCI_EXPRESS)) {
9117 /* Force PCIe 1.0a mode */
9118 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9119 !tg3_flag(tp, 57765_PLUS) &&
9120 tr32(TG3_PCIE_PHY_TSTCTL) ==
9121 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9122 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9124 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9125 tw32(GRC_MISC_CFG, (1 << 29));
9126 val |= (1 << 29);
9130 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9131 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9132 tw32(GRC_VCPU_EXT_CTRL,
9133 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9136 /* Set the clock to the highest frequency to avoid timeouts. With link
9137 * aware mode, the clock speed could be slow and the bootcode might not
9138 * complete within the expected time. Override the clock to allow the
9139 * bootcode to finish sooner and then restore it.
9140 */
9141 tg3_override_clk(tp);
9143 /* Manage gphy power for all CPMU absent PCIe devices. */
9144 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9145 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9147 tw32(GRC_MISC_CFG, val);
9149 /* restore 5701 hardware bug workaround write method */
9150 tp->write32 = write_op;
9152 /* Unfortunately, we have to delay before the PCI read back.
9153 * Some 575X chips even will not respond to a PCI cfg access
9154 * when the reset command is given to the chip.
9155 *
9156 * How do these hardware designers expect things to work
9157 * properly if the PCI write is posted for a long period
9158 * of time? It is always necessary to have some method by
9159 * which a register read back can occur to push out the
9160 * posted write that performs the reset.
9161 *
9162 * For most tg3 variants the trick below was working.
9163 * Ho hum...
9164 */
9165 udelay(120);
9167 /* Flush PCI posted writes. The normal MMIO registers
9168 * are inaccessible at this time so this is the only
9169 * way to do this reliably (actually, this is no longer
9170 * the case, see above). I tried to use indirect
9171 * register read/write but this upset some 5701 variants.
9172 */
9173 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9175 udelay(120);
9177 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9178 u16 val16;
9180 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9181 int j;
9182 u32 cfg_val;
9184 /* Wait for link training to complete. */
9185 for (j = 0; j < 5000; j++)
9186 udelay(100);
9188 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9189 pci_write_config_dword(tp->pdev, 0xc4,
9190 cfg_val | (1 << 15));
9193 /* Clear the "no snoop" and "relaxed ordering" bits. */
9194 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9195 /*
9196 * Older PCIe devices only support the 128 byte
9197 * MPS setting. Enforce the restriction.
9198 */
9199 if (!tg3_flag(tp, CPMU_PRESENT))
9200 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9201 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9203 /* Clear error status */
9204 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9205 PCI_EXP_DEVSTA_CED |
9206 PCI_EXP_DEVSTA_NFED |
9207 PCI_EXP_DEVSTA_FED |
9208 PCI_EXP_DEVSTA_URD);
9211 tg3_restore_pci_state(tp);
9213 tg3_flag_clear(tp, CHIP_RESETTING);
9214 tg3_flag_clear(tp, ERROR_PROCESSED);
9216 val = 0;
9217 if (tg3_flag(tp, 5780_CLASS))
9218 val = tr32(MEMARB_MODE);
9219 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9221 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9222 tg3_stop_fw(tp);
9223 tw32(0x5000, 0x400);
9226 if (tg3_flag(tp, IS_SSB_CORE)) {
9227 /*
9228 * BCM4785: In order to avoid repercussions from using
9229 * potentially defective internal ROM, stop the Rx RISC CPU,
9230 * which is not required.
9231 */
9232 tg3_stop_fw(tp);
9233 tg3_halt_cpu(tp, RX_CPU_BASE);
9236 err = tg3_poll_fw(tp);
9237 if (err)
9238 return err;
9240 tw32(GRC_MODE, tp->grc_mode);
9242 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9243 val = tr32(0xc4);
9245 tw32(0xc4, val | (1 << 15));
9248 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9249 tg3_asic_rev(tp) == ASIC_REV_5705) {
9250 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9251 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9252 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9253 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9256 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9257 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9258 val = tp->mac_mode;
9259 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9260 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9261 val = tp->mac_mode;
9262 } else
9263 val = 0;
9265 tw32_f(MAC_MODE, val);
9266 udelay(40);
9268 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9270 tg3_mdio_start(tp);
9272 if (tg3_flag(tp, PCI_EXPRESS) &&
9273 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9274 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9275 !tg3_flag(tp, 57765_PLUS)) {
9276 val = tr32(0x7c00);
9278 tw32(0x7c00, val | (1 << 25));
9281 tg3_restore_clk(tp);
9283 /* Reprobe ASF enable state. */
9284 tg3_flag_clear(tp, ENABLE_ASF);
9285 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9286 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9288 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9289 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9290 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9291 u32 nic_cfg;
9293 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9294 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9295 tg3_flag_set(tp, ENABLE_ASF);
9296 tp->last_event_jiffies = jiffies;
9297 if (tg3_flag(tp, 5750_PLUS))
9298 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9300 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9301 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9302 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9303 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9304 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9308 return 0;
9311 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9312 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9313 static void __tg3_set_rx_mode(struct net_device *);
9315 /* tp->lock is held. */
9316 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9318 int err;
9320 tg3_stop_fw(tp);
9322 tg3_write_sig_pre_reset(tp, kind);
9324 tg3_abort_hw(tp, silent);
9325 err = tg3_chip_reset(tp);
9327 __tg3_set_mac_addr(tp, false);
9329 tg3_write_sig_legacy(tp, kind);
9330 tg3_write_sig_post_reset(tp, kind);
9332 if (tp->hw_stats) {
9333 /* Save the stats across chip resets... */
9334 tg3_get_nstats(tp, &tp->net_stats_prev);
9335 tg3_get_estats(tp, &tp->estats_prev);
9337 /* And make sure the next sample is new data */
9338 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9341 return err;
9344 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9346 struct tg3 *tp = netdev_priv(dev);
9347 struct sockaddr *addr = p;
9348 int err = 0;
9349 bool skip_mac_1 = false;
9351 if (!is_valid_ether_addr(addr->sa_data))
9352 return -EADDRNOTAVAIL;
9354 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9356 if (!netif_running(dev))
9357 return 0;
9359 if (tg3_flag(tp, ENABLE_ASF)) {
9360 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9362 addr0_high = tr32(MAC_ADDR_0_HIGH);
9363 addr0_low = tr32(MAC_ADDR_0_LOW);
9364 addr1_high = tr32(MAC_ADDR_1_HIGH);
9365 addr1_low = tr32(MAC_ADDR_1_LOW);
9367 /* Skip MAC addr 1 if ASF is using it. */
9368 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9369 !(addr1_high == 0 && addr1_low == 0))
9370 skip_mac_1 = true;
9372 spin_lock_bh(&tp->lock);
9373 __tg3_set_mac_addr(tp, skip_mac_1);
9374 __tg3_set_rx_mode(dev);
9375 spin_unlock_bh(&tp->lock);
9377 return err;
9380 /* tp->lock is held. */
9381 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9382 dma_addr_t mapping, u32 maxlen_flags,
9383 u32 nic_addr)
9385 tg3_write_mem(tp,
9386 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9387 ((u64) mapping >> 32));
9388 tg3_write_mem(tp,
9389 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9390 ((u64) mapping & 0xffffffff));
9391 tg3_write_mem(tp,
9392 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9393 maxlen_flags);
9395 if (!tg3_flag(tp, 5705_PLUS))
9396 tg3_write_mem(tp,
9397 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9398 nic_addr);
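/* A TG3_BDINFO block in NIC SRAM is four consecutive 32-bit words,
 * written in the order above: host ring DMA address (high word, then
 * low word), a maxlen/flags word, and, on pre-5705 parts only, the NIC
 * SRAM address of the descriptors.
 */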
9402 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9404 int i = 0;
9406 if (!tg3_flag(tp, ENABLE_TSS)) {
9407 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9408 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9409 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9410 } else {
9411 tw32(HOSTCC_TXCOL_TICKS, 0);
9412 tw32(HOSTCC_TXMAX_FRAMES, 0);
9413 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9415 for (; i < tp->txq_cnt; i++) {
9416 u32 reg;
9418 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9419 tw32(reg, ec->tx_coalesce_usecs);
9420 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9421 tw32(reg, ec->tx_max_coalesced_frames);
9422 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9423 tw32(reg, ec->tx_max_coalesced_frames_irq);
9427 for (; i < tp->irq_max - 1; i++) {
9428 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9429 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9430 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
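/* The per-vector coalescing registers sit at a fixed 0x18-byte stride
 * from the *_VEC1 base. For example (values chosen for illustration),
 * with tx_coalesce_usecs = 20 and two tx queues the loop above does:
 *
 *	tw32(HOSTCC_TXCOL_TICKS_VEC1 + 0 * 0x18, 20);	(vector 1)
 *	tw32(HOSTCC_TXCOL_TICKS_VEC1 + 1 * 0x18, 20);	(vector 2)
 *
 * and the trailing loop zeroes the registers of any unused vectors.
 */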
9434 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9436 int i = 0;
9437 u32 limit = tp->rxq_cnt;
9439 if (!tg3_flag(tp, ENABLE_RSS)) {
9440 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9441 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9442 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9443 limit--;
9444 } else {
9445 tw32(HOSTCC_RXCOL_TICKS, 0);
9446 tw32(HOSTCC_RXMAX_FRAMES, 0);
9447 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9450 for (; i < limit; i++) {
9451 u32 reg;
9453 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9454 tw32(reg, ec->rx_coalesce_usecs);
9455 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9456 tw32(reg, ec->rx_max_coalesced_frames);
9457 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9458 tw32(reg, ec->rx_max_coalesced_frames_irq);
9461 for (; i < tp->irq_max - 1; i++) {
9462 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9463 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9464 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9468 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9470 tg3_coal_tx_init(tp, ec);
9471 tg3_coal_rx_init(tp, ec);
9473 if (!tg3_flag(tp, 5705_PLUS)) {
9474 u32 val = ec->stats_block_coalesce_usecs;
9476 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9477 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9479 if (!tp->link_up)
9480 val = 0;
9482 tw32(HOSTCC_STAT_COAL_TICKS, val);
9486 /* tp->lock is held. */
9487 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9489 u32 txrcb, limit;
9491 /* Disable all transmit rings but the first. */
9492 if (!tg3_flag(tp, 5705_PLUS))
9493 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9494 else if (tg3_flag(tp, 5717_PLUS))
9495 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9496 else if (tg3_flag(tp, 57765_CLASS) ||
9497 tg3_asic_rev(tp) == ASIC_REV_5762)
9498 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9499 else
9500 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9502 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9503 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9504 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9505 BDINFO_FLAGS_DISABLED);
9508 /* tp->lock is held. */
9509 static void tg3_tx_rcbs_init(struct tg3 *tp)
9511 int i = 0;
9512 u32 txrcb = NIC_SRAM_SEND_RCB;
9514 if (tg3_flag(tp, ENABLE_TSS))
9515 i++;
9517 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9518 struct tg3_napi *tnapi = &tp->napi[i];
9520 if (!tnapi->tx_ring)
9521 continue;
9523 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9524 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9525 NIC_SRAM_TX_BUFFER_DESC);
9529 /* tp->lock is held. */
9530 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9532 u32 rxrcb, limit;
9534 /* Disable all receive return rings but the first. */
9535 if (tg3_flag(tp, 5717_PLUS))
9536 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9537 else if (!tg3_flag(tp, 5705_PLUS))
9538 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9539 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9540 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9541 tg3_flag(tp, 57765_CLASS))
9542 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9543 else
9544 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9546 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9547 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9548 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9549 BDINFO_FLAGS_DISABLED);
9552 /* tp->lock is held. */
9553 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9555 int i = 0;
9556 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9558 if (tg3_flag(tp, ENABLE_RSS))
9559 i++;
9561 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9562 struct tg3_napi *tnapi = &tp->napi[i];
9564 if (!tnapi->rx_rcb)
9565 continue;
9567 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9568 (tp->rx_ret_ring_mask + 1) <<
9569 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9573 /* tp->lock is held. */
9574 static void tg3_rings_reset(struct tg3 *tp)
9576 int i;
9577 u32 stblk;
9578 struct tg3_napi *tnapi = &tp->napi[0];
9580 tg3_tx_rcbs_disable(tp);
9582 tg3_rx_ret_rcbs_disable(tp);
9584 /* Disable interrupts */
9585 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9586 tp->napi[0].chk_msi_cnt = 0;
9587 tp->napi[0].last_rx_cons = 0;
9588 tp->napi[0].last_tx_cons = 0;
9590 /* Zero mailbox registers. */
9591 if (tg3_flag(tp, SUPPORT_MSIX)) {
9592 for (i = 1; i < tp->irq_max; i++) {
9593 tp->napi[i].tx_prod = 0;
9594 tp->napi[i].tx_cons = 0;
9595 if (tg3_flag(tp, ENABLE_TSS))
9596 tw32_mailbox(tp->napi[i].prodmbox, 0);
9597 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9598 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9599 tp->napi[i].chk_msi_cnt = 0;
9600 tp->napi[i].last_rx_cons = 0;
9601 tp->napi[i].last_tx_cons = 0;
9603 if (!tg3_flag(tp, ENABLE_TSS))
9604 tw32_mailbox(tp->napi[0].prodmbox, 0);
9605 } else {
9606 tp->napi[0].tx_prod = 0;
9607 tp->napi[0].tx_cons = 0;
9608 tw32_mailbox(tp->napi[0].prodmbox, 0);
9609 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9612 /* Make sure the NIC-based send BD rings are disabled. */
9613 if (!tg3_flag(tp, 5705_PLUS)) {
9614 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9615 for (i = 0; i < 16; i++)
9616 tw32_tx_mbox(mbox + i * 8, 0);
9619 /* Clear status block in ram. */
9620 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9622 /* Set status block DMA address */
9623 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9624 ((u64) tnapi->status_mapping >> 32));
9625 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9626 ((u64) tnapi->status_mapping & 0xffffffff));
9628 stblk = HOSTCC_STATBLCK_RING1;
9630 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9631 u64 mapping = (u64)tnapi->status_mapping;
9632 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9633 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9634 stblk += 8;
9636 /* Clear status block in ram. */
9637 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9640 tg3_tx_rcbs_init(tp);
9641 tg3_rx_ret_rcbs_init(tp);
9644 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9646 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9648 if (!tg3_flag(tp, 5750_PLUS) ||
9649 tg3_flag(tp, 5780_CLASS) ||
9650 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9651 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9652 tg3_flag(tp, 57765_PLUS))
9653 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9654 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9655 tg3_asic_rev(tp) == ASIC_REV_5787)
9656 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9657 else
9658 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9660 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9661 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9663 val = min(nic_rep_thresh, host_rep_thresh);
9664 tw32(RCVBDI_STD_THRESH, val);
9666 if (tg3_flag(tp, 57765_PLUS))
9667 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9669 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9670 return;
9672 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9674 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9676 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9677 tw32(RCVBDI_JUMBO_THRESH, val);
9679 if (tg3_flag(tp, 57765_PLUS))
9680 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
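/* Example of the threshold math above (values for illustration): with
 * rx_pending = 200, host_rep_thresh = max(200 / 8, 1) = 25, and the
 * value programmed into RCVBDI_STD_THRESH is the smaller of that and
 * the NIC-side limit (half the BD cache, capped at rx_std_max_post),
 * so the chip asks for replenishment before its BD cache can run dry.
 */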
9683 static inline u32 calc_crc(unsigned char *buf, int len)
9685 u32 reg;
9686 u32 tmp;
9687 int j, k;
9689 reg = 0xffffffff;
9691 for (j = 0; j < len; j++) {
9692 reg ^= buf[j];
9694 for (k = 0; k < 8; k++) {
9695 tmp = reg & 0x01;
9697 reg >>= 1;
9699 if (tmp)
9700 reg ^= 0xedb88320;
9704 return ~reg;
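/* This is the standard bit-reflected CRC-32 (polynomial 0xedb88320, the
 * same CRC Ethernet uses for its FCS), computed one bit at a time with
 * no lookup table; the final ~reg applies the conventional output
 * inversion.
 */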
9707 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9709 /* accept or reject all multicast frames */
9710 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9711 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9712 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9713 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9716 static void __tg3_set_rx_mode(struct net_device *dev)
9718 struct tg3 *tp = netdev_priv(dev);
9719 u32 rx_mode;
9721 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9722 RX_MODE_KEEP_VLAN_TAG);
9724 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9725 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9726 * flag clear.
9727 */
9728 if (!tg3_flag(tp, ENABLE_ASF))
9729 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9730 #endif
9732 if (dev->flags & IFF_PROMISC) {
9733 /* Promiscuous mode. */
9734 rx_mode |= RX_MODE_PROMISC;
9735 } else if (dev->flags & IFF_ALLMULTI) {
9736 /* Accept all multicast. */
9737 tg3_set_multi(tp, 1);
9738 } else if (netdev_mc_empty(dev)) {
9739 /* Reject all multicast. */
9740 tg3_set_multi(tp, 0);
9741 } else {
9742 /* Accept one or more multicast(s). */
9743 struct netdev_hw_addr *ha;
9744 u32 mc_filter[4] = { 0, };
9745 u32 regidx;
9746 u32 bit;
9747 u32 crc;
9749 netdev_for_each_mc_addr(ha, dev) {
9750 crc = calc_crc(ha->addr, ETH_ALEN);
9751 bit = ~crc & 0x7f;
9752 regidx = (bit & 0x60) >> 5;
9753 bit &= 0x1f;
9754 mc_filter[regidx] |= (1 << bit);
9757 tw32(MAC_HASH_REG_0, mc_filter[0]);
9758 tw32(MAC_HASH_REG_1, mc_filter[1]);
9759 tw32(MAC_HASH_REG_2, mc_filter[2]);
9760 tw32(MAC_HASH_REG_3, mc_filter[3]);
9763 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9764 rx_mode |= RX_MODE_PROMISC;
9765 } else if (!(dev->flags & IFF_PROMISC)) {
9766 /* Add all entries to the MAC address filter list */
9767 int i = 0;
9768 struct netdev_hw_addr *ha;
9770 netdev_for_each_uc_addr(ha, dev) {
9771 __tg3_set_one_mac_addr(tp, ha->addr,
9772 i + TG3_UCAST_ADDR_IDX(tp));
9773 i++;
9777 if (rx_mode != tp->rx_mode) {
9778 tp->rx_mode = rx_mode;
9779 tw32_f(MAC_RX_MODE, rx_mode);
9780 udelay(10);
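/* The multicast filter above is a 128-bit hash: ~crc & 0x7f derives a
 * 7-bit bucket from the CRC of the destination address, the top two
 * bucket bits ((bit & 0x60) >> 5) select one of the four 32-bit
 * MAC_HASH_REG_* registers, and the low five bits select the bit within
 * it. A multicast frame is accepted when its bucket bit is set.
 */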
9784 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9786 int i;
9788 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9789 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9792 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9794 int i;
9796 if (!tg3_flag(tp, SUPPORT_MSIX))
9797 return;
9799 if (tp->rxq_cnt == 1) {
9800 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9801 return;
9804 /* Validate table against current IRQ count */
9805 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9806 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9807 break;
9810 if (i != TG3_RSS_INDIR_TBL_SIZE)
9811 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9814 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9816 int i = 0;
9817 u32 reg = MAC_RSS_INDIR_TBL_0;
9819 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9820 u32 val = tp->rss_ind_tbl[i];
9821 i++;
9822 for (; i % 8; i++) {
9823 val <<= 4;
9824 val |= tp->rss_ind_tbl[i];
9826 tw32(reg, val);
9827 reg += 4;
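/* Each 32-bit MAC_RSS_INDIR_TBL_* register packs eight 4-bit
 * indirection entries, most significant nibble first, which is why the
 * inner loop above shifts val left by four while (i % 8) != 0.
 */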
9831 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9833 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9834 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9835 else
9836 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9839 /* tp->lock is held. */
9840 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9842 u32 val, rdmac_mode;
9843 int i, err, limit;
9844 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9846 tg3_disable_ints(tp);
9848 tg3_stop_fw(tp);
9850 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9852 if (tg3_flag(tp, INIT_COMPLETE))
9853 tg3_abort_hw(tp, 1);
9855 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9856 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9857 tg3_phy_pull_config(tp);
9858 tg3_eee_pull_config(tp, NULL);
9859 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9862 /* Enable MAC control of LPI */
9863 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9864 tg3_setup_eee(tp);
9866 if (reset_phy)
9867 tg3_phy_reset(tp);
9869 err = tg3_chip_reset(tp);
9870 if (err)
9871 return err;
9873 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9875 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9876 val = tr32(TG3_CPMU_CTRL);
9877 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9878 tw32(TG3_CPMU_CTRL, val);
9880 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9881 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9882 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9883 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9885 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9886 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9887 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9888 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9890 val = tr32(TG3_CPMU_HST_ACC);
9891 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9892 val |= CPMU_HST_ACC_MACCLK_6_25;
9893 tw32(TG3_CPMU_HST_ACC, val);
9896 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9897 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9898 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9899 PCIE_PWR_MGMT_L1_THRESH_4MS;
9900 tw32(PCIE_PWR_MGMT_THRESH, val);
9902 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9903 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9905 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9907 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9908 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9911 if (tg3_flag(tp, L1PLLPD_EN)) {
9912 u32 grc_mode = tr32(GRC_MODE);
9914 /* Access the lower 1K of PL PCIE block registers. */
9915 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9916 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9918 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9919 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9920 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9922 tw32(GRC_MODE, grc_mode);
9925 if (tg3_flag(tp, 57765_CLASS)) {
9926 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9927 u32 grc_mode = tr32(GRC_MODE);
9929 /* Access the lower 1K of PL PCIE block registers. */
9930 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9931 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9933 val = tr32(TG3_PCIE_TLDLPL_PORT +
9934 TG3_PCIE_PL_LO_PHYCTL5);
9935 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9936 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9938 tw32(GRC_MODE, grc_mode);
9941 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9942 u32 grc_mode;
9944 /* Fix transmit hangs */
9945 val = tr32(TG3_CPMU_PADRNG_CTL);
9946 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9947 tw32(TG3_CPMU_PADRNG_CTL, val);
9949 grc_mode = tr32(GRC_MODE);
9951 /* Access the lower 1K of DL PCIE block registers. */
9952 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9953 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9955 val = tr32(TG3_PCIE_TLDLPL_PORT +
9956 TG3_PCIE_DL_LO_FTSMAX);
9957 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9958 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9959 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9961 tw32(GRC_MODE, grc_mode);
9964 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9965 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9966 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9967 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9970 /* This works around an issue with Athlon chipsets on
9971 * B3 tigon3 silicon. This bit has no effect on any
9972 * other revision. But do not set this on PCI Express
9973 * chips and don't even touch the clocks if the CPMU is present.
9974 */
9975 if (!tg3_flag(tp, CPMU_PRESENT)) {
9976 if (!tg3_flag(tp, PCI_EXPRESS))
9977 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9978 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9981 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9982 tg3_flag(tp, PCIX_MODE)) {
9983 val = tr32(TG3PCI_PCISTATE);
9984 val |= PCISTATE_RETRY_SAME_DMA;
9985 tw32(TG3PCI_PCISTATE, val);
9988 if (tg3_flag(tp, ENABLE_APE)) {
9989 /* Allow reads and writes to the
9990 * APE register and memory space.
9991 */
9992 val = tr32(TG3PCI_PCISTATE);
9993 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9994 PCISTATE_ALLOW_APE_SHMEM_WR |
9995 PCISTATE_ALLOW_APE_PSPACE_WR;
9996 tw32(TG3PCI_PCISTATE, val);
9999 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10000 /* Enable some hw fixes. */
10001 val = tr32(TG3PCI_MSI_DATA);
10002 val |= (1 << 26) | (1 << 28) | (1 << 29);
10003 tw32(TG3PCI_MSI_DATA, val);
10006 /* Descriptor ring init may make accesses to the
10007 * NIC SRAM area to set up the TX descriptors, so we
10008 * can only do this after the hardware has been
10009 * successfully reset.
10010 */
10011 err = tg3_init_rings(tp);
10012 if (err)
10013 return err;
10015 if (tg3_flag(tp, 57765_PLUS)) {
10016 val = tr32(TG3PCI_DMA_RW_CTRL) &
10017 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10018 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10019 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10020 if (!tg3_flag(tp, 57765_CLASS) &&
10021 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10022 tg3_asic_rev(tp) != ASIC_REV_5762)
10023 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10024 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10025 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10026 tg3_asic_rev(tp) != ASIC_REV_5761) {
10027 /* This value is determined during the probe-time DMA
10028 * engine test, tg3_test_dma.
10029 */
10030 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10033 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10034 GRC_MODE_4X_NIC_SEND_RINGS |
10035 GRC_MODE_NO_TX_PHDR_CSUM |
10036 GRC_MODE_NO_RX_PHDR_CSUM);
10037 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10039 /* The pseudo-header checksum is done by hardware logic and not
10040 * the offload processors, so make the chip do the pseudo-
10041 * header checksums on receive. For transmit it is more
10042 * convenient to do the pseudo-header checksum in software
10043 * as Linux does that on transmit for us in all cases.
10044 */
10045 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10047 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10048 if (tp->rxptpctl)
10049 tw32(TG3_RX_PTP_CTL,
10050 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10052 if (tg3_flag(tp, PTP_CAPABLE))
10053 val |= GRC_MODE_TIME_SYNC_ENABLE;
10055 tw32(GRC_MODE, tp->grc_mode | val);
10057 /* On one of the AMD platforms, MRRS is restricted to 4000 because
10058 * of a south bridge limitation. As a workaround, the driver sets
10059 * MRRS to 2048 instead of the default 4096.
10060 */
10061 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10062 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10063 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10064 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10067 /* Set up the timer prescaler register. The clock is always 66 MHz. */
10068 val = tr32(GRC_MISC_CFG);
10069 val &= ~0xff;
10070 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10071 tw32(GRC_MISC_CFG, val);
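/* With the 66 MHz clock noted above, a prescaler value of 65 presumably
 * divides by 65 + 1 = 66, giving the chip's timers a 1 MHz (1 us) tick.
 */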
10073 /* Initialize MBUF/DESC pool. */
10074 if (tg3_flag(tp, 5750_PLUS)) {
10075 /* Do nothing. */
10076 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10077 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10078 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10079 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10080 else
10081 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10082 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10083 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10084 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10085 int fw_len;
10087 fw_len = tp->fw_len;
10088 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10089 tw32(BUFMGR_MB_POOL_ADDR,
10090 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10091 tw32(BUFMGR_MB_POOL_SIZE,
10092 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10095 if (tp->dev->mtu <= ETH_DATA_LEN) {
10096 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10097 tp->bufmgr_config.mbuf_read_dma_low_water);
10098 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10099 tp->bufmgr_config.mbuf_mac_rx_low_water);
10100 tw32(BUFMGR_MB_HIGH_WATER,
10101 tp->bufmgr_config.mbuf_high_water);
10102 } else {
10103 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10104 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10105 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10106 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10107 tw32(BUFMGR_MB_HIGH_WATER,
10108 tp->bufmgr_config.mbuf_high_water_jumbo);
10110 tw32(BUFMGR_DMA_LOW_WATER,
10111 tp->bufmgr_config.dma_low_water);
10112 tw32(BUFMGR_DMA_HIGH_WATER,
10113 tp->bufmgr_config.dma_high_water);
10115 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10116 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10117 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10118 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10119 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10120 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10121 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10122 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10123 tw32(BUFMGR_MODE, val);
10124 for (i = 0; i < 2000; i++) {
10125 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10126 break;
10127 udelay(10);
10129 if (i >= 2000) {
10130 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10131 return -ENODEV;
10134 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10135 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10137 tg3_setup_rxbd_thresholds(tp);
10139 /* Initialize TG3_BDINFO's at:
10140 * RCVDBDI_STD_BD: standard eth size rx ring
10141 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10142 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10143 *
10144 * like so:
10145 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10146 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10147 * ring attribute flags
10148 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10149 *
10150 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10151 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10152 *
10153 * The size of each ring is fixed in the firmware, but the location is
10154 * configurable.
10155 */
10156 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10157 ((u64) tpr->rx_std_mapping >> 32));
10158 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10159 ((u64) tpr->rx_std_mapping & 0xffffffff));
10160 if (!tg3_flag(tp, 5717_PLUS))
10161 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10162 NIC_SRAM_RX_BUFFER_DESC);
10164 /* Disable the mini ring */
10165 if (!tg3_flag(tp, 5705_PLUS))
10166 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10167 BDINFO_FLAGS_DISABLED);
10169 /* Program the jumbo buffer descriptor ring control
10170 * blocks on those devices that have them.
10171 */
10172 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10173 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10175 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10176 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10177 ((u64) tpr->rx_jmb_mapping >> 32));
10178 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10179 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10180 val = TG3_RX_JMB_RING_SIZE(tp) <<
10181 BDINFO_FLAGS_MAXLEN_SHIFT;
10182 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10183 val | BDINFO_FLAGS_USE_EXT_RECV);
10184 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10185 tg3_flag(tp, 57765_CLASS) ||
10186 tg3_asic_rev(tp) == ASIC_REV_5762)
10187 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10188 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10189 } else {
10190 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10191 BDINFO_FLAGS_DISABLED);
10194 if (tg3_flag(tp, 57765_PLUS)) {
10195 val = TG3_RX_STD_RING_SIZE(tp);
10196 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10197 val |= (TG3_RX_STD_DMA_SZ << 2);
10198 } else
10199 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10200 } else
10201 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10203 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10205 tpr->rx_std_prod_idx = tp->rx_pending;
10206 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10208 tpr->rx_jmb_prod_idx =
10209 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10210 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10212 tg3_rings_reset(tp);
10214 /* Initialize MAC address and backoff seed. */
10215 __tg3_set_mac_addr(tp, false);
10217 /* MTU + ethernet header + FCS + optional VLAN tag */
10218 tw32(MAC_RX_MTU_SIZE,
10219 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10221 /* The slot time is changed by tg3_setup_phy if we
10222 * run at gigabit with half duplex.
10223 */
10224 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10225 (6 << TX_LENGTHS_IPG_SHIFT) |
10226 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10228 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10229 tg3_asic_rev(tp) == ASIC_REV_5762)
10230 val |= tr32(MAC_TX_LENGTHS) &
10231 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10232 TX_LENGTHS_CNT_DWN_VAL_MSK);
10234 tw32(MAC_TX_LENGTHS, val);
10236 /* Receive rules. */
10237 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10238 tw32(RCVLPC_CONFIG, 0x0181);
10240 /* Calculate the RDMAC_MODE setting early; we need it to determine
10241 * the RCVLPC_STATE_ENABLE mask.
10242 */
10243 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10244 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10245 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10246 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10247 RDMAC_MODE_LNGREAD_ENAB);
10249 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10250 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10252 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10253 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10254 tg3_asic_rev(tp) == ASIC_REV_57780)
10255 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10256 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10257 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10259 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10260 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10261 if (tg3_flag(tp, TSO_CAPABLE) &&
10262 tg3_asic_rev(tp) == ASIC_REV_5705) {
10263 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10264 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10265 !tg3_flag(tp, IS_5788)) {
10266 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10270 if (tg3_flag(tp, PCI_EXPRESS))
10271 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10273 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10274 tp->dma_limit = 0;
10275 if (tp->dev->mtu <= ETH_DATA_LEN) {
10276 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10277 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10281 if (tg3_flag(tp, HW_TSO_1) ||
10282 tg3_flag(tp, HW_TSO_2) ||
10283 tg3_flag(tp, HW_TSO_3))
10284 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10286 if (tg3_flag(tp, 57765_PLUS) ||
10287 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10288 tg3_asic_rev(tp) == ASIC_REV_57780)
10289 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10291 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10292 tg3_asic_rev(tp) == ASIC_REV_5762)
10293 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10295 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10296 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10297 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10298 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10299 tg3_flag(tp, 57765_PLUS)) {
10300 u32 tgtreg;
10302 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10303 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10304 else
10305 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10307 val = tr32(tgtreg);
10308 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10309 tg3_asic_rev(tp) == ASIC_REV_5762) {
10310 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10311 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10312 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10313 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10314 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10315 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10317 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10320 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10321 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10322 tg3_asic_rev(tp) == ASIC_REV_5762) {
10323 u32 tgtreg;
10325 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10326 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10327 else
10328 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10330 val = tr32(tgtreg);
10331 tw32(tgtreg, val |
10332 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10333 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10336 /* Receive/send statistics. */
10337 if (tg3_flag(tp, 5750_PLUS)) {
10338 val = tr32(RCVLPC_STATS_ENABLE);
10339 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10340 tw32(RCVLPC_STATS_ENABLE, val);
10341 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10342 tg3_flag(tp, TSO_CAPABLE)) {
10343 val = tr32(RCVLPC_STATS_ENABLE);
10344 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10345 tw32(RCVLPC_STATS_ENABLE, val);
10346 } else {
10347 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10349 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10350 tw32(SNDDATAI_STATSENAB, 0xffffff);
10351 tw32(SNDDATAI_STATSCTRL,
10352 (SNDDATAI_SCTRL_ENABLE |
10353 SNDDATAI_SCTRL_FASTUPD));
10355 /* Set up the host coalescing engine. */
10356 tw32(HOSTCC_MODE, 0);
10357 for (i = 0; i < 2000; i++) {
10358 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10359 break;
10360 udelay(10);
10363 __tg3_set_coalesce(tp, &tp->coal);
10365 if (!tg3_flag(tp, 5705_PLUS)) {
10366 /* Status/statistics block address. See tg3_timer,
10367 * the tg3_periodic_fetch_stats call there, and
10368 * tg3_get_stats to see how this works for 5705/5750 chips.
10369 */
10370 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10371 ((u64) tp->stats_mapping >> 32));
10372 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10373 ((u64) tp->stats_mapping & 0xffffffff));
10374 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10376 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10378 /* Clear statistics and status block memory areas */
10379 for (i = NIC_SRAM_STATS_BLK;
10380 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10381 i += sizeof(u32)) {
10382 tg3_write_mem(tp, i, 0);
10383 udelay(40);
10387 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10389 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10390 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10391 if (!tg3_flag(tp, 5705_PLUS))
10392 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10394 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10395 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10396 /* reset to prevent losing 1st rx packet intermittently */
10397 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10398 udelay(10);
10401 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10402 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10403 MAC_MODE_FHDE_ENABLE;
10404 if (tg3_flag(tp, ENABLE_APE))
10405 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10406 if (!tg3_flag(tp, 5705_PLUS) &&
10407 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10408 tg3_asic_rev(tp) != ASIC_REV_5700)
10409 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10410 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10411 udelay(40);
10413 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10414 * If TG3_FLAG_IS_NIC is zero, we should read the
10415 * register to preserve the GPIO settings for LOMs. The GPIOs,
10416 * whether used as inputs or outputs, are set by boot code after
10417 * reset.
10418 */
10419 if (!tg3_flag(tp, IS_NIC)) {
10420 u32 gpio_mask;
10422 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10423 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10424 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10426 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10427 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10428 GRC_LCLCTRL_GPIO_OUTPUT3;
10430 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10431 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10433 tp->grc_local_ctrl &= ~gpio_mask;
10434 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10436 /* GPIO1 must be driven high for eeprom write protect */
10437 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10438 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10439 GRC_LCLCTRL_GPIO_OUTPUT1);
10441 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10442 udelay(100);
10444 if (tg3_flag(tp, USING_MSIX)) {
10445 val = tr32(MSGINT_MODE);
10446 val |= MSGINT_MODE_ENABLE;
10447 if (tp->irq_cnt > 1)
10448 val |= MSGINT_MODE_MULTIVEC_EN;
10449 if (!tg3_flag(tp, 1SHOT_MSI))
10450 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10451 tw32(MSGINT_MODE, val);
10454 if (!tg3_flag(tp, 5705_PLUS)) {
10455 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10456 udelay(40);
10459 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10460 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10461 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10462 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10463 WDMAC_MODE_LNGREAD_ENAB);
10465 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10466 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10467 if (tg3_flag(tp, TSO_CAPABLE) &&
10468 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10469 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10470 /* nothing */
10471 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10472 !tg3_flag(tp, IS_5788)) {
10473 val |= WDMAC_MODE_RX_ACCEL;
10477 /* Enable host coalescing bug fix */
10478 if (tg3_flag(tp, 5755_PLUS))
10479 val |= WDMAC_MODE_STATUS_TAG_FIX;
10481 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10482 val |= WDMAC_MODE_BURST_ALL_DATA;
10484 tw32_f(WDMAC_MODE, val);
10485 udelay(40);
10487 if (tg3_flag(tp, PCIX_MODE)) {
10488 u16 pcix_cmd;
10490 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10491 &pcix_cmd);
10492 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10493 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10494 pcix_cmd |= PCI_X_CMD_READ_2K;
10495 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10496 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10497 pcix_cmd |= PCI_X_CMD_READ_2K;
10499 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10500 pcix_cmd);
10503 tw32_f(RDMAC_MODE, rdmac_mode);
10504 udelay(40);
10506 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10507 tg3_asic_rev(tp) == ASIC_REV_5720) {
10508 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10509 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10510 break;
10512 if (i < TG3_NUM_RDMA_CHANNELS) {
10513 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10514 val |= tg3_lso_rd_dma_workaround_bit(tp);
10515 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10516 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10520 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10521 if (!tg3_flag(tp, 5705_PLUS))
10522 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10524 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10525 tw32(SNDDATAC_MODE,
10526 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10527 else
10528 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10530 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10531 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10532 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10533 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10534 val |= RCVDBDI_MODE_LRG_RING_SZ;
10535 tw32(RCVDBDI_MODE, val);
10536 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10537 if (tg3_flag(tp, HW_TSO_1) ||
10538 tg3_flag(tp, HW_TSO_2) ||
10539 tg3_flag(tp, HW_TSO_3))
10540 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10541 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10542 if (tg3_flag(tp, ENABLE_TSS))
10543 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10544 tw32(SNDBDI_MODE, val);
10545 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10547 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10548 err = tg3_load_5701_a0_firmware_fix(tp);
10549 if (err)
10550 return err;
10553 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10554 /* Ignore any errors from the firmware download. If the download
10555 * fails, the device will operate with EEE disabled.
10557 tg3_load_57766_firmware(tp);
10560 if (tg3_flag(tp, TSO_CAPABLE)) {
10561 err = tg3_load_tso_firmware(tp);
10562 if (err)
10563 return err;
10566 tp->tx_mode = TX_MODE_ENABLE;
10568 if (tg3_flag(tp, 5755_PLUS) ||
10569 tg3_asic_rev(tp) == ASIC_REV_5906)
10570 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10572 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10573 tg3_asic_rev(tp) == ASIC_REV_5762) {
10574 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10575 tp->tx_mode &= ~val;
10576 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10579 tw32_f(MAC_TX_MODE, tp->tx_mode);
10580 udelay(100);
10582 if (tg3_flag(tp, ENABLE_RSS)) {
10583 u32 rss_key[10];
10585 tg3_rss_write_indir_tbl(tp);
10587 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10589 for (i = 0; i < 10 ; i++)
10590 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10593 tp->rx_mode = RX_MODE_ENABLE;
10594 if (tg3_flag(tp, 5755_PLUS))
10595 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10597 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10598 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10600 if (tg3_flag(tp, ENABLE_RSS))
10601 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10602 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10603 RX_MODE_RSS_IPV6_HASH_EN |
10604 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10605 RX_MODE_RSS_IPV4_HASH_EN |
10606 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10608 tw32_f(MAC_RX_MODE, tp->rx_mode);
10609 udelay(10);
10611 tw32(MAC_LED_CTRL, tp->led_ctrl);
10613 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10614 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10615 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10616 udelay(10);
10618 tw32_f(MAC_RX_MODE, tp->rx_mode);
10619 udelay(10);
10621 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10622 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10623 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10624 /* Set drive transmission level to 1.2V */
10625 /* only if the signal pre-emphasis bit is not set */
10626 val = tr32(MAC_SERDES_CFG);
10627 val &= 0xfffff000;
10628 val |= 0x880;
10629 tw32(MAC_SERDES_CFG, val);
10631 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10632 tw32(MAC_SERDES_CFG, 0x616000);
10635 /* Prevent chip from dropping frames when flow control
10636 * is enabled.
10638 if (tg3_flag(tp, 57765_CLASS))
10639 val = 1;
10640 else
10641 val = 2;
10642 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10644 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10645 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10646 /* Use hardware link auto-negotiation */
10647 tg3_flag_set(tp, HW_AUTONEG);
10650 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10651 tg3_asic_rev(tp) == ASIC_REV_5714) {
10652 u32 tmp;
10654 tmp = tr32(SERDES_RX_CTRL);
10655 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10656 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10657 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10658 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10661 if (!tg3_flag(tp, USE_PHYLIB)) {
10662 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10663 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10665 err = tg3_setup_phy(tp, false);
10666 if (err)
10667 return err;
10669 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10670 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10671 u32 tmp;
10673 /* Clear CRC stats. */
10674 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10675 tg3_writephy(tp, MII_TG3_TEST1,
10676 tmp | MII_TG3_TEST1_CRC_EN);
10677 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10682 __tg3_set_rx_mode(tp->dev);
10684 /* Initialize receive rules. */
10685 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10686 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10687 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10688 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10690 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10691 limit = 8;
10692 else
10693 limit = 16;
10694 if (tg3_flag(tp, ENABLE_ASF))
10695 limit -= 4;
10696 switch (limit) {
10697 case 16:
10698 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10699 case 15:
10700 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10701 case 14:
10702 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10703 case 13:
10704 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10705 case 12:
10706 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10707 case 11:
10708 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10709 case 10:
10710 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10711 case 9:
10712 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10713 case 8:
10714 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10715 case 7:
10716 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10717 case 6:
10718 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10719 case 5:
10720 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10721 case 4:
10722 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10723 case 3:
10724 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10725 case 2:
10726 case 1:
10728 default:
10729 break;
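/* Clarifying note: the switch above relies on fall-through. Entering
 * at case "limit" clears every rule slot from limit-1 down to 4;
 * rules 0 and 1 keep the defaults programmed above, and rules 2 and 3
 * are left alone (hence the commented-out writes), so cases 4 and
 * below do nothing.
 */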
10732 if (tg3_flag(tp, ENABLE_APE))
10733 /* Write our heartbeat update interval to APE. */
10734 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10735 APE_HOST_HEARTBEAT_INT_DISABLE);
10737 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10739 return 0;
10742 /* Called at device open time to get the chip ready for
10743 * packet processing. Invoked with tp->lock held.
10745 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10747 /* Chip may have been just powered on. If so, the boot code may still
10748 * be running initialization. Wait for it to finish to avoid races in
10749 * accessing the hardware.
10751 tg3_enable_register_access(tp);
10752 tg3_poll_fw(tp);
10754 tg3_switch_clocks(tp);
10756 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10758 return tg3_reset_hw(tp, reset_phy);
10761 #ifdef CONFIG_TIGON3_HWMON
10762 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10764 int i;
10766 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10767 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10769 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10770 off += len;
10772 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10773 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10774 memset(ocir, 0, TG3_OCIR_LEN);
10778 /* sysfs attributes for hwmon */
10779 static ssize_t tg3_show_temp(struct device *dev,
10780 struct device_attribute *devattr, char *buf)
10782 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10783 struct tg3 *tp = dev_get_drvdata(dev);
10784 u32 temperature;
10786 spin_lock_bh(&tp->lock);
10787 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10788 sizeof(temperature));
10789 spin_unlock_bh(&tp->lock);
10790 return sprintf(buf, "%u\n", temperature * 1000);
10794 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10795 TG3_TEMP_SENSOR_OFFSET);
10796 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10797 TG3_TEMP_CAUTION_OFFSET);
10798 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10799 TG3_TEMP_MAX_OFFSET);
10801 static struct attribute *tg3_attrs[] = {
10802 &sensor_dev_attr_temp1_input.dev_attr.attr,
10803 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10804 &sensor_dev_attr_temp1_max.dev_attr.attr,
10805 NULL
10807 ATTRIBUTE_GROUPS(tg3);
10809 static void tg3_hwmon_close(struct tg3 *tp)
10811 if (tp->hwmon_dev) {
10812 hwmon_device_unregister(tp->hwmon_dev);
10813 tp->hwmon_dev = NULL;
10817 static void tg3_hwmon_open(struct tg3 *tp)
10819 int i;
10820 u32 size = 0;
10821 struct pci_dev *pdev = tp->pdev;
10822 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10824 tg3_sd_scan_scratchpad(tp, ocirs);
10826 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10827 if (!ocirs[i].src_data_length)
10828 continue;
10830 size += ocirs[i].src_hdr_length;
10831 size += ocirs[i].src_data_length;
10834 if (!size)
10835 return;
10837 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10838 tp, tg3_groups);
10839 if (IS_ERR(tp->hwmon_dev)) {
10840 tp->hwmon_dev = NULL;
10841 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10844 #else
10845 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10846 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10847 #endif /* CONFIG_TIGON3_HWMON */
10850 #define TG3_STAT_ADD32(PSTAT, REG) \
10851 do { u32 __val = tr32(REG); \
10852 (PSTAT)->low += __val; \
10853 if ((PSTAT)->low < __val) \
10854 (PSTAT)->high += 1; \
10855 } while (0)
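/* Illustrative sketch (not part of the driver): the carry-detect
 * idiom behind TG3_STAT_ADD32, shown in isolation. After an unsigned
 * 32-bit add, the sum is smaller than the addend iff the add wrapped,
 * which is exactly when the 64-bit high word must be bumped. The
 * names below are hypothetical.
 */
struct u64_split { u32 low, high; };

static inline void add32_carry(struct u64_split *ctr, u32 delta)
{
	ctr->low += delta;	/* 32-bit add, may wrap around */
	if (ctr->low < delta)	/* wrapped: sum ended up below the addend */
		ctr->high += 1;	/* propagate the carry into the high word */
}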
10857 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10859 struct tg3_hw_stats *sp = tp->hw_stats;
10861 if (!tp->link_up)
10862 return;
10864 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10865 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10866 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10867 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10868 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10869 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10870 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10871 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10872 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10873 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10874 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10875 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10876 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10877 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10878 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10879 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10880 u32 val;
10882 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10883 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10884 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10885 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10888 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10889 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10890 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10891 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10892 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10893 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10894 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10895 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10896 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10897 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10898 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10899 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10900 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10901 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10903 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10904 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10905 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10906 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10907 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10908 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10909 } else {
10910 u32 val = tr32(HOSTCC_FLOW_ATTN);
10911 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10912 if (val) {
10913 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10914 sp->rx_discards.low += val;
10915 if (sp->rx_discards.low < val)
10916 sp->rx_discards.high += 1;
10918 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10920 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
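/* Illustrative note (not in the original source): tg3_chk_missed_msi
 * below works around lost one-shot MSIs. If a NAPI context reports
 * pending work but neither its rx return-ring pointer nor its tx
 * consumer index has moved since the previous timer tick, the
 * interrupt was probably missed; after one grace tick counted in
 * chk_msi_cnt, the handler is invoked directly via tg3_msi(0, tnapi).
 */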
10923 static void tg3_chk_missed_msi(struct tg3 *tp)
10925 u32 i;
10927 for (i = 0; i < tp->irq_cnt; i++) {
10928 struct tg3_napi *tnapi = &tp->napi[i];
10930 if (tg3_has_work(tnapi)) {
10931 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10932 tnapi->last_tx_cons == tnapi->tx_cons) {
10933 if (tnapi->chk_msi_cnt < 1) {
10934 tnapi->chk_msi_cnt++;
10935 return;
10937 tg3_msi(0, tnapi);
10940 tnapi->chk_msi_cnt = 0;
10941 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10942 tnapi->last_tx_cons = tnapi->tx_cons;
10946 static void tg3_timer(struct timer_list *t)
10948 struct tg3 *tp = from_timer(tp, t, timer);
10950 spin_lock(&tp->lock);
10952 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10953 spin_unlock(&tp->lock);
10954 goto restart_timer;
10957 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10958 tg3_flag(tp, 57765_CLASS))
10959 tg3_chk_missed_msi(tp);
10961 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10962 /* BCM4785: Flush posted writes from GbE to host memory. */
10963 tr32(HOSTCC_MODE);
10966 if (!tg3_flag(tp, TAGGED_STATUS)) {
10967 /* All of this garbage is because, when using non-tagged
10968 * IRQ status, the mailbox/status_block protocol the chip
10969 * uses with the CPU is race prone.
10971 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10972 tw32(GRC_LOCAL_CTRL,
10973 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10974 } else {
10975 tw32(HOSTCC_MODE, tp->coalesce_mode |
10976 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10979 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10980 spin_unlock(&tp->lock);
10981 tg3_reset_task_schedule(tp);
10982 goto restart_timer;
10986 /* This part only runs once per second. */
10987 if (!--tp->timer_counter) {
10988 if (tg3_flag(tp, 5705_PLUS))
10989 tg3_periodic_fetch_stats(tp);
10991 if (tp->setlpicnt && !--tp->setlpicnt)
10992 tg3_phy_eee_enable(tp);
10994 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10995 u32 mac_stat;
10996 int phy_event;
10998 mac_stat = tr32(MAC_STATUS);
11000 phy_event = 0;
11001 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11002 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11003 phy_event = 1;
11004 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11005 phy_event = 1;
11007 if (phy_event)
11008 tg3_setup_phy(tp, false);
11009 } else if (tg3_flag(tp, POLL_SERDES)) {
11010 u32 mac_stat = tr32(MAC_STATUS);
11011 int need_setup = 0;
11013 if (tp->link_up &&
11014 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11015 need_setup = 1;
11017 if (!tp->link_up &&
11018 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11019 MAC_STATUS_SIGNAL_DET))) {
11020 need_setup = 1;
11022 if (need_setup) {
11023 if (!tp->serdes_counter) {
11024 tw32_f(MAC_MODE,
11025 (tp->mac_mode &
11026 ~MAC_MODE_PORT_MODE_MASK));
11027 udelay(40);
11028 tw32_f(MAC_MODE, tp->mac_mode);
11029 udelay(40);
11031 tg3_setup_phy(tp, false);
11033 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11034 tg3_flag(tp, 5780_CLASS)) {
11035 tg3_serdes_parallel_detect(tp);
11036 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11037 u32 cpmu = tr32(TG3_CPMU_STATUS);
11038 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11039 TG3_CPMU_STATUS_LINK_MASK);
11041 if (link_up != tp->link_up)
11042 tg3_setup_phy(tp, false);
11045 tp->timer_counter = tp->timer_multiplier;
11048 /* Heartbeat is only sent once every 2 seconds.
11050 * The heartbeat is to tell the ASF firmware that the host
11051 * driver is still alive. In the event that the OS crashes,
11052 * ASF needs to reset the hardware to free up the FIFO space
11053 * that may be filled with rx packets destined for the host.
11054 * If the FIFO is full, ASF will no longer function properly.
11056 * Unintended resets have been reported on real time kernels
11057 * where the timer doesn't run on time. Netpoll will also have
11058 * the same problem.
11060 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11061 * to check the ring condition when the heartbeat is expiring
11062 * before doing the reset. This will prevent most unintended
11063 * resets.
11065 if (!--tp->asf_counter) {
11066 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11067 tg3_wait_for_event_ack(tp);
11069 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11070 FWCMD_NICDRV_ALIVE3);
11071 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11072 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11073 TG3_FW_UPDATE_TIMEOUT_SEC);
11075 tg3_generate_fw_event(tp);
11077 tp->asf_counter = tp->asf_multiplier;
11080 spin_unlock(&tp->lock);
11082 restart_timer:
11083 tp->timer.expires = jiffies + tp->timer_offset;
11084 add_timer(&tp->timer);
11087 static void tg3_timer_init(struct tg3 *tp)
11089 if (tg3_flag(tp, TAGGED_STATUS) &&
11090 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11091 !tg3_flag(tp, 57765_CLASS))
11092 tp->timer_offset = HZ;
11093 else
11094 tp->timer_offset = HZ / 10;
11096 BUG_ON(tp->timer_offset > HZ);
11098 tp->timer_multiplier = (HZ / tp->timer_offset);
11099 tp->asf_multiplier = (HZ / tp->timer_offset) *
11100 TG3_FW_UPDATE_FREQ_SEC;
11102 timer_setup(&tp->timer, tg3_timer, 0);
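/* Worked example (illustrative): with HZ = 1000 and tagged status on
 * a chip that needs no missed-MSI check, timer_offset = HZ, so
 * tg3_timer runs once per second and timer_multiplier = 1. On the
 * other chips timer_offset = HZ / 10 = 100: the timer fires ten
 * times a second, timer_multiplier = 10, and the "once per second"
 * work in tg3_timer runs on every tenth tick. asf_multiplier scales
 * the same tick rate by TG3_FW_UPDATE_FREQ_SEC, pacing the ASF
 * heartbeat sent from tg3_timer.
 */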
11105 static void tg3_timer_start(struct tg3 *tp)
11107 tp->asf_counter = tp->asf_multiplier;
11108 tp->timer_counter = tp->timer_multiplier;
11110 tp->timer.expires = jiffies + tp->timer_offset;
11111 add_timer(&tp->timer);
11114 static void tg3_timer_stop(struct tg3 *tp)
11116 del_timer_sync(&tp->timer);
11119 /* Restart hardware after configuration changes, self-test, etc.
11120 * Invoked with tp->lock held.
11122 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11123 __releases(tp->lock)
11124 __acquires(tp->lock)
11126 int err;
11128 err = tg3_init_hw(tp, reset_phy);
11129 if (err) {
11130 netdev_err(tp->dev,
11131 "Failed to re-initialize device, aborting\n");
11132 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11133 tg3_full_unlock(tp);
11134 tg3_timer_stop(tp);
11135 tp->irq_sync = 0;
11136 tg3_napi_enable(tp);
11137 dev_close(tp->dev);
11138 tg3_full_lock(tp, 0);
11140 return err;
11143 static void tg3_reset_task(struct work_struct *work)
11145 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11146 int err;
11148 rtnl_lock();
11149 tg3_full_lock(tp, 0);
11151 if (!netif_running(tp->dev)) {
11152 tg3_flag_clear(tp, RESET_TASK_PENDING);
11153 tg3_full_unlock(tp);
11154 rtnl_unlock();
11155 return;
11158 tg3_full_unlock(tp);
11160 tg3_phy_stop(tp);
11162 tg3_netif_stop(tp);
11164 tg3_full_lock(tp, 1);
11166 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11167 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11168 tp->write32_rx_mbox = tg3_write_flush_reg32;
11169 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11170 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11173 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11174 err = tg3_init_hw(tp, true);
11175 if (err)
11176 goto out;
11178 tg3_netif_start(tp);
11180 out:
11181 tg3_full_unlock(tp);
11183 if (!err)
11184 tg3_phy_start(tp);
11186 tg3_flag_clear(tp, RESET_TASK_PENDING);
11187 rtnl_unlock();
11190 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11192 irq_handler_t fn;
11193 unsigned long flags;
11194 char *name;
11195 struct tg3_napi *tnapi = &tp->napi[irq_num];
11197 if (tp->irq_cnt == 1)
11198 name = tp->dev->name;
11199 else {
11200 name = &tnapi->irq_lbl[0];
11201 if (tnapi->tx_buffers && tnapi->rx_rcb)
11202 snprintf(name, IFNAMSIZ,
11203 "%s-txrx-%d", tp->dev->name, irq_num);
11204 else if (tnapi->tx_buffers)
11205 snprintf(name, IFNAMSIZ,
11206 "%s-tx-%d", tp->dev->name, irq_num);
11207 else if (tnapi->rx_rcb)
11208 snprintf(name, IFNAMSIZ,
11209 "%s-rx-%d", tp->dev->name, irq_num);
11210 else
11211 snprintf(name, IFNAMSIZ,
11212 "%s-%d", tp->dev->name, irq_num);
11213 name[IFNAMSIZ-1] = 0;
11216 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11217 fn = tg3_msi;
11218 if (tg3_flag(tp, 1SHOT_MSI))
11219 fn = tg3_msi_1shot;
11220 flags = 0;
11221 } else {
11222 fn = tg3_interrupt;
11223 if (tg3_flag(tp, TAGGED_STATUS))
11224 fn = tg3_interrupt_tagged;
11225 flags = IRQF_SHARED;
11228 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11231 static int tg3_test_interrupt(struct tg3 *tp)
11233 struct tg3_napi *tnapi = &tp->napi[0];
11234 struct net_device *dev = tp->dev;
11235 int err, i, intr_ok = 0;
11236 u32 val;
11238 if (!netif_running(dev))
11239 return -ENODEV;
11241 tg3_disable_ints(tp);
11243 free_irq(tnapi->irq_vec, tnapi);
11246 * Turn off MSI one shot mode. Otherwise this test has no
11247 * observable way to know whether the interrupt was delivered.
11249 if (tg3_flag(tp, 57765_PLUS)) {
11250 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11251 tw32(MSGINT_MODE, val);
11254 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11255 IRQF_SHARED, dev->name, tnapi);
11256 if (err)
11257 return err;
11259 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11260 tg3_enable_ints(tp);
11262 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11263 tnapi->coal_now);
11265 for (i = 0; i < 5; i++) {
11266 u32 int_mbox, misc_host_ctrl;
11268 int_mbox = tr32_mailbox(tnapi->int_mbox);
11269 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11271 if ((int_mbox != 0) ||
11272 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11273 intr_ok = 1;
11274 break;
11277 if (tg3_flag(tp, 57765_PLUS) &&
11278 tnapi->hw_status->status_tag != tnapi->last_tag)
11279 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11281 msleep(10);
11284 tg3_disable_ints(tp);
11286 free_irq(tnapi->irq_vec, tnapi);
11288 err = tg3_request_irq(tp, 0);
11290 if (err)
11291 return err;
11293 if (intr_ok) {
11294 /* Reenable MSI one shot mode. */
11295 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11296 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11297 tw32(MSGINT_MODE, val);
11299 return 0;
11302 return -EIO;
11305 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11306 * successfully restored
11308 static int tg3_test_msi(struct tg3 *tp)
11310 int err;
11311 u16 pci_cmd;
11313 if (!tg3_flag(tp, USING_MSI))
11314 return 0;
11316 /* Turn off SERR reporting in case MSI terminates with Master
11317 * Abort.
11319 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11320 pci_write_config_word(tp->pdev, PCI_COMMAND,
11321 pci_cmd & ~PCI_COMMAND_SERR);
11323 err = tg3_test_interrupt(tp);
11325 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11327 if (!err)
11328 return 0;
11330 /* other failures */
11331 if (err != -EIO)
11332 return err;
11334 /* MSI test failed, go back to INTx mode */
11335 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11336 "to INTx mode. Please report this failure to the PCI "
11337 "maintainer and include system chipset information\n");
11339 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11341 pci_disable_msi(tp->pdev);
11343 tg3_flag_clear(tp, USING_MSI);
11344 tp->napi[0].irq_vec = tp->pdev->irq;
11346 err = tg3_request_irq(tp, 0);
11347 if (err)
11348 return err;
11350 /* Need to reset the chip because the MSI cycle may have terminated
11351 * with Master Abort.
11353 tg3_full_lock(tp, 1);
11355 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11356 err = tg3_init_hw(tp, true);
11358 tg3_full_unlock(tp);
11360 if (err)
11361 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11363 return err;
11366 static int tg3_request_firmware(struct tg3 *tp)
11368 const struct tg3_firmware_hdr *fw_hdr;
11370 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11371 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11372 tp->fw_needed);
11373 return -ENOENT;
11376 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11378 /* Firmware blob starts with version numbers, followed by
11379 * start address and _full_ length including BSS sections
11380 * (which must be longer than the actual data, of course).
11383 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11384 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11385 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11386 tp->fw_len, tp->fw_needed);
11387 release_firmware(tp->fw);
11388 tp->fw = NULL;
11389 return -EINVAL;
11392 /* We no longer need firmware; we have it. */
11393 tp->fw_needed = NULL;
11394 return 0;
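/* Hedged sketch (field names assumed; the real definition lives in
 * tg3.h): the header parsed above is three big-endian 32-bit words
 * at the front of the blob, roughly:
 *
 *	struct tg3_firmware_hdr {
 *		__be32 version;
 *		__be32 base_addr;	(load address in NIC memory)
 *		__be32 len;		(full image length, BSS included)
 *	};
 *
 * The sanity check rejects the blob only when the advertised length
 * is smaller than the file payload (file size minus header), since
 * BSS is counted in len but never stored in the file.
 */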
11397 static u32 tg3_irq_count(struct tg3 *tp)
11399 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11401 if (irq_cnt > 1) {
11402 /* We want as many rx rings enabled as there are cpus.
11403 * In multiqueue MSI-X mode, the first MSI-X vector
11404 * only deals with link interrupts, etc, so we add
11405 * one to the number of vectors we are requesting.
11407 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11410 return irq_cnt;
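/* Worked example (illustrative): on a 4-CPU box with rxq_cnt = 4 and
 * txq_cnt = 1, irq_cnt starts at max(4, 1) = 4; since that is more
 * than one, an extra vector is reserved for the link/control
 * interrupt on vector 0, so min(5, tp->irq_max) vectors end up being
 * requested.
 */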
11413 static bool tg3_enable_msix(struct tg3 *tp)
11415 int i, rc;
11416 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11418 tp->txq_cnt = tp->txq_req;
11419 tp->rxq_cnt = tp->rxq_req;
11420 if (!tp->rxq_cnt)
11421 tp->rxq_cnt = netif_get_num_default_rss_queues();
11422 if (tp->rxq_cnt > tp->rxq_max)
11423 tp->rxq_cnt = tp->rxq_max;
11425 /* Disable multiple TX rings by default. Simple round-robin hardware
11426 * scheduling of the TX rings can cause starvation of rings with
11427 * small packets when other rings have TSO or jumbo packets.
11429 if (!tp->txq_req)
11430 tp->txq_cnt = 1;
11432 tp->irq_cnt = tg3_irq_count(tp);
11434 for (i = 0; i < tp->irq_max; i++) {
11435 msix_ent[i].entry = i;
11436 msix_ent[i].vector = 0;
11439 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11440 if (rc < 0) {
11441 return false;
11442 } else if (rc < tp->irq_cnt) {
11443 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11444 tp->irq_cnt, rc);
11445 tp->irq_cnt = rc;
11446 tp->rxq_cnt = max(rc - 1, 1);
11447 if (tp->txq_cnt)
11448 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11451 for (i = 0; i < tp->irq_max; i++)
11452 tp->napi[i].irq_vec = msix_ent[i].vector;
11454 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11455 pci_disable_msix(tp->pdev);
11456 return false;
11459 if (tp->irq_cnt == 1)
11460 return true;
11462 tg3_flag_set(tp, ENABLE_RSS);
11464 if (tp->txq_cnt > 1)
11465 tg3_flag_set(tp, ENABLE_TSS);
11467 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11469 return true;
11472 static void tg3_ints_init(struct tg3 *tp)
11474 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11475 !tg3_flag(tp, TAGGED_STATUS)) {
11476 /* All MSI supporting chips should support tagged
11477 * status. Assert that this is the case.
11479 netdev_warn(tp->dev,
11480 "MSI without TAGGED_STATUS? Not using MSI\n");
11481 goto defcfg;
11484 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11485 tg3_flag_set(tp, USING_MSIX);
11486 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11487 tg3_flag_set(tp, USING_MSI);
11489 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11490 u32 msi_mode = tr32(MSGINT_MODE);
11491 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11492 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11493 if (!tg3_flag(tp, 1SHOT_MSI))
11494 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11495 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11497 defcfg:
11498 if (!tg3_flag(tp, USING_MSIX)) {
11499 tp->irq_cnt = 1;
11500 tp->napi[0].irq_vec = tp->pdev->irq;
11503 if (tp->irq_cnt == 1) {
11504 tp->txq_cnt = 1;
11505 tp->rxq_cnt = 1;
11506 netif_set_real_num_tx_queues(tp->dev, 1);
11507 netif_set_real_num_rx_queues(tp->dev, 1);
11511 static void tg3_ints_fini(struct tg3 *tp)
11513 if (tg3_flag(tp, USING_MSIX))
11514 pci_disable_msix(tp->pdev);
11515 else if (tg3_flag(tp, USING_MSI))
11516 pci_disable_msi(tp->pdev);
11517 tg3_flag_clear(tp, USING_MSI);
11518 tg3_flag_clear(tp, USING_MSIX);
11519 tg3_flag_clear(tp, ENABLE_RSS);
11520 tg3_flag_clear(tp, ENABLE_TSS);
11523 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11524 bool init)
11526 struct net_device *dev = tp->dev;
11527 int i, err;
11530 * Setup interrupts first so we know how
11531 * many NAPI resources to allocate
11533 tg3_ints_init(tp);
11535 tg3_rss_check_indir_tbl(tp);
11537 /* The placement of this call is tied
11538 * to the setup and use of Host TX descriptors.
11540 err = tg3_alloc_consistent(tp);
11541 if (err)
11542 goto out_ints_fini;
11544 tg3_napi_init(tp);
11546 tg3_napi_enable(tp);
11548 for (i = 0; i < tp->irq_cnt; i++) {
11549 err = tg3_request_irq(tp, i);
11550 if (err) {
11551 for (i--; i >= 0; i--) {
11552 struct tg3_napi *tnapi = &tp->napi[i];
11554 free_irq(tnapi->irq_vec, tnapi);
11556 goto out_napi_fini;
11560 tg3_full_lock(tp, 0);
11562 if (init)
11563 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11565 err = tg3_init_hw(tp, reset_phy);
11566 if (err) {
11567 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11568 tg3_free_rings(tp);
11571 tg3_full_unlock(tp);
11573 if (err)
11574 goto out_free_irq;
11576 if (test_irq && tg3_flag(tp, USING_MSI)) {
11577 err = tg3_test_msi(tp);
11579 if (err) {
11580 tg3_full_lock(tp, 0);
11581 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11582 tg3_free_rings(tp);
11583 tg3_full_unlock(tp);
11585 goto out_napi_fini;
11588 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11589 u32 val = tr32(PCIE_TRANSACTION_CFG);
11591 tw32(PCIE_TRANSACTION_CFG,
11592 val | PCIE_TRANS_CFG_1SHOT_MSI);
11596 tg3_phy_start(tp);
11598 tg3_hwmon_open(tp);
11600 tg3_full_lock(tp, 0);
11602 tg3_timer_start(tp);
11603 tg3_flag_set(tp, INIT_COMPLETE);
11604 tg3_enable_ints(tp);
11606 tg3_ptp_resume(tp);
11608 tg3_full_unlock(tp);
11610 netif_tx_start_all_queues(dev);
11613 * Reset the loopback feature if it was turned on while the device
11614 * was down, to make sure that it is installed properly now.
11616 if (dev->features & NETIF_F_LOOPBACK)
11617 tg3_set_loopback(dev, dev->features);
11619 return 0;
11621 out_free_irq:
11622 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11623 struct tg3_napi *tnapi = &tp->napi[i];
11624 free_irq(tnapi->irq_vec, tnapi);
11627 out_napi_fini:
11628 tg3_napi_disable(tp);
11629 tg3_napi_fini(tp);
11630 tg3_free_consistent(tp);
11632 out_ints_fini:
11633 tg3_ints_fini(tp);
11635 return err;
11638 static void tg3_stop(struct tg3 *tp)
11640 int i;
11642 tg3_reset_task_cancel(tp);
11643 tg3_netif_stop(tp);
11645 tg3_timer_stop(tp);
11647 tg3_hwmon_close(tp);
11649 tg3_phy_stop(tp);
11651 tg3_full_lock(tp, 1);
11653 tg3_disable_ints(tp);
11655 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11656 tg3_free_rings(tp);
11657 tg3_flag_clear(tp, INIT_COMPLETE);
11659 tg3_full_unlock(tp);
11661 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11662 struct tg3_napi *tnapi = &tp->napi[i];
11663 free_irq(tnapi->irq_vec, tnapi);
11666 tg3_ints_fini(tp);
11668 tg3_napi_fini(tp);
11670 tg3_free_consistent(tp);
11673 static int tg3_open(struct net_device *dev)
11675 struct tg3 *tp = netdev_priv(dev);
11676 int err;
11678 if (tp->pcierr_recovery) {
11679 netdev_err(dev, "Failed to open device. PCI error recovery "
11680 "in progress\n");
11681 return -EAGAIN;
11684 if (tp->fw_needed) {
11685 err = tg3_request_firmware(tp);
11686 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11687 if (err) {
11688 netdev_warn(tp->dev, "EEE capability disabled\n");
11689 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11690 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11691 netdev_warn(tp->dev, "EEE capability restored\n");
11692 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11694 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11695 if (err)
11696 return err;
11697 } else if (err) {
11698 netdev_warn(tp->dev, "TSO capability disabled\n");
11699 tg3_flag_clear(tp, TSO_CAPABLE);
11700 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11701 netdev_notice(tp->dev, "TSO capability restored\n");
11702 tg3_flag_set(tp, TSO_CAPABLE);
11706 tg3_carrier_off(tp);
11708 err = tg3_power_up(tp);
11709 if (err)
11710 return err;
11712 tg3_full_lock(tp, 0);
11714 tg3_disable_ints(tp);
11715 tg3_flag_clear(tp, INIT_COMPLETE);
11717 tg3_full_unlock(tp);
11719 err = tg3_start(tp,
11720 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11721 true, true);
11722 if (err) {
11723 tg3_frob_aux_power(tp, false);
11724 pci_set_power_state(tp->pdev, PCI_D3hot);
11727 return err;
11730 static int tg3_close(struct net_device *dev)
11732 struct tg3 *tp = netdev_priv(dev);
11734 if (tp->pcierr_recovery) {
11735 netdev_err(dev, "Failed to close device. PCI error recovery "
11736 "in progress\n");
11737 return -EAGAIN;
11740 tg3_stop(tp);
11742 if (pci_device_is_present(tp->pdev)) {
11743 tg3_power_down_prepare(tp);
11745 tg3_carrier_off(tp);
11747 return 0;
11750 static inline u64 get_stat64(tg3_stat64_t *val)
11752 return ((u64)val->high << 32) | ((u64)val->low);
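/* e.g. high = 0x1, low = 0x5 yields 0x100000005; the high word counts
 * how many times the 32-bit low word has wrapped (see TG3_STAT_ADD32
 * above).
 */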
11755 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11757 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11759 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11760 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11761 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11762 u32 val;
11764 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11765 tg3_writephy(tp, MII_TG3_TEST1,
11766 val | MII_TG3_TEST1_CRC_EN);
11767 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11768 } else
11769 val = 0;
11771 tp->phy_crc_errors += val;
11773 return tp->phy_crc_errors;
11776 return get_stat64(&hw_stats->rx_fcs_errors);
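/* Note (interpretation of the code above): on 5700/5701 with a copper
 * PHY the MAC-level FCS counter is not used; instead
 * MII_TG3_TEST1_CRC_EN exposes a CRC error count through
 * MII_TG3_RXR_COUNTERS, and each read is accumulated into
 * tp->phy_crc_errors as a per-poll delta. All other chips report
 * rx_fcs_errors straight from the hardware statistics block.
 */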
11779 #define ESTAT_ADD(member) \
11780 estats->member = old_estats->member + \
11781 get_stat64(&hw_stats->member)
11783 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11785 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11786 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11788 ESTAT_ADD(rx_octets);
11789 ESTAT_ADD(rx_fragments);
11790 ESTAT_ADD(rx_ucast_packets);
11791 ESTAT_ADD(rx_mcast_packets);
11792 ESTAT_ADD(rx_bcast_packets);
11793 ESTAT_ADD(rx_fcs_errors);
11794 ESTAT_ADD(rx_align_errors);
11795 ESTAT_ADD(rx_xon_pause_rcvd);
11796 ESTAT_ADD(rx_xoff_pause_rcvd);
11797 ESTAT_ADD(rx_mac_ctrl_rcvd);
11798 ESTAT_ADD(rx_xoff_entered);
11799 ESTAT_ADD(rx_frame_too_long_errors);
11800 ESTAT_ADD(rx_jabbers);
11801 ESTAT_ADD(rx_undersize_packets);
11802 ESTAT_ADD(rx_in_length_errors);
11803 ESTAT_ADD(rx_out_length_errors);
11804 ESTAT_ADD(rx_64_or_less_octet_packets);
11805 ESTAT_ADD(rx_65_to_127_octet_packets);
11806 ESTAT_ADD(rx_128_to_255_octet_packets);
11807 ESTAT_ADD(rx_256_to_511_octet_packets);
11808 ESTAT_ADD(rx_512_to_1023_octet_packets);
11809 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11810 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11811 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11812 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11813 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11815 ESTAT_ADD(tx_octets);
11816 ESTAT_ADD(tx_collisions);
11817 ESTAT_ADD(tx_xon_sent);
11818 ESTAT_ADD(tx_xoff_sent);
11819 ESTAT_ADD(tx_flow_control);
11820 ESTAT_ADD(tx_mac_errors);
11821 ESTAT_ADD(tx_single_collisions);
11822 ESTAT_ADD(tx_mult_collisions);
11823 ESTAT_ADD(tx_deferred);
11824 ESTAT_ADD(tx_excessive_collisions);
11825 ESTAT_ADD(tx_late_collisions);
11826 ESTAT_ADD(tx_collide_2times);
11827 ESTAT_ADD(tx_collide_3times);
11828 ESTAT_ADD(tx_collide_4times);
11829 ESTAT_ADD(tx_collide_5times);
11830 ESTAT_ADD(tx_collide_6times);
11831 ESTAT_ADD(tx_collide_7times);
11832 ESTAT_ADD(tx_collide_8times);
11833 ESTAT_ADD(tx_collide_9times);
11834 ESTAT_ADD(tx_collide_10times);
11835 ESTAT_ADD(tx_collide_11times);
11836 ESTAT_ADD(tx_collide_12times);
11837 ESTAT_ADD(tx_collide_13times);
11838 ESTAT_ADD(tx_collide_14times);
11839 ESTAT_ADD(tx_collide_15times);
11840 ESTAT_ADD(tx_ucast_packets);
11841 ESTAT_ADD(tx_mcast_packets);
11842 ESTAT_ADD(tx_bcast_packets);
11843 ESTAT_ADD(tx_carrier_sense_errors);
11844 ESTAT_ADD(tx_discards);
11845 ESTAT_ADD(tx_errors);
11847 ESTAT_ADD(dma_writeq_full);
11848 ESTAT_ADD(dma_write_prioq_full);
11849 ESTAT_ADD(rxbds_empty);
11850 ESTAT_ADD(rx_discards);
11851 ESTAT_ADD(rx_errors);
11852 ESTAT_ADD(rx_threshold_hit);
11854 ESTAT_ADD(dma_readq_full);
11855 ESTAT_ADD(dma_read_prioq_full);
11856 ESTAT_ADD(tx_comp_queue_full);
11858 ESTAT_ADD(ring_set_send_prod_index);
11859 ESTAT_ADD(ring_status_update);
11860 ESTAT_ADD(nic_irqs);
11861 ESTAT_ADD(nic_avoided_irqs);
11862 ESTAT_ADD(nic_tx_threshold_hit);
11864 ESTAT_ADD(mbuf_lwm_thresh_hit);
11867 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11869 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11870 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11872 stats->rx_packets = old_stats->rx_packets +
11873 get_stat64(&hw_stats->rx_ucast_packets) +
11874 get_stat64(&hw_stats->rx_mcast_packets) +
11875 get_stat64(&hw_stats->rx_bcast_packets);
11877 stats->tx_packets = old_stats->tx_packets +
11878 get_stat64(&hw_stats->tx_ucast_packets) +
11879 get_stat64(&hw_stats->tx_mcast_packets) +
11880 get_stat64(&hw_stats->tx_bcast_packets);
11882 stats->rx_bytes = old_stats->rx_bytes +
11883 get_stat64(&hw_stats->rx_octets);
11884 stats->tx_bytes = old_stats->tx_bytes +
11885 get_stat64(&hw_stats->tx_octets);
11887 stats->rx_errors = old_stats->rx_errors +
11888 get_stat64(&hw_stats->rx_errors);
11889 stats->tx_errors = old_stats->tx_errors +
11890 get_stat64(&hw_stats->tx_errors) +
11891 get_stat64(&hw_stats->tx_mac_errors) +
11892 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11893 get_stat64(&hw_stats->tx_discards);
11895 stats->multicast = old_stats->multicast +
11896 get_stat64(&hw_stats->rx_mcast_packets);
11897 stats->collisions = old_stats->collisions +
11898 get_stat64(&hw_stats->tx_collisions);
11900 stats->rx_length_errors = old_stats->rx_length_errors +
11901 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11902 get_stat64(&hw_stats->rx_undersize_packets);
11904 stats->rx_frame_errors = old_stats->rx_frame_errors +
11905 get_stat64(&hw_stats->rx_align_errors);
11906 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11907 get_stat64(&hw_stats->tx_discards);
11908 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11909 get_stat64(&hw_stats->tx_carrier_sense_errors);
11911 stats->rx_crc_errors = old_stats->rx_crc_errors +
11912 tg3_calc_crc_errors(tp);
11914 stats->rx_missed_errors = old_stats->rx_missed_errors +
11915 get_stat64(&hw_stats->rx_discards);
11917 stats->rx_dropped = tp->rx_dropped;
11918 stats->tx_dropped = tp->tx_dropped;
11921 static int tg3_get_regs_len(struct net_device *dev)
11923 return TG3_REG_BLK_SIZE;
11926 static void tg3_get_regs(struct net_device *dev,
11927 struct ethtool_regs *regs, void *_p)
11929 struct tg3 *tp = netdev_priv(dev);
11931 regs->version = 0;
11933 memset(_p, 0, TG3_REG_BLK_SIZE);
11935 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11936 return;
11938 tg3_full_lock(tp, 0);
11940 tg3_dump_legacy_regs(tp, (u32 *)_p);
11942 tg3_full_unlock(tp);
11945 static int tg3_get_eeprom_len(struct net_device *dev)
11947 struct tg3 *tp = netdev_priv(dev);
11949 return tp->nvram_size;
11952 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11954 struct tg3 *tp = netdev_priv(dev);
11955 int ret, cpmu_restore = 0;
11956 u8 *pd;
11957 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11958 __be32 val;
11960 if (tg3_flag(tp, NO_NVRAM))
11961 return -EINVAL;
11963 offset = eeprom->offset;
11964 len = eeprom->len;
11965 eeprom->len = 0;
11967 eeprom->magic = TG3_EEPROM_MAGIC;
11969 /* Override clock, link aware and link idle modes */
11970 if (tg3_flag(tp, CPMU_PRESENT)) {
11971 cpmu_val = tr32(TG3_CPMU_CTRL);
11972 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11973 CPMU_CTRL_LINK_IDLE_MODE)) {
11974 tw32(TG3_CPMU_CTRL, cpmu_val &
11975 ~(CPMU_CTRL_LINK_AWARE_MODE |
11976 CPMU_CTRL_LINK_IDLE_MODE));
11977 cpmu_restore = 1;
11980 tg3_override_clk(tp);
11982 if (offset & 3) {
11983 /* adjustments to start on required 4 byte boundary */
11984 b_offset = offset & 3;
11985 b_count = 4 - b_offset;
11986 if (b_count > len) {
11987 /* i.e. offset=1 len=2 */
11988 b_count = len;
11990 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11991 if (ret)
11992 goto eeprom_done;
11993 memcpy(data, ((char *)&val) + b_offset, b_count);
11994 len -= b_count;
11995 offset += b_count;
11996 eeprom->len += b_count;
11999 /* read bytes up to the last 4 byte boundary */
12000 pd = &data[eeprom->len];
12001 for (i = 0; i < (len - (len & 3)); i += 4) {
12002 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12003 if (ret) {
12004 if (i)
12005 i -= 4;
12006 eeprom->len += i;
12007 goto eeprom_done;
12009 memcpy(pd + i, &val, 4);
12010 if (need_resched()) {
12011 if (signal_pending(current)) {
12012 eeprom->len += i;
12013 ret = -EINTR;
12014 goto eeprom_done;
12016 cond_resched();
12019 eeprom->len += i;
12021 if (len & 3) {
12022 /* read last bytes not ending on 4 byte boundary */
12023 pd = &data[eeprom->len];
12024 b_count = len & 3;
12025 b_offset = offset + len - b_count;
12026 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12027 if (ret)
12028 goto eeprom_done;
12029 memcpy(pd, &val, b_count);
12030 eeprom->len += b_count;
12032 ret = 0;
12034 eeprom_done:
12035 /* Restore clock, link aware and link idle modes */
12036 tg3_restore_clk(tp);
12037 if (cpmu_restore)
12038 tw32(TG3_CPMU_CTRL, cpmu_val);
12040 return ret;
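/* Illustrative recap of the alignment handling above: NVRAM is only
 * addressable in 32-bit words, so an arbitrary (offset, len) request
 * is split into at most three pieces:
 *
 *   head: if (offset & 3), read the word containing offset and copy
 *         its trailing bytes;
 *   body: (len - (len & 3)) bytes of whole-word reads;
 *   tail: if (len & 3), read one more word and copy its leading bytes.
 *
 * e.g. offset = 1, len = 2 is served entirely by the head word: the
 * word at offset 0 is read and bytes 1..2 are copied out of it.
 */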
12043 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12045 struct tg3 *tp = netdev_priv(dev);
12046 int ret;
12047 u32 offset, len, b_offset, odd_len;
12048 u8 *buf;
12049 __be32 start = 0, end;
12051 if (tg3_flag(tp, NO_NVRAM) ||
12052 eeprom->magic != TG3_EEPROM_MAGIC)
12053 return -EINVAL;
12055 offset = eeprom->offset;
12056 len = eeprom->len;
12058 if ((b_offset = (offset & 3))) {
12059 /* adjustments to start on required 4 byte boundary */
12060 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12061 if (ret)
12062 return ret;
12063 len += b_offset;
12064 offset &= ~3;
12065 if (len < 4)
12066 len = 4;
12069 odd_len = 0;
12070 if (len & 3) {
12071 /* adjustments to end on required 4 byte boundary */
12072 odd_len = 1;
12073 len = (len + 3) & ~3;
12074 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12075 if (ret)
12076 return ret;
12079 buf = data;
12080 if (b_offset || odd_len) {
12081 buf = kmalloc(len, GFP_KERNEL);
12082 if (!buf)
12083 return -ENOMEM;
12084 if (b_offset)
12085 memcpy(buf, &start, 4);
12086 if (odd_len)
12087 memcpy(buf+len-4, &end, 4);
12088 memcpy(buf + b_offset, data, eeprom->len);
12091 ret = tg3_nvram_write_block(tp, offset, len, buf);
12093 if (buf != data)
12094 kfree(buf);
12096 return ret;
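/* The write path mirrors the read path above: unaligned head/tail
 * bytes are handled read-modify-write style, by first reading the
 * neighbouring 32-bit words ("start" and "end"), merging the caller's
 * bytes into a word-aligned bounce buffer, and writing whole words
 * back, so a byte-granular request never clobbers adjacent NVRAM
 * content.
 */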
12099 static int tg3_get_link_ksettings(struct net_device *dev,
12100 struct ethtool_link_ksettings *cmd)
12102 struct tg3 *tp = netdev_priv(dev);
12103 u32 supported, advertising;
12105 if (tg3_flag(tp, USE_PHYLIB)) {
12106 struct phy_device *phydev;
12107 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12108 return -EAGAIN;
12109 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12110 phy_ethtool_ksettings_get(phydev, cmd);
12112 return 0;
12115 supported = (SUPPORTED_Autoneg);
12117 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12118 supported |= (SUPPORTED_1000baseT_Half |
12119 SUPPORTED_1000baseT_Full);
12121 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12122 supported |= (SUPPORTED_100baseT_Half |
12123 SUPPORTED_100baseT_Full |
12124 SUPPORTED_10baseT_Half |
12125 SUPPORTED_10baseT_Full |
12126 SUPPORTED_TP);
12127 cmd->base.port = PORT_TP;
12128 } else {
12129 supported |= SUPPORTED_FIBRE;
12130 cmd->base.port = PORT_FIBRE;
12132 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12133 supported);
12135 advertising = tp->link_config.advertising;
12136 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12137 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12138 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12139 advertising |= ADVERTISED_Pause;
12140 } else {
12141 advertising |= ADVERTISED_Pause |
12142 ADVERTISED_Asym_Pause;
12144 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12145 advertising |= ADVERTISED_Asym_Pause;
12148 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12149 advertising);
12151 if (netif_running(dev) && tp->link_up) {
12152 cmd->base.speed = tp->link_config.active_speed;
12153 cmd->base.duplex = tp->link_config.active_duplex;
12154 ethtool_convert_legacy_u32_to_link_mode(
12155 cmd->link_modes.lp_advertising,
12156 tp->link_config.rmt_adv);
12158 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12159 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12160 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12161 else
12162 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12164 } else {
12165 cmd->base.speed = SPEED_UNKNOWN;
12166 cmd->base.duplex = DUPLEX_UNKNOWN;
12167 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12169 cmd->base.phy_address = tp->phy_addr;
12170 cmd->base.autoneg = tp->link_config.autoneg;
12171 return 0;
12174 static int tg3_set_link_ksettings(struct net_device *dev,
12175 const struct ethtool_link_ksettings *cmd)
12177 struct tg3 *tp = netdev_priv(dev);
12178 u32 speed = cmd->base.speed;
12179 u32 advertising;
12181 if (tg3_flag(tp, USE_PHYLIB)) {
12182 struct phy_device *phydev;
12183 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12184 return -EAGAIN;
12185 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12186 return phy_ethtool_ksettings_set(phydev, cmd);
12189 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12190 cmd->base.autoneg != AUTONEG_DISABLE)
12191 return -EINVAL;
12193 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12194 cmd->base.duplex != DUPLEX_FULL &&
12195 cmd->base.duplex != DUPLEX_HALF)
12196 return -EINVAL;
12198 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12199 cmd->link_modes.advertising);
12201 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12202 u32 mask = ADVERTISED_Autoneg |
12203 ADVERTISED_Pause |
12204 ADVERTISED_Asym_Pause;
12206 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12207 mask |= ADVERTISED_1000baseT_Half |
12208 ADVERTISED_1000baseT_Full;
12210 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12211 mask |= ADVERTISED_100baseT_Half |
12212 ADVERTISED_100baseT_Full |
12213 ADVERTISED_10baseT_Half |
12214 ADVERTISED_10baseT_Full |
12215 ADVERTISED_TP;
12216 else
12217 mask |= ADVERTISED_FIBRE;
12219 if (advertising & ~mask)
12220 return -EINVAL;
12222 mask &= (ADVERTISED_1000baseT_Half |
12223 ADVERTISED_1000baseT_Full |
12224 ADVERTISED_100baseT_Half |
12225 ADVERTISED_100baseT_Full |
12226 ADVERTISED_10baseT_Half |
12227 ADVERTISED_10baseT_Full);
12229 advertising &= mask;
12230 } else {
12231 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12232 if (speed != SPEED_1000)
12233 return -EINVAL;
12235 if (cmd->base.duplex != DUPLEX_FULL)
12236 return -EINVAL;
12237 } else {
12238 if (speed != SPEED_100 &&
12239 speed != SPEED_10)
12240 return -EINVAL;
12244 tg3_full_lock(tp, 0);
12246 tp->link_config.autoneg = cmd->base.autoneg;
12247 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12248 tp->link_config.advertising = (advertising |
12249 ADVERTISED_Autoneg);
12250 tp->link_config.speed = SPEED_UNKNOWN;
12251 tp->link_config.duplex = DUPLEX_UNKNOWN;
12252 } else {
12253 tp->link_config.advertising = 0;
12254 tp->link_config.speed = speed;
12255 tp->link_config.duplex = cmd->base.duplex;
12258 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12260 tg3_warn_mgmt_link_flap(tp);
12262 if (netif_running(dev))
12263 tg3_setup_phy(tp, true);
12265 tg3_full_unlock(tp);
12267 return 0;
12270 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12272 struct tg3 *tp = netdev_priv(dev);
12274 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12275 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12276 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12277 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12280 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12282 struct tg3 *tp = netdev_priv(dev);
12284 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12285 wol->supported = WAKE_MAGIC;
12286 else
12287 wol->supported = 0;
12288 wol->wolopts = 0;
12289 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12290 wol->wolopts = WAKE_MAGIC;
12291 memset(&wol->sopass, 0, sizeof(wol->sopass));
12294 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12296 struct tg3 *tp = netdev_priv(dev);
12297 struct device *dp = &tp->pdev->dev;
12299 if (wol->wolopts & ~WAKE_MAGIC)
12300 return -EINVAL;
12301 if ((wol->wolopts & WAKE_MAGIC) &&
12302 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12303 return -EINVAL;
12305 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12307 if (device_may_wakeup(dp))
12308 tg3_flag_set(tp, WOL_ENABLE);
12309 else
12310 tg3_flag_clear(tp, WOL_ENABLE);
12312 return 0;
12315 static u32 tg3_get_msglevel(struct net_device *dev)
12317 struct tg3 *tp = netdev_priv(dev);
12318 return tp->msg_enable;
12321 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12323 struct tg3 *tp = netdev_priv(dev);
12324 tp->msg_enable = value;
12327 static int tg3_nway_reset(struct net_device *dev)
12329 struct tg3 *tp = netdev_priv(dev);
12330 int r;
12332 if (!netif_running(dev))
12333 return -EAGAIN;
12335 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12336 return -EINVAL;
12338 tg3_warn_mgmt_link_flap(tp);
12340 if (tg3_flag(tp, USE_PHYLIB)) {
12341 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12342 return -EAGAIN;
12343 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12344 } else {
12345 u32 bmcr;
12347 spin_lock_bh(&tp->lock);
12348 r = -EINVAL;
12349 tg3_readphy(tp, MII_BMCR, &bmcr);
12350 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12351 ((bmcr & BMCR_ANENABLE) ||
12352 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12353 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12354 BMCR_ANENABLE);
12355 r = 0;
12357 spin_unlock_bh(&tp->lock);
12360 return r;
12363 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12365 struct tg3 *tp = netdev_priv(dev);
12367 ering->rx_max_pending = tp->rx_std_ring_mask;
12368 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12369 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12370 else
12371 ering->rx_jumbo_max_pending = 0;
12373 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12375 ering->rx_pending = tp->rx_pending;
12376 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12377 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12378 else
12379 ering->rx_jumbo_pending = 0;
12381 ering->tx_pending = tp->napi[0].tx_pending;
12384 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12386 struct tg3 *tp = netdev_priv(dev);
12387 int i, irq_sync = 0, err = 0;
12389 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12390 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12391 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12392 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12393 (tg3_flag(tp, TSO_BUG) &&
12394 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12395 return -EINVAL;
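/* Note on the bounds above: the TX ring must be able to hold at least
 * one maximally fragmented packet (MAX_SKB_FRAGS descriptors, with 3x
 * headroom on chips flagged TSO_BUG, whose workaround may re-split
 * frames), otherwise a worst-case skb could never be queued.
 */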
12397 if (netif_running(dev)) {
12398 tg3_phy_stop(tp);
12399 tg3_netif_stop(tp);
12400 irq_sync = 1;
12403 tg3_full_lock(tp, irq_sync);
12405 tp->rx_pending = ering->rx_pending;
12407 if (tg3_flag(tp, MAX_RXPEND_64) &&
12408 tp->rx_pending > 63)
12409 tp->rx_pending = 63;
12411 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12412 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12414 for (i = 0; i < tp->irq_max; i++)
12415 tp->napi[i].tx_pending = ering->tx_pending;
12417 if (netif_running(dev)) {
12418 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12419 err = tg3_restart_hw(tp, false);
12420 if (!err)
12421 tg3_netif_start(tp);
12424 tg3_full_unlock(tp);
12426 if (irq_sync && !err)
12427 tg3_phy_start(tp);
12429 return err;
12432 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12434 struct tg3 *tp = netdev_priv(dev);
12436 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12438 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12439 epause->rx_pause = 1;
12440 else
12441 epause->rx_pause = 0;
12443 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12444 epause->tx_pause = 1;
12445 else
12446 epause->tx_pause = 0;
12449 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12451 struct tg3 *tp = netdev_priv(dev);
12452 int err = 0;
12454 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12455 tg3_warn_mgmt_link_flap(tp);
12457 if (tg3_flag(tp, USE_PHYLIB)) {
12458 u32 newadv;
12459 struct phy_device *phydev;
12461 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12463 if (!(phydev->supported & SUPPORTED_Pause) ||
12464 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12465 (epause->rx_pause != epause->tx_pause)))
12466 return -EINVAL;
12468 tp->link_config.flowctrl = 0;
12469 if (epause->rx_pause) {
12470 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12472 if (epause->tx_pause) {
12473 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12474 newadv = ADVERTISED_Pause;
12475 } else
12476 newadv = ADVERTISED_Pause |
12477 ADVERTISED_Asym_Pause;
12478 } else if (epause->tx_pause) {
12479 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12480 newadv = ADVERTISED_Asym_Pause;
12481 } else
12482 newadv = 0;
12484 if (epause->autoneg)
12485 tg3_flag_set(tp, PAUSE_AUTONEG);
12486 else
12487 tg3_flag_clear(tp, PAUSE_AUTONEG);
12489 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12490 u32 oldadv = phydev->advertising &
12491 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12492 if (oldadv != newadv) {
12493 phydev->advertising &=
12494 ~(ADVERTISED_Pause |
12495 ADVERTISED_Asym_Pause);
12496 phydev->advertising |= newadv;
12497 if (phydev->autoneg) {
12499 * Always renegotiate the link to
12500 * inform our link partner of our
12501 * flow control settings, even if the
12502 * flow control is forced. Let
12503 * tg3_adjust_link() do the final
12504 * flow control setup.
12506 return phy_start_aneg(phydev);
12510 if (!epause->autoneg)
12511 tg3_setup_flow_control(tp, 0, 0);
12512 } else {
12513 tp->link_config.advertising &=
12514 ~(ADVERTISED_Pause |
12515 ADVERTISED_Asym_Pause);
12516 tp->link_config.advertising |= newadv;
12518 } else {
12519 int irq_sync = 0;
12521 if (netif_running(dev)) {
12522 tg3_netif_stop(tp);
12523 irq_sync = 1;
12526 tg3_full_lock(tp, irq_sync);
12528 if (epause->autoneg)
12529 tg3_flag_set(tp, PAUSE_AUTONEG);
12530 else
12531 tg3_flag_clear(tp, PAUSE_AUTONEG);
12532 if (epause->rx_pause)
12533 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12534 else
12535 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12536 if (epause->tx_pause)
12537 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12538 else
12539 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12541 if (netif_running(dev)) {
12542 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12543 err = tg3_restart_hw(tp, false);
12544 if (!err)
12545 tg3_netif_start(tp);
12548 tg3_full_unlock(tp);
12551 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12553 return err;
12556 static int tg3_get_sset_count(struct net_device *dev, int sset)
12558 switch (sset) {
12559 case ETH_SS_TEST:
12560 return TG3_NUM_TEST;
12561 case ETH_SS_STATS:
12562 return TG3_NUM_STATS;
12563 default:
12564 return -EOPNOTSUPP;
12568 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12569 u32 *rules __always_unused)
12571 struct tg3 *tp = netdev_priv(dev);
12573 if (!tg3_flag(tp, SUPPORT_MSIX))
12574 return -EOPNOTSUPP;
12576 switch (info->cmd) {
12577 case ETHTOOL_GRXRINGS:
12578 if (netif_running(tp->dev))
12579 info->data = tp->rxq_cnt;
12580 else {
12581 info->data = num_online_cpus();
12582 if (info->data > TG3_RSS_MAX_NUM_QS)
12583 info->data = TG3_RSS_MAX_NUM_QS;
12586 return 0;
12588 default:
12589 return -EOPNOTSUPP;
12593 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12595 u32 size = 0;
12596 struct tg3 *tp = netdev_priv(dev);
12598 if (tg3_flag(tp, SUPPORT_MSIX))
12599 size = TG3_RSS_INDIR_TBL_SIZE;
12601 return size;
12604 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12606 struct tg3 *tp = netdev_priv(dev);
12607 int i;
12609 if (hfunc)
12610 *hfunc = ETH_RSS_HASH_TOP;
12611 if (!indir)
12612 return 0;
12614 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12615 indir[i] = tp->rss_ind_tbl[i];
12617 return 0;
12620 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12621 const u8 hfunc)
12623 struct tg3 *tp = netdev_priv(dev);
12624 size_t i;
12626 /* We require at least one supported parameter to be changed and no
12627 * change in any of the unsupported parameters
12629 if (key ||
12630 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12631 return -EOPNOTSUPP;
12633 if (!indir)
12634 return 0;
12636 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12637 tp->rss_ind_tbl[i] = indir[i];
12639 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12640 return 0;
12642 /* It is legal to write the indirection
12643 * table while the device is running.
12645 tg3_full_lock(tp, 0);
12646 tg3_rss_write_indir_tbl(tp);
12647 tg3_full_unlock(tp);
12649 return 0;
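/* Hedged usage sketch ("eth0" assumed): the RSS indirection table may be
 * rewritten while traffic flows, e.g.
 *
 *   ethtool -X eth0 equal 4
 *
 * spreads the TG3_RSS_INDIR_TBL_SIZE slots evenly over four rx queues.
 * Per the checks above, only the Toeplitz hash (ETH_RSS_HASH_TOP) is
 * accepted and the hash key cannot be changed.
 */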
12652 static void tg3_get_channels(struct net_device *dev,
12653 struct ethtool_channels *channel)
12655 struct tg3 *tp = netdev_priv(dev);
12656 u32 deflt_qs = netif_get_num_default_rss_queues();
12658 channel->max_rx = tp->rxq_max;
12659 channel->max_tx = tp->txq_max;
12661 if (netif_running(dev)) {
12662 channel->rx_count = tp->rxq_cnt;
12663 channel->tx_count = tp->txq_cnt;
12664 } else {
12665 if (tp->rxq_req)
12666 channel->rx_count = tp->rxq_req;
12667 else
12668 channel->rx_count = min(deflt_qs, tp->rxq_max);
12670 if (tp->txq_req)
12671 channel->tx_count = tp->txq_req;
12672 else
12673 channel->tx_count = min(deflt_qs, tp->txq_max);
12677 static int tg3_set_channels(struct net_device *dev,
12678 struct ethtool_channels *channel)
12680 struct tg3 *tp = netdev_priv(dev);
12682 if (!tg3_flag(tp, SUPPORT_MSIX))
12683 return -EOPNOTSUPP;
12685 if (channel->rx_count > tp->rxq_max ||
12686 channel->tx_count > tp->txq_max)
12687 return -EINVAL;
12689 tp->rxq_req = channel->rx_count;
12690 tp->txq_req = channel->tx_count;
12692 if (!netif_running(dev))
12693 return 0;
12695 tg3_stop(tp);
12697 tg3_carrier_off(tp);
12699 tg3_start(tp, true, false, false);
12701 return 0;
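/* Unlike the live indirection-table update above, changing queue counts
 * tears the device down (tg3_stop()) and rebuilds it.  Hedged example
 * ("eth0" assumed): ethtool -L eth0 rx 2 tx 2
 */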
12704 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12706 switch (stringset) {
12707 case ETH_SS_STATS:
12708 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12709 break;
12710 case ETH_SS_TEST:
12711 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12712 break;
12713 default:
12714 WARN_ON(1); /* we need a WARN() */
12715 break;
12719 static int tg3_set_phys_id(struct net_device *dev,
12720 enum ethtool_phys_id_state state)
12722 struct tg3 *tp = netdev_priv(dev);
12724 if (!netif_running(tp->dev))
12725 return -EAGAIN;
12727 switch (state) {
12728 case ETHTOOL_ID_ACTIVE:
12729 return 1; /* cycle on/off once per second */
12731 case ETHTOOL_ID_ON:
12732 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12733 LED_CTRL_1000MBPS_ON |
12734 LED_CTRL_100MBPS_ON |
12735 LED_CTRL_10MBPS_ON |
12736 LED_CTRL_TRAFFIC_OVERRIDE |
12737 LED_CTRL_TRAFFIC_BLINK |
12738 LED_CTRL_TRAFFIC_LED);
12739 break;
12741 case ETHTOOL_ID_OFF:
12742 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12743 LED_CTRL_TRAFFIC_OVERRIDE);
12744 break;
12746 case ETHTOOL_ID_INACTIVE:
12747 tw32(MAC_LED_CTRL, tp->led_ctrl);
12748 break;
12751 return 0;
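/* Hedged example ("eth0" assumed): "ethtool -p eth0 5" blinks the port
 * LED for five seconds.  Returning 1 for ETHTOOL_ID_ACTIVE selects
 * synchronous blinking, with the ethtool core calling back with
 * ETHTOOL_ID_ON/ETHTOOL_ID_OFF once per second.
 */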
12754 static void tg3_get_ethtool_stats(struct net_device *dev,
12755 struct ethtool_stats *estats, u64 *tmp_stats)
12757 struct tg3 *tp = netdev_priv(dev);
12759 if (tp->hw_stats)
12760 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12761 else
12762 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12765 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12767 int i;
12768 __be32 *buf;
12769 u32 offset = 0, len = 0;
12770 u32 magic, val;
12772 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12773 return NULL;
12775 if (magic == TG3_EEPROM_MAGIC) {
12776 for (offset = TG3_NVM_DIR_START;
12777 offset < TG3_NVM_DIR_END;
12778 offset += TG3_NVM_DIRENT_SIZE) {
12779 if (tg3_nvram_read(tp, offset, &val))
12780 return NULL;
12782 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12783 TG3_NVM_DIRTYPE_EXTVPD)
12784 break;
12787 if (offset != TG3_NVM_DIR_END) {
12788 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12789 if (tg3_nvram_read(tp, offset + 4, &offset))
12790 return NULL;
12792 offset = tg3_nvram_logical_addr(tp, offset);
12796 if (!offset || !len) {
12797 offset = TG3_NVM_VPD_OFF;
12798 len = TG3_NVM_VPD_LEN;
12801 buf = kmalloc(len, GFP_KERNEL);
12802 if (buf == NULL)
12803 return NULL;
12805 if (magic == TG3_EEPROM_MAGIC) {
12806 for (i = 0; i < len; i += 4) {
12807 /* The data is in little-endian format in NVRAM.
12808 * Use the big-endian read routines to preserve
12809 * the byte order as it exists in NVRAM.
12811 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12812 goto error;
12814 } else {
12815 u8 *ptr;
12816 ssize_t cnt;
12817 unsigned int pos = 0;
12819 ptr = (u8 *)&buf[0];
12820 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12821 cnt = pci_read_vpd(tp->pdev, pos,
12822 len - pos, ptr);
12823 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12824 cnt = 0;
12825 else if (cnt < 0)
12826 goto error;
12828 if (pos != len)
12829 goto error;
12832 *vpdlen = len;
12834 return buf;
12836 error:
12837 kfree(buf);
12838 return NULL;
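/* Two read paths above: parts with TG3_EEPROM_MAGIC are read through the
 * NVRAM interface with the big-endian helpers (preserving on-chip byte
 * order), while everything else falls back to the PCI VPD capability via
 * pci_read_vpd(), allowing at most three attempts with -ETIMEDOUT/-EINTR
 * treated as retryable.
 */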
12841 #define NVRAM_TEST_SIZE 0x100
12842 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12843 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12844 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12845 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12846 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12847 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12848 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12849 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12851 static int tg3_test_nvram(struct tg3 *tp)
12853 u32 csum, magic, len;
12854 __be32 *buf;
12855 int i, j, k, err = 0, size;
12857 if (tg3_flag(tp, NO_NVRAM))
12858 return 0;
12860 if (tg3_nvram_read(tp, 0, &magic) != 0)
12861 return -EIO;
12863 if (magic == TG3_EEPROM_MAGIC)
12864 size = NVRAM_TEST_SIZE;
12865 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12866 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12867 TG3_EEPROM_SB_FORMAT_1) {
12868 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12869 case TG3_EEPROM_SB_REVISION_0:
12870 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12871 break;
12872 case TG3_EEPROM_SB_REVISION_2:
12873 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12874 break;
12875 case TG3_EEPROM_SB_REVISION_3:
12876 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12877 break;
12878 case TG3_EEPROM_SB_REVISION_4:
12879 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12880 break;
12881 case TG3_EEPROM_SB_REVISION_5:
12882 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12883 break;
12884 case TG3_EEPROM_SB_REVISION_6:
12885 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12886 break;
12887 default:
12888 return -EIO;
12890 } else
12891 return 0;
12892 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12893 size = NVRAM_SELFBOOT_HW_SIZE;
12894 else
12895 return -EIO;
12897 buf = kmalloc(size, GFP_KERNEL);
12898 if (buf == NULL)
12899 return -ENOMEM;
12901 err = -EIO;
12902 for (i = 0, j = 0; i < size; i += 4, j++) {
12903 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12904 if (err)
12905 break;
12907 if (i < size)
12908 goto out;
12910 /* Selfboot format */
12911 magic = be32_to_cpu(buf[0]);
12912 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12913 TG3_EEPROM_MAGIC_FW) {
12914 u8 *buf8 = (u8 *) buf, csum8 = 0;
12916 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12917 TG3_EEPROM_SB_REVISION_2) {
12918 /* For rev 2, the csum doesn't include the MBA. */
12919 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12920 csum8 += buf8[i];
12921 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12922 csum8 += buf8[i];
12923 } else {
12924 for (i = 0; i < size; i++)
12925 csum8 += buf8[i];
12928 if (csum8 == 0) {
12929 err = 0;
12930 goto out;
12933 err = -EIO;
12934 goto out;
12937 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12938 TG3_EEPROM_MAGIC_HW) {
12939 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12940 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12941 u8 *buf8 = (u8 *) buf;
12943 /* Separate the parity bits and the data bytes. */
12944 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12945 if ((i == 0) || (i == 8)) {
12946 int l;
12947 u8 msk;
12949 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12950 parity[k++] = buf8[i] & msk;
12951 i++;
12952 } else if (i == 16) {
12953 int l;
12954 u8 msk;
12956 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12957 parity[k++] = buf8[i] & msk;
12958 i++;
12960 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12961 parity[k++] = buf8[i] & msk;
12962 i++;
12964 data[j++] = buf8[i];
12967 err = -EIO;
12968 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12969 u8 hw8 = hweight8(data[i]);
12971 if ((hw8 & 0x1) && parity[i])
12972 goto out;
12973 else if (!(hw8 & 0x1) && !parity[i])
12974 goto out;
12976 err = 0;
12977 goto out;
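/* The walk above enforces odd parity per byte: hweight8() counts the one
 * bits in each data byte, and the stored parity bit must be set exactly
 * when that count is even, so data plus parity always carries an odd
 * number of ones.  A mismatch in either direction fails the test.
 */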
12980 err = -EIO;
12982 /* Bootstrap checksum at offset 0x10 */
12983 csum = calc_crc((unsigned char *) buf, 0x10);
12984 if (csum != le32_to_cpu(buf[0x10/4]))
12985 goto out;
12987 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12988 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12989 if (csum != le32_to_cpu(buf[0xfc/4]))
12990 goto out;
12992 kfree(buf);
12994 buf = tg3_vpd_readblock(tp, &len);
12995 if (!buf)
12996 return -ENOMEM;
12998 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12999 if (i > 0) {
13000 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13001 if (j < 0)
13002 goto out;
13004 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13005 goto out;
13007 i += PCI_VPD_LRDT_TAG_SIZE;
13008 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13009 PCI_VPD_RO_KEYWORD_CHKSUM);
13010 if (j > 0) {
13011 u8 csum8 = 0;
13013 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13015 for (i = 0; i <= j; i++)
13016 csum8 += ((u8 *)buf)[i];
13018 if (csum8)
13019 goto out;
13023 err = 0;
13025 out:
13026 kfree(buf);
13027 return err;
13030 #define TG3_SERDES_TIMEOUT_SEC 2
13031 #define TG3_COPPER_TIMEOUT_SEC 6
13033 static int tg3_test_link(struct tg3 *tp)
13035 int i, max;
13037 if (!netif_running(tp->dev))
13038 return -ENODEV;
13040 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13041 max = TG3_SERDES_TIMEOUT_SEC;
13042 else
13043 max = TG3_COPPER_TIMEOUT_SEC;
13045 for (i = 0; i < max; i++) {
13046 if (tp->link_up)
13047 return 0;
13049 if (msleep_interruptible(1000))
13050 break;
13053 return -EIO;
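/* The link test polls once per second, giving serdes ports up to 2
 * seconds and copper ports up to 6 seconds to come up before returning
 * -EIO; an interrupted msleep aborts the wait early.
 */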
13056 /* Only test the commonly used registers */
13057 static int tg3_test_registers(struct tg3 *tp)
13059 int i, is_5705, is_5750;
13060 u32 offset, read_mask, write_mask, val, save_val, read_val;
13061 static struct {
13062 u16 offset;
13063 u16 flags;
13064 #define TG3_FL_5705 0x1
13065 #define TG3_FL_NOT_5705 0x2
13066 #define TG3_FL_NOT_5788 0x4
13067 #define TG3_FL_NOT_5750 0x8
13068 u32 read_mask;
13069 u32 write_mask;
13070 } reg_tbl[] = {
13071 /* MAC Control Registers */
13072 { MAC_MODE, TG3_FL_NOT_5705,
13073 0x00000000, 0x00ef6f8c },
13074 { MAC_MODE, TG3_FL_5705,
13075 0x00000000, 0x01ef6b8c },
13076 { MAC_STATUS, TG3_FL_NOT_5705,
13077 0x03800107, 0x00000000 },
13078 { MAC_STATUS, TG3_FL_5705,
13079 0x03800100, 0x00000000 },
13080 { MAC_ADDR_0_HIGH, 0x0000,
13081 0x00000000, 0x0000ffff },
13082 { MAC_ADDR_0_LOW, 0x0000,
13083 0x00000000, 0xffffffff },
13084 { MAC_RX_MTU_SIZE, 0x0000,
13085 0x00000000, 0x0000ffff },
13086 { MAC_TX_MODE, 0x0000,
13087 0x00000000, 0x00000070 },
13088 { MAC_TX_LENGTHS, 0x0000,
13089 0x00000000, 0x00003fff },
13090 { MAC_RX_MODE, TG3_FL_NOT_5705,
13091 0x00000000, 0x000007fc },
13092 { MAC_RX_MODE, TG3_FL_5705,
13093 0x00000000, 0x000007dc },
13094 { MAC_HASH_REG_0, 0x0000,
13095 0x00000000, 0xffffffff },
13096 { MAC_HASH_REG_1, 0x0000,
13097 0x00000000, 0xffffffff },
13098 { MAC_HASH_REG_2, 0x0000,
13099 0x00000000, 0xffffffff },
13100 { MAC_HASH_REG_3, 0x0000,
13101 0x00000000, 0xffffffff },
13103 /* Receive Data and Receive BD Initiator Control Registers. */
13104 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13105 0x00000000, 0xffffffff },
13106 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13107 0x00000000, 0xffffffff },
13108 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13109 0x00000000, 0x00000003 },
13110 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13111 0x00000000, 0xffffffff },
13112 { RCVDBDI_STD_BD+0, 0x0000,
13113 0x00000000, 0xffffffff },
13114 { RCVDBDI_STD_BD+4, 0x0000,
13115 0x00000000, 0xffffffff },
13116 { RCVDBDI_STD_BD+8, 0x0000,
13117 0x00000000, 0xffff0002 },
13118 { RCVDBDI_STD_BD+0xc, 0x0000,
13119 0x00000000, 0xffffffff },
13121 /* Receive BD Initiator Control Registers. */
13122 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13123 0x00000000, 0xffffffff },
13124 { RCVBDI_STD_THRESH, TG3_FL_5705,
13125 0x00000000, 0x000003ff },
13126 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13127 0x00000000, 0xffffffff },
13129 /* Host Coalescing Control Registers. */
13130 { HOSTCC_MODE, TG3_FL_NOT_5705,
13131 0x00000000, 0x00000004 },
13132 { HOSTCC_MODE, TG3_FL_5705,
13133 0x00000000, 0x000000f6 },
13134 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13135 0x00000000, 0xffffffff },
13136 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13137 0x00000000, 0x000003ff },
13138 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13139 0x00000000, 0xffffffff },
13140 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13141 0x00000000, 0x000003ff },
13142 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13143 0x00000000, 0xffffffff },
13144 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13145 0x00000000, 0x000000ff },
13146 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13147 0x00000000, 0xffffffff },
13148 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13149 0x00000000, 0x000000ff },
13150 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13151 0x00000000, 0xffffffff },
13152 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13153 0x00000000, 0xffffffff },
13154 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13155 0x00000000, 0xffffffff },
13156 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13157 0x00000000, 0x000000ff },
13158 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13159 0x00000000, 0xffffffff },
13160 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13161 0x00000000, 0x000000ff },
13162 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13163 0x00000000, 0xffffffff },
13164 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13165 0x00000000, 0xffffffff },
13166 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13167 0x00000000, 0xffffffff },
13168 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13169 0x00000000, 0xffffffff },
13170 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13171 0x00000000, 0xffffffff },
13172 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13173 0xffffffff, 0x00000000 },
13174 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13175 0xffffffff, 0x00000000 },
13177 /* Buffer Manager Control Registers. */
13178 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13179 0x00000000, 0x007fff80 },
13180 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13181 0x00000000, 0x007fffff },
13182 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13183 0x00000000, 0x0000003f },
13184 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13185 0x00000000, 0x000001ff },
13186 { BUFMGR_MB_HIGH_WATER, 0x0000,
13187 0x00000000, 0x000001ff },
13188 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13189 0xffffffff, 0x00000000 },
13190 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13191 0xffffffff, 0x00000000 },
13193 /* Mailbox Registers */
13194 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13195 0x00000000, 0x000001ff },
13196 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13197 0x00000000, 0x000001ff },
13198 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13199 0x00000000, 0x000007ff },
13200 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13201 0x00000000, 0x000001ff },
13203 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13206 is_5705 = is_5750 = 0;
13207 if (tg3_flag(tp, 5705_PLUS)) {
13208 is_5705 = 1;
13209 if (tg3_flag(tp, 5750_PLUS))
13210 is_5750 = 1;
13213 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13214 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13215 continue;
13217 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13218 continue;
13220 if (tg3_flag(tp, IS_5788) &&
13221 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13222 continue;
13224 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13225 continue;
13227 offset = (u32) reg_tbl[i].offset;
13228 read_mask = reg_tbl[i].read_mask;
13229 write_mask = reg_tbl[i].write_mask;
13231 /* Save the original register content */
13232 save_val = tr32(offset);
13234 /* Determine the read-only value. */
13235 read_val = save_val & read_mask;
13237 /* Write zero to the register, then make sure the read-only bits
13238 * are not changed and the read/write bits are all zeros.
13240 tw32(offset, 0);
13242 val = tr32(offset);
13244 /* Test the read-only and read/write bits. */
13245 if (((val & read_mask) != read_val) || (val & write_mask))
13246 goto out;
13248 /* Write ones to all the bits defined by RdMask and WrMask, then
13249 * make sure the read-only bits are not changed and the
13250 * read/write bits are all ones.
13252 tw32(offset, read_mask | write_mask);
13254 val = tr32(offset);
13256 /* Test the read-only bits. */
13257 if ((val & read_mask) != read_val)
13258 goto out;
13260 /* Test the read/write bits. */
13261 if ((val & write_mask) != write_mask)
13262 goto out;
13264 tw32(offset, save_val);
13267 return 0;
13269 out:
13270 if (netif_msg_hw(tp))
13271 netdev_err(tp->dev,
13272 "Register test failed at offset %x\n", offset);
13273 tw32(offset, save_val);
13274 return -EIO;
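/* Each register is probed twice above: writing all zeros must leave the
 * read-only bits (read_mask) unchanged with every writable bit clear,
 * and writing read_mask | write_mask must leave the read-only bits
 * unchanged with every writable bit set.  The saved value is restored
 * whether the probe passes or fails.
 */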
13277 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13279 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13280 int i;
13281 u32 j;
13283 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13284 for (j = 0; j < len; j += 4) {
13285 u32 val;
13287 tg3_write_mem(tp, offset + j, test_pattern[i]);
13288 tg3_read_mem(tp, offset + j, &val);
13289 if (val != test_pattern[i])
13290 return -EIO;
13293 return 0;
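/* Each pattern (all zeros, all ones, 0xaa55a55a) is written and read
 * back one 32-bit word at a time over the whole window, so a stuck bit
 * anywhere in the range fails at least one of the three passes.
 */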
13296 static int tg3_test_memory(struct tg3 *tp)
13298 static struct mem_entry {
13299 u32 offset;
13300 u32 len;
13301 } mem_tbl_570x[] = {
13302 { 0x00000000, 0x00b50},
13303 { 0x00002000, 0x1c000},
13304 { 0xffffffff, 0x00000}
13305 }, mem_tbl_5705[] = {
13306 { 0x00000100, 0x0000c},
13307 { 0x00000200, 0x00008},
13308 { 0x00004000, 0x00800},
13309 { 0x00006000, 0x01000},
13310 { 0x00008000, 0x02000},
13311 { 0x00010000, 0x0e000},
13312 { 0xffffffff, 0x00000}
13313 }, mem_tbl_5755[] = {
13314 { 0x00000200, 0x00008},
13315 { 0x00004000, 0x00800},
13316 { 0x00006000, 0x00800},
13317 { 0x00008000, 0x02000},
13318 { 0x00010000, 0x0c000},
13319 { 0xffffffff, 0x00000}
13320 }, mem_tbl_5906[] = {
13321 { 0x00000200, 0x00008},
13322 { 0x00004000, 0x00400},
13323 { 0x00006000, 0x00400},
13324 { 0x00008000, 0x01000},
13325 { 0x00010000, 0x01000},
13326 { 0xffffffff, 0x00000}
13327 }, mem_tbl_5717[] = {
13328 { 0x00000200, 0x00008},
13329 { 0x00010000, 0x0a000},
13330 { 0x00020000, 0x13c00},
13331 { 0xffffffff, 0x00000}
13332 }, mem_tbl_57765[] = {
13333 { 0x00000200, 0x00008},
13334 { 0x00004000, 0x00800},
13335 { 0x00006000, 0x09800},
13336 { 0x00010000, 0x0a000},
13337 { 0xffffffff, 0x00000}
13339 struct mem_entry *mem_tbl;
13340 int err = 0;
13341 int i;
13343 if (tg3_flag(tp, 5717_PLUS))
13344 mem_tbl = mem_tbl_5717;
13345 else if (tg3_flag(tp, 57765_CLASS) ||
13346 tg3_asic_rev(tp) == ASIC_REV_5762)
13347 mem_tbl = mem_tbl_57765;
13348 else if (tg3_flag(tp, 5755_PLUS))
13349 mem_tbl = mem_tbl_5755;
13350 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13351 mem_tbl = mem_tbl_5906;
13352 else if (tg3_flag(tp, 5705_PLUS))
13353 mem_tbl = mem_tbl_5705;
13354 else
13355 mem_tbl = mem_tbl_570x;
13357 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13358 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13359 if (err)
13360 break;
13363 return err;
13366 #define TG3_TSO_MSS 500
13368 #define TG3_TSO_IP_HDR_LEN 20
13369 #define TG3_TSO_TCP_HDR_LEN 20
13370 #define TG3_TSO_TCP_OPT_LEN 12
13372 static const u8 tg3_tso_header[] = {
13373 0x08, 0x00,
13374 0x45, 0x00, 0x00, 0x00,
13375 0x00, 0x00, 0x40, 0x00,
13376 0x40, 0x06, 0x00, 0x00,
13377 0x0a, 0x00, 0x00, 0x01,
13378 0x0a, 0x00, 0x00, 0x02,
13379 0x0d, 0x00, 0xe0, 0x00,
13380 0x00, 0x00, 0x01, 0x00,
13381 0x00, 0x00, 0x02, 0x00,
13382 0x80, 0x10, 0x10, 0x00,
13383 0x14, 0x09, 0x00, 0x00,
13384 0x01, 0x01, 0x08, 0x0a,
13385 0x11, 0x11, 0x11, 0x11,
13386 0x11, 0x11, 0x11, 0x11,
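/* A hedged decode of the template above: ethertype 0x0800, then a
 * 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2, protocol TCP), then a
 * 32-byte TCP header whose final 12 bytes are NOP, NOP and a timestamp
 * option - matching TG3_TSO_IP_HDR_LEN, TG3_TSO_TCP_HDR_LEN and
 * TG3_TSO_TCP_OPT_LEN.  The IP total length is left zero here and is
 * patched in tg3_run_loopback().
 */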
13389 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13391 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13392 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13393 u32 budget;
13394 struct sk_buff *skb;
13395 u8 *tx_data, *rx_data;
13396 dma_addr_t map;
13397 int num_pkts, tx_len, rx_len, i, err;
13398 struct tg3_rx_buffer_desc *desc;
13399 struct tg3_napi *tnapi, *rnapi;
13400 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13402 tnapi = &tp->napi[0];
13403 rnapi = &tp->napi[0];
13404 if (tp->irq_cnt > 1) {
13405 if (tg3_flag(tp, ENABLE_RSS))
13406 rnapi = &tp->napi[1];
13407 if (tg3_flag(tp, ENABLE_TSS))
13408 tnapi = &tp->napi[1];
13410 coal_now = tnapi->coal_now | rnapi->coal_now;
13412 err = -EIO;
13414 tx_len = pktsz;
13415 skb = netdev_alloc_skb(tp->dev, tx_len);
13416 if (!skb)
13417 return -ENOMEM;
13419 tx_data = skb_put(skb, tx_len);
13420 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13421 memset(tx_data + ETH_ALEN, 0x0, 8);
13423 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13425 if (tso_loopback) {
13426 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13428 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13429 TG3_TSO_TCP_OPT_LEN;
13431 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13432 sizeof(tg3_tso_header));
13433 mss = TG3_TSO_MSS;
13435 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13436 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13438 /* Set the total length field in the IP header */
13439 iph->tot_len = htons((u16)(mss + hdr_len));
13441 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13442 TXD_FLAG_CPU_POST_DMA);
13444 if (tg3_flag(tp, HW_TSO_1) ||
13445 tg3_flag(tp, HW_TSO_2) ||
13446 tg3_flag(tp, HW_TSO_3)) {
13447 struct tcphdr *th;
13448 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13449 th = (struct tcphdr *)&tx_data[val];
13450 th->check = 0;
13451 } else
13452 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13454 if (tg3_flag(tp, HW_TSO_3)) {
13455 mss |= (hdr_len & 0xc) << 12;
13456 if (hdr_len & 0x10)
13457 base_flags |= 0x00000010;
13458 base_flags |= (hdr_len & 0x3e0) << 5;
13459 } else if (tg3_flag(tp, HW_TSO_2))
13460 mss |= hdr_len << 9;
13461 else if (tg3_flag(tp, HW_TSO_1) ||
13462 tg3_asic_rev(tp) == ASIC_REV_5705) {
13463 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13464 } else {
13465 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13468 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13469 } else {
13470 num_pkts = 1;
13471 data_off = ETH_HLEN;
13473 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13474 tx_len > VLAN_ETH_FRAME_LEN)
13475 base_flags |= TXD_FLAG_JMB_PKT;
13478 for (i = data_off; i < tx_len; i++)
13479 tx_data[i] = (u8) (i & 0xff);
13481 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13482 if (pci_dma_mapping_error(tp->pdev, map)) {
13483 dev_kfree_skb(skb);
13484 return -EIO;
13487 val = tnapi->tx_prod;
13488 tnapi->tx_buffers[val].skb = skb;
13489 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13491 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13492 rnapi->coal_now);
13494 udelay(10);
13496 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13498 budget = tg3_tx_avail(tnapi);
13499 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13500 base_flags | TXD_FLAG_END, mss, 0)) {
13501 tnapi->tx_buffers[val].skb = NULL;
13502 dev_kfree_skb(skb);
13503 return -EIO;
13506 tnapi->tx_prod++;
13508 /* Sync BD data before updating mailbox */
13509 wmb();
13511 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13512 tr32_mailbox(tnapi->prodmbox);
13514 udelay(10);
13516 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13517 for (i = 0; i < 35; i++) {
13518 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13519 coal_now);
13521 udelay(10);
13523 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13524 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13525 if ((tx_idx == tnapi->tx_prod) &&
13526 (rx_idx == (rx_start_idx + num_pkts)))
13527 break;
13530 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13531 dev_kfree_skb(skb);
13533 if (tx_idx != tnapi->tx_prod)
13534 goto out;
13536 if (rx_idx != rx_start_idx + num_pkts)
13537 goto out;
13539 val = data_off;
13540 while (rx_idx != rx_start_idx) {
13541 desc = &rnapi->rx_rcb[rx_start_idx++];
13542 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13543 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13545 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13546 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13547 goto out;
13549 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13550 - ETH_FCS_LEN;
13552 if (!tso_loopback) {
13553 if (rx_len != tx_len)
13554 goto out;
13556 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13557 if (opaque_key != RXD_OPAQUE_RING_STD)
13558 goto out;
13559 } else {
13560 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13561 goto out;
13563 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13564 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13565 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13566 goto out;
13569 if (opaque_key == RXD_OPAQUE_RING_STD) {
13570 rx_data = tpr->rx_std_buffers[desc_idx].data;
13571 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13572 mapping);
13573 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13574 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13575 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13576 mapping);
13577 } else
13578 goto out;
13580 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13581 PCI_DMA_FROMDEVICE);
13583 rx_data += TG3_RX_OFFSET(tp);
13584 for (i = data_off; i < rx_len; i++, val++) {
13585 if (*(rx_data + i) != (u8) (val & 0xff))
13586 goto out;
13590 err = 0;
13592 /* tg3_free_rings will unmap and free the rx_data */
13593 out:
13594 return err;
13597 #define TG3_STD_LOOPBACK_FAILED 1
13598 #define TG3_JMB_LOOPBACK_FAILED 2
13599 #define TG3_TSO_LOOPBACK_FAILED 4
13600 #define TG3_LOOPBACK_FAILED \
13601 (TG3_STD_LOOPBACK_FAILED | \
13602 TG3_JMB_LOOPBACK_FAILED | \
13603 TG3_TSO_LOOPBACK_FAILED)
13605 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13607 int err = -EIO;
13608 u32 eee_cap;
13609 u32 jmb_pkt_sz = 9000;
13611 if (tp->dma_limit)
13612 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13614 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13615 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13617 if (!netif_running(tp->dev)) {
13618 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13619 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13620 if (do_extlpbk)
13621 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13622 goto done;
13625 err = tg3_reset_hw(tp, true);
13626 if (err) {
13627 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13628 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13629 if (do_extlpbk)
13630 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13631 goto done;
13634 if (tg3_flag(tp, ENABLE_RSS)) {
13635 int i;
13637 /* Reroute all rx packets to the 1st queue */
13638 for (i = MAC_RSS_INDIR_TBL_0;
13639 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13640 tw32(i, 0x0);
13643 /* HW errata - MAC loopback fails in some cases on 5780.
13644 * Normal traffic and PHY loopback are not affected by
13645 * this erratum.  Also, the MAC loopback test is deprecated
13646 * for all newer ASIC revisions.
13648 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13649 !tg3_flag(tp, CPMU_PRESENT)) {
13650 tg3_mac_loopback(tp, true);
13652 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13653 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13655 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13656 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13657 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13659 tg3_mac_loopback(tp, false);
13662 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13663 !tg3_flag(tp, USE_PHYLIB)) {
13664 int i;
13666 tg3_phy_lpbk_set(tp, 0, false);
13668 /* Wait for link */
13669 for (i = 0; i < 100; i++) {
13670 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13671 break;
13672 mdelay(1);
13675 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13676 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13677 if (tg3_flag(tp, TSO_CAPABLE) &&
13678 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13679 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13680 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13681 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13682 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13684 if (do_extlpbk) {
13685 tg3_phy_lpbk_set(tp, 0, true);
13687 /* All link indications report up, but the hardware
13688 * isn't really ready for about 20 msec. Double it
13689 * to be sure.
13691 mdelay(40);
13693 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13694 data[TG3_EXT_LOOPB_TEST] |=
13695 TG3_STD_LOOPBACK_FAILED;
13696 if (tg3_flag(tp, TSO_CAPABLE) &&
13697 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13698 data[TG3_EXT_LOOPB_TEST] |=
13699 TG3_TSO_LOOPBACK_FAILED;
13700 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13701 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13702 data[TG3_EXT_LOOPB_TEST] |=
13703 TG3_JMB_LOOPBACK_FAILED;
13706 /* Re-enable gphy autopowerdown. */
13707 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13708 tg3_phy_toggle_apd(tp, true);
13711 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13712 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13714 done:
13715 tp->phy_flags |= eee_cap;
13717 return err;
13720 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13721 u64 *data)
13723 struct tg3 *tp = netdev_priv(dev);
13724 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13726 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13727 if (tg3_power_up(tp)) {
13728 etest->flags |= ETH_TEST_FL_FAILED;
13729 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13730 return;
13732 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13735 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13737 if (tg3_test_nvram(tp) != 0) {
13738 etest->flags |= ETH_TEST_FL_FAILED;
13739 data[TG3_NVRAM_TEST] = 1;
13741 if (!doextlpbk && tg3_test_link(tp)) {
13742 etest->flags |= ETH_TEST_FL_FAILED;
13743 data[TG3_LINK_TEST] = 1;
13745 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13746 int err, err2 = 0, irq_sync = 0;
13748 if (netif_running(dev)) {
13749 tg3_phy_stop(tp);
13750 tg3_netif_stop(tp);
13751 irq_sync = 1;
13754 tg3_full_lock(tp, irq_sync);
13755 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13756 err = tg3_nvram_lock(tp);
13757 tg3_halt_cpu(tp, RX_CPU_BASE);
13758 if (!tg3_flag(tp, 5705_PLUS))
13759 tg3_halt_cpu(tp, TX_CPU_BASE);
13760 if (!err)
13761 tg3_nvram_unlock(tp);
13763 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13764 tg3_phy_reset(tp);
13766 if (tg3_test_registers(tp) != 0) {
13767 etest->flags |= ETH_TEST_FL_FAILED;
13768 data[TG3_REGISTER_TEST] = 1;
13771 if (tg3_test_memory(tp) != 0) {
13772 etest->flags |= ETH_TEST_FL_FAILED;
13773 data[TG3_MEMORY_TEST] = 1;
13776 if (doextlpbk)
13777 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13779 if (tg3_test_loopback(tp, data, doextlpbk))
13780 etest->flags |= ETH_TEST_FL_FAILED;
13782 tg3_full_unlock(tp);
13784 if (tg3_test_interrupt(tp) != 0) {
13785 etest->flags |= ETH_TEST_FL_FAILED;
13786 data[TG3_INTERRUPT_TEST] = 1;
13789 tg3_full_lock(tp, 0);
13791 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13792 if (netif_running(dev)) {
13793 tg3_flag_set(tp, INIT_COMPLETE);
13794 err2 = tg3_restart_hw(tp, true);
13795 if (!err2)
13796 tg3_netif_start(tp);
13799 tg3_full_unlock(tp);
13801 if (irq_sync && !err2)
13802 tg3_phy_start(tp);
13804 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13805 tg3_power_down_prepare(tp);
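/* Hedged usage sketch ("eth0" assumed):
 *
 *   ethtool -t eth0 online    # nvram and link tests only
 *   ethtool -t eth0 offline   # adds register, memory, loopback and
 *                             # interrupt tests; traffic is disrupted
 *
 * Failures are reported per test via the TG3_*_TEST slots filled above.
 */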
13809 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13811 struct tg3 *tp = netdev_priv(dev);
13812 struct hwtstamp_config stmpconf;
13814 if (!tg3_flag(tp, PTP_CAPABLE))
13815 return -EOPNOTSUPP;
13817 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13818 return -EFAULT;
13820 if (stmpconf.flags)
13821 return -EINVAL;
13823 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13824 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13825 return -ERANGE;
13827 switch (stmpconf.rx_filter) {
13828 case HWTSTAMP_FILTER_NONE:
13829 tp->rxptpctl = 0;
13830 break;
13831 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13832 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13833 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13834 break;
13835 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13836 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13837 TG3_RX_PTP_CTL_SYNC_EVNT;
13838 break;
13839 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13840 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13841 TG3_RX_PTP_CTL_DELAY_REQ;
13842 break;
13843 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13844 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13845 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13846 break;
13847 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13848 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13849 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13850 break;
13851 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13852 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13853 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13854 break;
13855 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13856 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13857 TG3_RX_PTP_CTL_SYNC_EVNT;
13858 break;
13859 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13860 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13861 TG3_RX_PTP_CTL_SYNC_EVNT;
13862 break;
13863 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13864 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13865 TG3_RX_PTP_CTL_SYNC_EVNT;
13866 break;
13867 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13868 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13869 TG3_RX_PTP_CTL_DELAY_REQ;
13870 break;
13871 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13872 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13873 TG3_RX_PTP_CTL_DELAY_REQ;
13874 break;
13875 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13876 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13877 TG3_RX_PTP_CTL_DELAY_REQ;
13878 break;
13879 default:
13880 return -ERANGE;
13883 if (netif_running(dev) && tp->rxptpctl)
13884 tw32(TG3_RX_PTP_CTL,
13885 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13887 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13888 tg3_flag_set(tp, TX_TSTAMP_EN);
13889 else
13890 tg3_flag_clear(tp, TX_TSTAMP_EN);
13892 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13893 -EFAULT : 0;
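/* A minimal user-space sketch of the SIOCSHWTSTAMP path above (interface
 * name and socket are assumptions):
 *
 *   struct hwtstamp_config cfg = {
 *           .tx_type   = HWTSTAMP_TX_ON,
 *           .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *   };
 *   struct ifreq ifr;
 *
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(fd, SIOCSHWTSTAMP, &ifr);  - fd is any open UDP socket
 *
 * Note that unsupported rx_filter values fail with -ERANGE instead of
 * being coerced to a broader supported filter.
 */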
13896 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13898 struct tg3 *tp = netdev_priv(dev);
13899 struct hwtstamp_config stmpconf;
13901 if (!tg3_flag(tp, PTP_CAPABLE))
13902 return -EOPNOTSUPP;
13904 stmpconf.flags = 0;
13905 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13906 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13908 switch (tp->rxptpctl) {
13909 case 0:
13910 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13911 break;
13912 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13913 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13914 break;
13915 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13916 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13917 break;
13918 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13919 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13920 break;
13921 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13922 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13923 break;
13924 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13925 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13926 break;
13927 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13928 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13929 break;
13930 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13931 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13932 break;
13933 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13934 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13935 break;
13936 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13937 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13938 break;
13939 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13940 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13941 break;
13942 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13943 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13944 break;
13945 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13946 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13947 break;
13948 default:
13949 WARN_ON_ONCE(1);
13950 return -ERANGE;
13953 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13954 -EFAULT : 0;
13957 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13959 struct mii_ioctl_data *data = if_mii(ifr);
13960 struct tg3 *tp = netdev_priv(dev);
13961 int err;
13963 if (tg3_flag(tp, USE_PHYLIB)) {
13964 struct phy_device *phydev;
13965 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13966 return -EAGAIN;
13967 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13968 return phy_mii_ioctl(phydev, ifr, cmd);
13971 switch (cmd) {
13972 case SIOCGMIIPHY:
13973 data->phy_id = tp->phy_addr;
13975 /* fallthru */
13976 case SIOCGMIIREG: {
13977 u32 mii_regval;
13979 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13980 break; /* We have no PHY */
13982 if (!netif_running(dev))
13983 return -EAGAIN;
13985 spin_lock_bh(&tp->lock);
13986 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13987 data->reg_num & 0x1f, &mii_regval);
13988 spin_unlock_bh(&tp->lock);
13990 data->val_out = mii_regval;
13992 return err;
13995 case SIOCSMIIREG:
13996 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13997 break; /* We have no PHY */
13999 if (!netif_running(dev))
14000 return -EAGAIN;
14002 spin_lock_bh(&tp->lock);
14003 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14004 data->reg_num & 0x1f, data->val_in);
14005 spin_unlock_bh(&tp->lock);
14007 return err;
14009 case SIOCSHWTSTAMP:
14010 return tg3_hwtstamp_set(dev, ifr);
14012 case SIOCGHWTSTAMP:
14013 return tg3_hwtstamp_get(dev, ifr);
14015 default:
14016 /* do nothing */
14017 break;
14019 return -EOPNOTSUPP;
14022 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14024 struct tg3 *tp = netdev_priv(dev);
14026 memcpy(ec, &tp->coal, sizeof(*ec));
14027 return 0;
14030 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14032 struct tg3 *tp = netdev_priv(dev);
14033 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14034 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14036 if (!tg3_flag(tp, 5705_PLUS)) {
14037 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14038 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14039 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14040 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14043 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14044 (!ec->rx_coalesce_usecs) ||
14045 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14046 (!ec->tx_coalesce_usecs) ||
14047 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14048 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14049 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14050 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14051 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14052 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14053 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14054 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14055 return -EINVAL;
14057 /* Only copy relevant parameters, ignore all others. */
14058 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14059 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14060 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14061 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14062 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14063 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14064 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14065 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14066 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14068 if (netif_running(dev)) {
14069 tg3_full_lock(tp, 0);
14070 __tg3_set_coalesce(tp, &tp->coal);
14071 tg3_full_unlock(tp);
14073 return 0;
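/* Hedged example ("eth0" assumed): ethtool -C eth0 rx-usecs 20
 * rx-frames 5 updates only the fields copied above.  Because the four
 * bounds stay zero on 5705_PLUS parts, any nonzero *_usecs_irq or
 * stats-block interval request on those chips fails with -EINVAL.
 */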
14076 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14078 struct tg3 *tp = netdev_priv(dev);
14080 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14081 netdev_warn(tp->dev, "Board does not support EEE!\n");
14082 return -EOPNOTSUPP;
14085 if (edata->advertised != tp->eee.advertised) {
14086 netdev_warn(tp->dev,
14087 "Direct manipulation of EEE advertisement is not supported\n");
14088 return -EINVAL;
14091 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14092 netdev_warn(tp->dev,
14093 "Maximal Tx Lpi timer supported is %#x(u)\n",
14094 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14095 return -EINVAL;
14098 tp->eee = *edata;
14100 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14101 tg3_warn_mgmt_link_flap(tp);
14103 if (netif_running(tp->dev)) {
14104 tg3_full_lock(tp, 0);
14105 tg3_setup_eee(tp);
14106 tg3_phy_reset(tp);
14107 tg3_full_unlock(tp);
14110 return 0;
14113 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14115 struct tg3 *tp = netdev_priv(dev);
14117 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14118 netdev_warn(tp->dev,
14119 "Board does not support EEE!\n");
14120 return -EOPNOTSUPP;
14123 *edata = tp->eee;
14124 return 0;
14127 static const struct ethtool_ops tg3_ethtool_ops = {
14128 .get_drvinfo = tg3_get_drvinfo,
14129 .get_regs_len = tg3_get_regs_len,
14130 .get_regs = tg3_get_regs,
14131 .get_wol = tg3_get_wol,
14132 .set_wol = tg3_set_wol,
14133 .get_msglevel = tg3_get_msglevel,
14134 .set_msglevel = tg3_set_msglevel,
14135 .nway_reset = tg3_nway_reset,
14136 .get_link = ethtool_op_get_link,
14137 .get_eeprom_len = tg3_get_eeprom_len,
14138 .get_eeprom = tg3_get_eeprom,
14139 .set_eeprom = tg3_set_eeprom,
14140 .get_ringparam = tg3_get_ringparam,
14141 .set_ringparam = tg3_set_ringparam,
14142 .get_pauseparam = tg3_get_pauseparam,
14143 .set_pauseparam = tg3_set_pauseparam,
14144 .self_test = tg3_self_test,
14145 .get_strings = tg3_get_strings,
14146 .set_phys_id = tg3_set_phys_id,
14147 .get_ethtool_stats = tg3_get_ethtool_stats,
14148 .get_coalesce = tg3_get_coalesce,
14149 .set_coalesce = tg3_set_coalesce,
14150 .get_sset_count = tg3_get_sset_count,
14151 .get_rxnfc = tg3_get_rxnfc,
14152 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14153 .get_rxfh = tg3_get_rxfh,
14154 .set_rxfh = tg3_set_rxfh,
14155 .get_channels = tg3_get_channels,
14156 .set_channels = tg3_set_channels,
14157 .get_ts_info = tg3_get_ts_info,
14158 .get_eee = tg3_get_eee,
14159 .set_eee = tg3_set_eee,
14160 .get_link_ksettings = tg3_get_link_ksettings,
14161 .set_link_ksettings = tg3_set_link_ksettings,
14164 static void tg3_get_stats64(struct net_device *dev,
14165 struct rtnl_link_stats64 *stats)
14167 struct tg3 *tp = netdev_priv(dev);
14169 spin_lock_bh(&tp->lock);
14170 if (!tp->hw_stats) {
14171 *stats = tp->net_stats_prev;
14172 spin_unlock_bh(&tp->lock);
14173 return;
14176 tg3_get_nstats(tp, stats);
14177 spin_unlock_bh(&tp->lock);
14180 static void tg3_set_rx_mode(struct net_device *dev)
14182 struct tg3 *tp = netdev_priv(dev);
14184 if (!netif_running(dev))
14185 return;
14187 tg3_full_lock(tp, 0);
14188 __tg3_set_rx_mode(dev);
14189 tg3_full_unlock(tp);
14192 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14193 int new_mtu)
14195 dev->mtu = new_mtu;
14197 if (new_mtu > ETH_DATA_LEN) {
14198 if (tg3_flag(tp, 5780_CLASS)) {
14199 netdev_update_features(dev);
14200 tg3_flag_clear(tp, TSO_CAPABLE);
14201 } else {
14202 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14204 } else {
14205 if (tg3_flag(tp, 5780_CLASS)) {
14206 tg3_flag_set(tp, TSO_CAPABLE);
14207 netdev_update_features(dev);
14209 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14213 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14215 struct tg3 *tp = netdev_priv(dev);
14216 int err;
14217 bool reset_phy = false;
14219 if (!netif_running(dev)) {
14220 /* We'll just catch it later when the
14221 * device is brought up.
14223 tg3_set_mtu(dev, tp, new_mtu);
14224 return 0;
14227 tg3_phy_stop(tp);
14229 tg3_netif_stop(tp);
14231 tg3_set_mtu(dev, tp, new_mtu);
14233 tg3_full_lock(tp, 1);
14235 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14237 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14238 * breaks all requests to 256 bytes.
14240 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14241 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14242 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14243 tg3_asic_rev(tp) == ASIC_REV_5720)
14244 reset_phy = true;
14246 err = tg3_restart_hw(tp, reset_phy);
14248 if (!err)
14249 tg3_netif_start(tp);
14251 tg3_full_unlock(tp);
14253 if (!err)
14254 tg3_phy_start(tp);
14256 return err;
14259 static const struct net_device_ops tg3_netdev_ops = {
14260 .ndo_open = tg3_open,
14261 .ndo_stop = tg3_close,
14262 .ndo_start_xmit = tg3_start_xmit,
14263 .ndo_get_stats64 = tg3_get_stats64,
14264 .ndo_validate_addr = eth_validate_addr,
14265 .ndo_set_rx_mode = tg3_set_rx_mode,
14266 .ndo_set_mac_address = tg3_set_mac_addr,
14267 .ndo_do_ioctl = tg3_ioctl,
14268 .ndo_tx_timeout = tg3_tx_timeout,
14269 .ndo_change_mtu = tg3_change_mtu,
14270 .ndo_fix_features = tg3_fix_features,
14271 .ndo_set_features = tg3_set_features,
14272 #ifdef CONFIG_NET_POLL_CONTROLLER
14273 .ndo_poll_controller = tg3_poll_controller,
14274 #endif
14277 static void tg3_get_eeprom_size(struct tg3 *tp)
14279 u32 cursize, val, magic;
14281 tp->nvram_size = EEPROM_CHIP_SIZE;
14283 if (tg3_nvram_read(tp, 0, &magic) != 0)
14284 return;
14286 if ((magic != TG3_EEPROM_MAGIC) &&
14287 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14288 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14289 return;
14291 /*
14292 * Size the chip by reading offsets at increasing powers of two.
14293 * When we encounter our validation signature, we know the addressing
14294 * has wrapped around, and thus have our chip size.
14296 cursize = 0x10;
14298 while (cursize < tp->nvram_size) {
14299 if (tg3_nvram_read(tp, cursize, &val) != 0)
14300 return;
14302 if (val == magic)
14303 break;
14305 cursize <<= 1;
14308 tp->nvram_size = cursize;
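/* Worked example of the probe above: NVRAM addressing wraps modulo the
 * chip size, so on a 512-byte part the reads at 0x10, 0x20, ... first
 * return the magic value again at offset 0x200, and cursize at that
 * point (512) is the capacity.
 */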
14311 static void tg3_get_nvram_size(struct tg3 *tp)
14313 u32 val;
14315 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14316 return;
14318 /* Selfboot format */
14319 if (val != TG3_EEPROM_MAGIC) {
14320 tg3_get_eeprom_size(tp);
14321 return;
14324 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14325 if (val != 0) {
14326 /* This is confusing. We want to operate on the
14327 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14328 * call will read from NVRAM and byteswap the data
14329 * according to the byteswapping settings for all
14330 * other register accesses. This ensures the data we
14331 * want will always reside in the lower 16-bits.
14332 * However, the data in NVRAM is in LE format, which
14333 * means the data from the NVRAM read will always be
14334 * opposite the endianness of the CPU. The 16-bit
14335 * byteswap then brings the data to CPU endianness.
14337 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14338 return;
14341 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14344 static void tg3_get_nvram_info(struct tg3 *tp)
14346 u32 nvcfg1;
14348 nvcfg1 = tr32(NVRAM_CFG1);
14349 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14350 tg3_flag_set(tp, FLASH);
14351 } else {
14352 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14353 tw32(NVRAM_CFG1, nvcfg1);
14356 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14357 tg3_flag(tp, 5780_CLASS)) {
14358 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14359 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14360 tp->nvram_jedecnum = JEDEC_ATMEL;
14361 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14362 tg3_flag_set(tp, NVRAM_BUFFERED);
14363 break;
14364 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14365 tp->nvram_jedecnum = JEDEC_ATMEL;
14366 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14367 break;
14368 case FLASH_VENDOR_ATMEL_EEPROM:
14369 tp->nvram_jedecnum = JEDEC_ATMEL;
14370 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14371 tg3_flag_set(tp, NVRAM_BUFFERED);
14372 break;
14373 case FLASH_VENDOR_ST:
14374 tp->nvram_jedecnum = JEDEC_ST;
14375 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14376 tg3_flag_set(tp, NVRAM_BUFFERED);
14377 break;
14378 case FLASH_VENDOR_SAIFUN:
14379 tp->nvram_jedecnum = JEDEC_SAIFUN;
14380 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14381 break;
14382 case FLASH_VENDOR_SST_SMALL:
14383 case FLASH_VENDOR_SST_LARGE:
14384 tp->nvram_jedecnum = JEDEC_SST;
14385 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14386 break;
14388 } else {
14389 tp->nvram_jedecnum = JEDEC_ATMEL;
14390 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14391 tg3_flag_set(tp, NVRAM_BUFFERED);
14395 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14397 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14398 case FLASH_5752PAGE_SIZE_256:
14399 tp->nvram_pagesize = 256;
14400 break;
14401 case FLASH_5752PAGE_SIZE_512:
14402 tp->nvram_pagesize = 512;
14403 break;
14404 case FLASH_5752PAGE_SIZE_1K:
14405 tp->nvram_pagesize = 1024;
14406 break;
14407 case FLASH_5752PAGE_SIZE_2K:
14408 tp->nvram_pagesize = 2048;
14409 break;
14410 case FLASH_5752PAGE_SIZE_4K:
14411 tp->nvram_pagesize = 4096;
14412 break;
14413 case FLASH_5752PAGE_SIZE_264:
14414 tp->nvram_pagesize = 264;
14415 break;
14416 case FLASH_5752PAGE_SIZE_528:
14417 tp->nvram_pagesize = 528;
14418 break;
14422 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14424 u32 nvcfg1;
14426 nvcfg1 = tr32(NVRAM_CFG1);
14428 /* NVRAM protection for TPM */
14429 if (nvcfg1 & (1 << 27))
14430 tg3_flag_set(tp, PROTECTED_NVRAM);
14432 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14433 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14434 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14435 tp->nvram_jedecnum = JEDEC_ATMEL;
14436 tg3_flag_set(tp, NVRAM_BUFFERED);
14437 break;
14438 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14439 tp->nvram_jedecnum = JEDEC_ATMEL;
14440 tg3_flag_set(tp, NVRAM_BUFFERED);
14441 tg3_flag_set(tp, FLASH);
14442 break;
14443 case FLASH_5752VENDOR_ST_M45PE10:
14444 case FLASH_5752VENDOR_ST_M45PE20:
14445 case FLASH_5752VENDOR_ST_M45PE40:
14446 tp->nvram_jedecnum = JEDEC_ST;
14447 tg3_flag_set(tp, NVRAM_BUFFERED);
14448 tg3_flag_set(tp, FLASH);
14449 break;
14452 if (tg3_flag(tp, FLASH)) {
14453 tg3_nvram_get_pagesize(tp, nvcfg1);
14454 } else {
14455 /* For eeprom, set pagesize to maximum eeprom size */
14456 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14458 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14459 tw32(NVRAM_CFG1, nvcfg1);
14463 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14465 u32 nvcfg1, protect = 0;
14467 nvcfg1 = tr32(NVRAM_CFG1);
14469 /* NVRAM protection for TPM */
14470 if (nvcfg1 & (1 << 27)) {
14471 tg3_flag_set(tp, PROTECTED_NVRAM);
14472 protect = 1;
14475 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14476 switch (nvcfg1) {
14477 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14478 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14479 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14480 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14481 tp->nvram_jedecnum = JEDEC_ATMEL;
14482 tg3_flag_set(tp, NVRAM_BUFFERED);
14483 tg3_flag_set(tp, FLASH);
14484 tp->nvram_pagesize = 264;
14485 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14486 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14487 tp->nvram_size = (protect ? 0x3e200 :
14488 TG3_NVRAM_SIZE_512KB);
14489 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14490 tp->nvram_size = (protect ? 0x1f200 :
14491 TG3_NVRAM_SIZE_256KB);
14492 else
14493 tp->nvram_size = (protect ? 0x1f200 :
14494 TG3_NVRAM_SIZE_128KB);
14495 break;
14496 case FLASH_5752VENDOR_ST_M45PE10:
14497 case FLASH_5752VENDOR_ST_M45PE20:
14498 case FLASH_5752VENDOR_ST_M45PE40:
14499 tp->nvram_jedecnum = JEDEC_ST;
14500 tg3_flag_set(tp, NVRAM_BUFFERED);
14501 tg3_flag_set(tp, FLASH);
14502 tp->nvram_pagesize = 256;
14503 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14504 tp->nvram_size = (protect ?
14505 TG3_NVRAM_SIZE_64KB :
14506 TG3_NVRAM_SIZE_128KB);
14507 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14508 tp->nvram_size = (protect ?
14509 TG3_NVRAM_SIZE_64KB :
14510 TG3_NVRAM_SIZE_256KB);
14511 else
14512 tp->nvram_size = (protect ?
14513 TG3_NVRAM_SIZE_128KB :
14514 TG3_NVRAM_SIZE_512KB);
14515 break;
14519 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14521 u32 nvcfg1;
14523 nvcfg1 = tr32(NVRAM_CFG1);
14525 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14526 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14527 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14528 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14529 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14530 tp->nvram_jedecnum = JEDEC_ATMEL;
14531 tg3_flag_set(tp, NVRAM_BUFFERED);
14532 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14534 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14535 tw32(NVRAM_CFG1, nvcfg1);
14536 break;
14537 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14538 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14539 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14540 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14541 tp->nvram_jedecnum = JEDEC_ATMEL;
14542 tg3_flag_set(tp, NVRAM_BUFFERED);
14543 tg3_flag_set(tp, FLASH);
14544 tp->nvram_pagesize = 264;
14545 break;
14546 case FLASH_5752VENDOR_ST_M45PE10:
14547 case FLASH_5752VENDOR_ST_M45PE20:
14548 case FLASH_5752VENDOR_ST_M45PE40:
14549 tp->nvram_jedecnum = JEDEC_ST;
14550 tg3_flag_set(tp, NVRAM_BUFFERED);
14551 tg3_flag_set(tp, FLASH);
14552 tp->nvram_pagesize = 256;
14553 break;
14557 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14559 u32 nvcfg1, protect = 0;
14561 nvcfg1 = tr32(NVRAM_CFG1);
14563 /* NVRAM protection for TPM */
14564 if (nvcfg1 & (1 << 27)) {
14565 tg3_flag_set(tp, PROTECTED_NVRAM);
14566 protect = 1;
14569 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14570 switch (nvcfg1) {
14571 case FLASH_5761VENDOR_ATMEL_ADB021D:
14572 case FLASH_5761VENDOR_ATMEL_ADB041D:
14573 case FLASH_5761VENDOR_ATMEL_ADB081D:
14574 case FLASH_5761VENDOR_ATMEL_ADB161D:
14575 case FLASH_5761VENDOR_ATMEL_MDB021D:
14576 case FLASH_5761VENDOR_ATMEL_MDB041D:
14577 case FLASH_5761VENDOR_ATMEL_MDB081D:
14578 case FLASH_5761VENDOR_ATMEL_MDB161D:
14579 tp->nvram_jedecnum = JEDEC_ATMEL;
14580 tg3_flag_set(tp, NVRAM_BUFFERED);
14581 tg3_flag_set(tp, FLASH);
14582 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14583 tp->nvram_pagesize = 256;
14584 break;
14585 case FLASH_5761VENDOR_ST_A_M45PE20:
14586 case FLASH_5761VENDOR_ST_A_M45PE40:
14587 case FLASH_5761VENDOR_ST_A_M45PE80:
14588 case FLASH_5761VENDOR_ST_A_M45PE16:
14589 case FLASH_5761VENDOR_ST_M_M45PE20:
14590 case FLASH_5761VENDOR_ST_M_M45PE40:
14591 case FLASH_5761VENDOR_ST_M_M45PE80:
14592 case FLASH_5761VENDOR_ST_M_M45PE16:
14593 tp->nvram_jedecnum = JEDEC_ST;
14594 tg3_flag_set(tp, NVRAM_BUFFERED);
14595 tg3_flag_set(tp, FLASH);
14596 tp->nvram_pagesize = 256;
14597 break;
14600 if (protect) {
14601 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14602 } else {
14603 switch (nvcfg1) {
14604 case FLASH_5761VENDOR_ATMEL_ADB161D:
14605 case FLASH_5761VENDOR_ATMEL_MDB161D:
14606 case FLASH_5761VENDOR_ST_A_M45PE16:
14607 case FLASH_5761VENDOR_ST_M_M45PE16:
14608 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14609 break;
14610 case FLASH_5761VENDOR_ATMEL_ADB081D:
14611 case FLASH_5761VENDOR_ATMEL_MDB081D:
14612 case FLASH_5761VENDOR_ST_A_M45PE80:
14613 case FLASH_5761VENDOR_ST_M_M45PE80:
14614 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14615 break;
14616 case FLASH_5761VENDOR_ATMEL_ADB041D:
14617 case FLASH_5761VENDOR_ATMEL_MDB041D:
14618 case FLASH_5761VENDOR_ST_A_M45PE40:
14619 case FLASH_5761VENDOR_ST_M_M45PE40:
14620 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14621 break;
14622 case FLASH_5761VENDOR_ATMEL_ADB021D:
14623 case FLASH_5761VENDOR_ATMEL_MDB021D:
14624 case FLASH_5761VENDOR_ST_A_M45PE20:
14625 case FLASH_5761VENDOR_ST_M_M45PE20:
14626 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14627 break;
14632 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14634 tp->nvram_jedecnum = JEDEC_ATMEL;
14635 tg3_flag_set(tp, NVRAM_BUFFERED);
14636 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14639 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14641 u32 nvcfg1;
14643 nvcfg1 = tr32(NVRAM_CFG1);
14645 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14646 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14647 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14648 tp->nvram_jedecnum = JEDEC_ATMEL;
14649 tg3_flag_set(tp, NVRAM_BUFFERED);
14650 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14652 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14653 tw32(NVRAM_CFG1, nvcfg1);
14654 return;
14655 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14656 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14657 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14658 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14659 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14660 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14661 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14662 tp->nvram_jedecnum = JEDEC_ATMEL;
14663 tg3_flag_set(tp, NVRAM_BUFFERED);
14664 tg3_flag_set(tp, FLASH);
14666 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14667 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14668 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14669 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14670 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14671 break;
14672 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14673 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14674 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14675 break;
14676 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14677 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14678 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14679 break;
14681 break;
14682 case FLASH_5752VENDOR_ST_M45PE10:
14683 case FLASH_5752VENDOR_ST_M45PE20:
14684 case FLASH_5752VENDOR_ST_M45PE40:
14685 tp->nvram_jedecnum = JEDEC_ST;
14686 tg3_flag_set(tp, NVRAM_BUFFERED);
14687 tg3_flag_set(tp, FLASH);
14689 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14690 case FLASH_5752VENDOR_ST_M45PE10:
14691 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14692 break;
14693 case FLASH_5752VENDOR_ST_M45PE20:
14694 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14695 break;
14696 case FLASH_5752VENDOR_ST_M45PE40:
14697 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14698 break;
14700 break;
14701 default:
14702 tg3_flag_set(tp, NO_NVRAM);
14703 return;
14706 tg3_nvram_get_pagesize(tp, nvcfg1);
14707 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14708 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
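/* A note on the 264/528 check above: Atmel AT45DB-style DataFlash uses
 * 264- or 528-byte pages (a power of two plus spare bytes), so flat
 * NVRAM offsets must be translated into a page/byte pair before they
 * reach the device. A minimal sketch of that translation, assuming a
 * 264-byte page and a 9-bit page position as in tg3_nvram_phys_addr():
 *
 *	page = addr / 264;		// offset 264 -> page 1
 *	byte = addr % 264;		// offset 264 -> byte 0
 *	phys = (page << 9) + byte;	// page index in the upper bits
 *
 * Power-of-two page sizes need no such translation, which is what
 * NO_NVRAM_ADDR_TRANS records.
 */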
14712 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14714 u32 nvcfg1;
14716 nvcfg1 = tr32(NVRAM_CFG1);
14718 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14719 case FLASH_5717VENDOR_ATMEL_EEPROM:
14720 case FLASH_5717VENDOR_MICRO_EEPROM:
14721 tp->nvram_jedecnum = JEDEC_ATMEL;
14722 tg3_flag_set(tp, NVRAM_BUFFERED);
14723 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14725 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14726 tw32(NVRAM_CFG1, nvcfg1);
14727 return;
14728 case FLASH_5717VENDOR_ATMEL_MDB011D:
14729 case FLASH_5717VENDOR_ATMEL_ADB011B:
14730 case FLASH_5717VENDOR_ATMEL_ADB011D:
14731 case FLASH_5717VENDOR_ATMEL_MDB021D:
14732 case FLASH_5717VENDOR_ATMEL_ADB021B:
14733 case FLASH_5717VENDOR_ATMEL_ADB021D:
14734 case FLASH_5717VENDOR_ATMEL_45USPT:
14735 tp->nvram_jedecnum = JEDEC_ATMEL;
14736 tg3_flag_set(tp, NVRAM_BUFFERED);
14737 tg3_flag_set(tp, FLASH);
14739 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14740 case FLASH_5717VENDOR_ATMEL_MDB021D:
14741 /* Detect size with tg3_nvram_get_size() */
14742 break;
14743 case FLASH_5717VENDOR_ATMEL_ADB021B:
14744 case FLASH_5717VENDOR_ATMEL_ADB021D:
14745 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14746 break;
14747 default:
14748 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14749 break;
14751 break;
14752 case FLASH_5717VENDOR_ST_M_M25PE10:
14753 case FLASH_5717VENDOR_ST_A_M25PE10:
14754 case FLASH_5717VENDOR_ST_M_M45PE10:
14755 case FLASH_5717VENDOR_ST_A_M45PE10:
14756 case FLASH_5717VENDOR_ST_M_M25PE20:
14757 case FLASH_5717VENDOR_ST_A_M25PE20:
14758 case FLASH_5717VENDOR_ST_M_M45PE20:
14759 case FLASH_5717VENDOR_ST_A_M45PE20:
14760 case FLASH_5717VENDOR_ST_25USPT:
14761 case FLASH_5717VENDOR_ST_45USPT:
14762 tp->nvram_jedecnum = JEDEC_ST;
14763 tg3_flag_set(tp, NVRAM_BUFFERED);
14764 tg3_flag_set(tp, FLASH);
14766 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14767 case FLASH_5717VENDOR_ST_M_M25PE20:
14768 case FLASH_5717VENDOR_ST_M_M45PE20:
14769 /* Detect size with tg3_nvram_get_size() */
14770 break;
14771 case FLASH_5717VENDOR_ST_A_M25PE20:
14772 case FLASH_5717VENDOR_ST_A_M45PE20:
14773 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14774 break;
14775 default:
14776 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14777 break;
14779 break;
14780 default:
14781 tg3_flag_set(tp, NO_NVRAM);
14782 return;
14785 tg3_nvram_get_pagesize(tp, nvcfg1);
14786 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14787 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14790 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14792 u32 nvcfg1, nvmpinstrp, nv_status;
14794 nvcfg1 = tr32(NVRAM_CFG1);
14795 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14797 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14798 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14799 tg3_flag_set(tp, NO_NVRAM);
14800 return;
14803 switch (nvmpinstrp) {
14804 case FLASH_5762_MX25L_100:
14805 case FLASH_5762_MX25L_200:
14806 case FLASH_5762_MX25L_400:
14807 case FLASH_5762_MX25L_800:
14808 case FLASH_5762_MX25L_160_320:
14809 tp->nvram_pagesize = 4096;
14810 tp->nvram_jedecnum = JEDEC_MACRONIX;
14811 tg3_flag_set(tp, NVRAM_BUFFERED);
14812 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14813 tg3_flag_set(tp, FLASH);
14814 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14815 tp->nvram_size =
14816 (1 << (nv_status >> AUTOSENSE_DEVID &
14817 AUTOSENSE_DEVID_MASK)
14818 << AUTOSENSE_SIZE_IN_MB);
14819 return;
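/* Worked example for the autosense size computed above, assuming
 * AUTOSENSE_SIZE_IN_MB is the shift that scales the result into
 * megabytes (i.e. 20): a device-id field of 1 read from
 * NVRAM_AUTOSENSE_STATUS gives
 *
 *	tp->nvram_size = (1 << 1) << 20 = 0x200000	(2 MB)
 *
 * i.e. the part advertises 2^devid megabytes of flash.
 */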
14821 case FLASH_5762_EEPROM_HD:
14822 nvmpinstrp = FLASH_5720_EEPROM_HD;
14823 break;
14824 case FLASH_5762_EEPROM_LD:
14825 nvmpinstrp = FLASH_5720_EEPROM_LD;
14826 break;
14827 case FLASH_5720VENDOR_M_ST_M45PE20:
14828 /* This pinstrap supports multiple sizes, so force it
14829 * to read the actual size from location 0xf0.
14830 */
14831 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14832 break;
14836 switch (nvmpinstrp) {
14837 case FLASH_5720_EEPROM_HD:
14838 case FLASH_5720_EEPROM_LD:
14839 tp->nvram_jedecnum = JEDEC_ATMEL;
14840 tg3_flag_set(tp, NVRAM_BUFFERED);
14842 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14843 tw32(NVRAM_CFG1, nvcfg1);
14844 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14845 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14846 else
14847 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14848 return;
14849 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14850 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14851 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14852 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14853 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14854 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14855 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14856 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14857 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14858 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14859 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14860 case FLASH_5720VENDOR_ATMEL_45USPT:
14861 tp->nvram_jedecnum = JEDEC_ATMEL;
14862 tg3_flag_set(tp, NVRAM_BUFFERED);
14863 tg3_flag_set(tp, FLASH);
14865 switch (nvmpinstrp) {
14866 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14867 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14868 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14869 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14870 break;
14871 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14872 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14873 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14874 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14875 break;
14876 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14877 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14878 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14879 break;
14880 default:
14881 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14882 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14883 break;
14885 break;
14886 case FLASH_5720VENDOR_M_ST_M25PE10:
14887 case FLASH_5720VENDOR_M_ST_M45PE10:
14888 case FLASH_5720VENDOR_A_ST_M25PE10:
14889 case FLASH_5720VENDOR_A_ST_M45PE10:
14890 case FLASH_5720VENDOR_M_ST_M25PE20:
14891 case FLASH_5720VENDOR_M_ST_M45PE20:
14892 case FLASH_5720VENDOR_A_ST_M25PE20:
14893 case FLASH_5720VENDOR_A_ST_M45PE20:
14894 case FLASH_5720VENDOR_M_ST_M25PE40:
14895 case FLASH_5720VENDOR_M_ST_M45PE40:
14896 case FLASH_5720VENDOR_A_ST_M25PE40:
14897 case FLASH_5720VENDOR_A_ST_M45PE40:
14898 case FLASH_5720VENDOR_M_ST_M25PE80:
14899 case FLASH_5720VENDOR_M_ST_M45PE80:
14900 case FLASH_5720VENDOR_A_ST_M25PE80:
14901 case FLASH_5720VENDOR_A_ST_M45PE80:
14902 case FLASH_5720VENDOR_ST_25USPT:
14903 case FLASH_5720VENDOR_ST_45USPT:
14904 tp->nvram_jedecnum = JEDEC_ST;
14905 tg3_flag_set(tp, NVRAM_BUFFERED);
14906 tg3_flag_set(tp, FLASH);
14908 switch (nvmpinstrp) {
14909 case FLASH_5720VENDOR_M_ST_M25PE20:
14910 case FLASH_5720VENDOR_M_ST_M45PE20:
14911 case FLASH_5720VENDOR_A_ST_M25PE20:
14912 case FLASH_5720VENDOR_A_ST_M45PE20:
14913 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14914 break;
14915 case FLASH_5720VENDOR_M_ST_M25PE40:
14916 case FLASH_5720VENDOR_M_ST_M45PE40:
14917 case FLASH_5720VENDOR_A_ST_M25PE40:
14918 case FLASH_5720VENDOR_A_ST_M45PE40:
14919 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14920 break;
14921 case FLASH_5720VENDOR_M_ST_M25PE80:
14922 case FLASH_5720VENDOR_M_ST_M45PE80:
14923 case FLASH_5720VENDOR_A_ST_M25PE80:
14924 case FLASH_5720VENDOR_A_ST_M45PE80:
14925 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14926 break;
14927 default:
14928 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14929 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14930 break;
14932 break;
14933 default:
14934 tg3_flag_set(tp, NO_NVRAM);
14935 return;
14938 tg3_nvram_get_pagesize(tp, nvcfg1);
14939 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14940 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14942 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14943 u32 val;
14945 if (tg3_nvram_read(tp, 0, &val))
14946 return;
14948 if (val != TG3_EEPROM_MAGIC &&
14949 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14950 tg3_flag_set(tp, NO_NVRAM);
14954 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14955 static void tg3_nvram_init(struct tg3 *tp)
14957 if (tg3_flag(tp, IS_SSB_CORE)) {
14958 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14959 tg3_flag_clear(tp, NVRAM);
14960 tg3_flag_clear(tp, NVRAM_BUFFERED);
14961 tg3_flag_set(tp, NO_NVRAM);
14962 return;
14965 tw32_f(GRC_EEPROM_ADDR,
14966 (EEPROM_ADDR_FSM_RESET |
14967 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14968 EEPROM_ADDR_CLKPERD_SHIFT)));
14970 msleep(1);
14972 /* Enable seeprom accesses. */
14973 tw32_f(GRC_LOCAL_CTRL,
14974 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14975 udelay(100);
14977 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14978 tg3_asic_rev(tp) != ASIC_REV_5701) {
14979 tg3_flag_set(tp, NVRAM);
14981 if (tg3_nvram_lock(tp)) {
14982 netdev_warn(tp->dev,
14983 "Cannot get nvram lock, %s failed\n",
14984 __func__);
14985 return;
14987 tg3_enable_nvram_access(tp);
14989 tp->nvram_size = 0;
14991 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14992 tg3_get_5752_nvram_info(tp);
14993 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14994 tg3_get_5755_nvram_info(tp);
14995 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14996 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14997 tg3_asic_rev(tp) == ASIC_REV_5785)
14998 tg3_get_5787_nvram_info(tp);
14999 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15000 tg3_get_5761_nvram_info(tp);
15001 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15002 tg3_get_5906_nvram_info(tp);
15003 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15004 tg3_flag(tp, 57765_CLASS))
15005 tg3_get_57780_nvram_info(tp);
15006 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15007 tg3_asic_rev(tp) == ASIC_REV_5719)
15008 tg3_get_5717_nvram_info(tp);
15009 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15010 tg3_asic_rev(tp) == ASIC_REV_5762)
15011 tg3_get_5720_nvram_info(tp);
15012 else
15013 tg3_get_nvram_info(tp);
15015 if (tp->nvram_size == 0)
15016 tg3_get_nvram_size(tp);
15018 tg3_disable_nvram_access(tp);
15019 tg3_nvram_unlock(tp);
15021 } else {
15022 tg3_flag_clear(tp, NVRAM);
15023 tg3_flag_clear(tp, NVRAM_BUFFERED);
15025 tg3_get_eeprom_size(tp);
15029 struct subsys_tbl_ent {
15030 u16 subsys_vendor, subsys_devid;
15031 u32 phy_id;
15034 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15035 /* Broadcom boards. */
15036 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15037 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15038 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15039 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15040 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15041 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15042 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15043 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15044 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15045 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15046 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15047 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15048 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15049 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15050 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15051 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15052 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15053 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15054 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15055 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15056 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15057 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15059 /* 3com boards. */
15060 { TG3PCI_SUBVENDOR_ID_3COM,
15061 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15062 { TG3PCI_SUBVENDOR_ID_3COM,
15063 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15064 { TG3PCI_SUBVENDOR_ID_3COM,
15065 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15066 { TG3PCI_SUBVENDOR_ID_3COM,
15067 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15068 { TG3PCI_SUBVENDOR_ID_3COM,
15069 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15071 /* DELL boards. */
15072 { TG3PCI_SUBVENDOR_ID_DELL,
15073 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15074 { TG3PCI_SUBVENDOR_ID_DELL,
15075 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15076 { TG3PCI_SUBVENDOR_ID_DELL,
15077 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15078 { TG3PCI_SUBVENDOR_ID_DELL,
15079 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15081 /* Compaq boards. */
15082 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15083 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15084 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15085 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15086 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15087 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15088 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15089 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15090 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15091 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15093 /* IBM boards. */
15094 { TG3PCI_SUBVENDOR_ID_IBM,
15095 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15098 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15100 int i;
15102 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15103 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15104 tp->pdev->subsystem_vendor) &&
15105 (subsys_id_to_phy_id[i].subsys_devid ==
15106 tp->pdev->subsystem_device))
15107 return &subsys_id_to_phy_id[i];
15109 return NULL;
15112 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15114 u32 val;
15116 tp->phy_id = TG3_PHY_ID_INVALID;
15117 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15119 /* Assume an onboard device and WOL capable by default. */
15120 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15121 tg3_flag_set(tp, WOL_CAP);
15123 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15124 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15125 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15126 tg3_flag_set(tp, IS_NIC);
15128 val = tr32(VCPU_CFGSHDW);
15129 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15130 tg3_flag_set(tp, ASPM_WORKAROUND);
15131 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15132 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15133 tg3_flag_set(tp, WOL_ENABLE);
15134 device_set_wakeup_enable(&tp->pdev->dev, true);
15136 goto done;
15139 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15140 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15141 u32 nic_cfg, led_cfg;
15142 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15143 u32 nic_phy_id, ver, eeprom_phy_id;
15144 int eeprom_phy_serdes = 0;
15146 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15147 tp->nic_sram_data_cfg = nic_cfg;
15149 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15150 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15151 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15152 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15153 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15154 (ver > 0) && (ver < 0x100))
15155 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15157 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15158 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15160 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15161 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15162 tg3_asic_rev(tp) == ASIC_REV_5720)
15163 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15165 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15166 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15167 eeprom_phy_serdes = 1;
15169 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15170 if (nic_phy_id != 0) {
15171 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15172 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15174 eeprom_phy_id = (id1 >> 16) << 10;
15175 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15176 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15177 } else
15178 eeprom_phy_id = 0;
15180 tp->phy_id = eeprom_phy_id;
15181 if (eeprom_phy_serdes) {
15182 if (!tg3_flag(tp, 5705_PLUS))
15183 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15184 else
15185 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15188 if (tg3_flag(tp, 5750_PLUS))
15189 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15190 SHASTA_EXT_LED_MODE_MASK);
15191 else
15192 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15194 switch (led_cfg) {
15195 default:
15196 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15197 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15198 break;
15200 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15201 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15202 break;
15204 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15205 tp->led_ctrl = LED_CTRL_MODE_MAC;
15207 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15208 * read on some older 5700/5701 bootcode.
15209 */
15210 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15211 tg3_asic_rev(tp) == ASIC_REV_5701)
15212 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15214 break;
15216 case SHASTA_EXT_LED_SHARED:
15217 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15218 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15219 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15220 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15221 LED_CTRL_MODE_PHY_2);
15223 if (tg3_flag(tp, 5717_PLUS) ||
15224 tg3_asic_rev(tp) == ASIC_REV_5762)
15225 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15226 LED_CTRL_BLINK_RATE_MASK;
15228 break;
15230 case SHASTA_EXT_LED_MAC:
15231 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15232 break;
15234 case SHASTA_EXT_LED_COMBO:
15235 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15236 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15237 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15238 LED_CTRL_MODE_PHY_2);
15239 break;
15243 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15244 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15245 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15246 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15248 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15249 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15251 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15252 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15253 if ((tp->pdev->subsystem_vendor ==
15254 PCI_VENDOR_ID_ARIMA) &&
15255 (tp->pdev->subsystem_device == 0x205a ||
15256 tp->pdev->subsystem_device == 0x2063))
15257 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15258 } else {
15259 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15260 tg3_flag_set(tp, IS_NIC);
15263 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15264 tg3_flag_set(tp, ENABLE_ASF);
15265 if (tg3_flag(tp, 5750_PLUS))
15266 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15269 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15270 tg3_flag(tp, 5750_PLUS))
15271 tg3_flag_set(tp, ENABLE_APE);
15273 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15274 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15275 tg3_flag_clear(tp, WOL_CAP);
15277 if (tg3_flag(tp, WOL_CAP) &&
15278 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15279 tg3_flag_set(tp, WOL_ENABLE);
15280 device_set_wakeup_enable(&tp->pdev->dev, true);
15283 if (cfg2 & (1 << 17))
15284 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15286 /* serdes signal pre-emphasis in register 0x590 set by */
15287 /* bootcode if bit 18 is set */
15288 if (cfg2 & (1 << 18))
15289 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15291 if ((tg3_flag(tp, 57765_PLUS) ||
15292 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15293 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15294 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15295 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15297 if (tg3_flag(tp, PCI_EXPRESS)) {
15298 u32 cfg3;
15300 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15301 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15302 !tg3_flag(tp, 57765_PLUS) &&
15303 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15304 tg3_flag_set(tp, ASPM_WORKAROUND);
15305 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15306 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15307 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15308 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15311 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15312 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15313 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15314 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15315 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15316 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15318 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15319 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15321 done:
15322 if (tg3_flag(tp, WOL_CAP))
15323 device_set_wakeup_enable(&tp->pdev->dev,
15324 tg3_flag(tp, WOL_ENABLE));
15325 else
15326 device_set_wakeup_capable(&tp->pdev->dev, false);
15329 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15331 int i, err;
15332 u32 val2, off = offset * 8;
15334 err = tg3_nvram_lock(tp);
15335 if (err)
15336 return err;
15338 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15339 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15340 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15341 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15342 udelay(10);
15344 for (i = 0; i < 100; i++) {
15345 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15346 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15347 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15348 break;
15350 udelay(10);
15353 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15355 tg3_nvram_unlock(tp);
15356 if (val2 & APE_OTP_STATUS_CMD_DONE)
15357 return 0;
15359 return -EBUSY;
15362 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15364 int i;
15365 u32 val;
15367 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15368 tw32(OTP_CTRL, cmd);
15370 /* Wait for up to 1 ms for command to execute. */
15371 for (i = 0; i < 100; i++) {
15372 val = tr32(OTP_STATUS);
15373 if (val & OTP_STATUS_CMD_DONE)
15374 break;
15375 udelay(10);
15378 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15381 /* Read the gphy configuration from the OTP region of the chip. The gphy
15382 * configuration is a 32-bit value that straddles the alignment boundary.
15383 * We do two 32-bit reads and then shift and merge the results.
15384 */
15385 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15387 u32 bhalf_otp, thalf_otp;
15389 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15391 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15392 return 0;
15394 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15396 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15397 return 0;
15399 thalf_otp = tr32(OTP_READ_DATA);
15401 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15403 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15404 return 0;
15406 bhalf_otp = tr32(OTP_READ_DATA);
15408 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
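/* Worked example of the merge above, with illustrative values: the
 * 32-bit gphy config straddles a word boundary, so
 *
 *	thalf_otp = 0x....AABB	(top half in the low 16 bits)
 *	bhalf_otp = 0xCCDD....	(bottom half in the high 16 bits)
 *
 *	((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16)
 *		= 0xAABBCCDD
 */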
15411 static void tg3_phy_init_link_config(struct tg3 *tp)
15413 u32 adv = ADVERTISED_Autoneg;
15415 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15416 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15417 adv |= ADVERTISED_1000baseT_Half;
15418 adv |= ADVERTISED_1000baseT_Full;
15421 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15422 adv |= ADVERTISED_100baseT_Half |
15423 ADVERTISED_100baseT_Full |
15424 ADVERTISED_10baseT_Half |
15425 ADVERTISED_10baseT_Full |
15426 ADVERTISED_TP;
15427 else
15428 adv |= ADVERTISED_FIBRE;
15430 tp->link_config.advertising = adv;
15431 tp->link_config.speed = SPEED_UNKNOWN;
15432 tp->link_config.duplex = DUPLEX_UNKNOWN;
15433 tp->link_config.autoneg = AUTONEG_ENABLE;
15434 tp->link_config.active_speed = SPEED_UNKNOWN;
15435 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15437 tp->old_link = -1;
15440 static int tg3_phy_probe(struct tg3 *tp)
15442 u32 hw_phy_id_1, hw_phy_id_2;
15443 u32 hw_phy_id, hw_phy_id_masked;
15444 int err;
15446 /* flow control autonegotiation is default behavior */
15447 tg3_flag_set(tp, PAUSE_AUTONEG);
15448 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15450 if (tg3_flag(tp, ENABLE_APE)) {
15451 switch (tp->pci_fn) {
15452 case 0:
15453 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15454 break;
15455 case 1:
15456 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15457 break;
15458 case 2:
15459 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15460 break;
15461 case 3:
15462 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15463 break;
15467 if (!tg3_flag(tp, ENABLE_ASF) &&
15468 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15469 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15470 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15471 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15473 if (tg3_flag(tp, USE_PHYLIB))
15474 return tg3_phy_init(tp);
15476 /* Reading the PHY ID register can conflict with ASF
15477 * firmware access to the PHY hardware.
15478 */
15479 err = 0;
15480 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15481 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15482 } else {
15483 /* Now read the physical PHY_ID from the chip and verify
15484 * that it is sane. If it doesn't look good, we fall back
15485 * to the hard-coded table based PHY_ID, and failing
15486 * that, to the value found in the eeprom area.
15487 */
15488 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15489 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15491 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15492 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15493 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15495 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
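/* A note on the packing above: MII_PHYSID1/MII_PHYSID2 together hold
 * the PHY's OUI, model and revision. tg3 folds them into one id,
 * roughly (the same layout is used for the id read from NIC SRAM in
 * tg3_get_eeprom_hw_cfg()):
 *
 *	id  = (PHYSID1 & 0xffff) << 10;	// OUI bits from PHYSID1
 *	id |= (PHYSID2 & 0xfc00) << 16;	// remaining OUI bits
 *	id |= (PHYSID2 & 0x03ff);	// model + revision
 *
 * Masking with TG3_PHY_ID_MASK then drops the low revision bits so
 * all steppings of one PHY match a single table entry.
 */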
15498 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15499 tp->phy_id = hw_phy_id;
15500 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15501 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15502 else
15503 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15504 } else {
15505 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15506 /* Do nothing, phy ID already set up in
15507 * tg3_get_eeprom_hw_cfg().
15508 */
15509 } else {
15510 struct subsys_tbl_ent *p;
15512 /* No eeprom signature? Try the hardcoded
15513 * subsys device table.
15514 */
15515 p = tg3_lookup_by_subsys(tp);
15516 if (p) {
15517 tp->phy_id = p->phy_id;
15518 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15519 /* So far we have seen the IDs 0xbc050cd0,
15520 * 0xbc050f80 and 0xbc050c30 on devices
15521 * connected to a BCM4785, and there are
15522 * probably more. For now, just assume that
15523 * the phy is supported when it is connected
15524 * to an SSB core.
15525 */
15526 return -ENODEV;
15529 if (!tp->phy_id ||
15530 tp->phy_id == TG3_PHY_ID_BCM8002)
15531 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15535 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15536 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15537 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15538 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15539 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15540 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15541 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15542 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15543 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15544 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15546 tp->eee.supported = SUPPORTED_100baseT_Full |
15547 SUPPORTED_1000baseT_Full;
15548 tp->eee.advertised = ADVERTISED_100baseT_Full |
15549 ADVERTISED_1000baseT_Full;
15550 tp->eee.eee_enabled = 1;
15551 tp->eee.tx_lpi_enabled = 1;
15552 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15555 tg3_phy_init_link_config(tp);
15557 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15558 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15559 !tg3_flag(tp, ENABLE_APE) &&
15560 !tg3_flag(tp, ENABLE_ASF)) {
15561 u32 bmsr, dummy;
15563 tg3_readphy(tp, MII_BMSR, &bmsr);
15564 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15565 (bmsr & BMSR_LSTATUS))
15566 goto skip_phy_reset;
15568 err = tg3_phy_reset(tp);
15569 if (err)
15570 return err;
15572 tg3_phy_set_wirespeed(tp);
15574 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15575 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15576 tp->link_config.flowctrl);
15578 tg3_writephy(tp, MII_BMCR,
15579 BMCR_ANENABLE | BMCR_ANRESTART);
15583 skip_phy_reset:
15584 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15585 err = tg3_init_5401phy_dsp(tp);
15586 if (err)
15587 return err;
15589 err = tg3_init_5401phy_dsp(tp);
15592 return err;
15595 static void tg3_read_vpd(struct tg3 *tp)
15597 u8 *vpd_data;
15598 unsigned int block_end, rosize, len;
15599 u32 vpdlen;
15600 int j, i = 0;
15602 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15603 if (!vpd_data)
15604 goto out_no_vpd;
15606 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15607 if (i < 0)
15608 goto out_not_found;
15610 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15611 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15612 i += PCI_VPD_LRDT_TAG_SIZE;
15614 if (block_end > vpdlen)
15615 goto out_not_found;
15617 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15618 PCI_VPD_RO_KEYWORD_MFR_ID);
15619 if (j > 0) {
15620 len = pci_vpd_info_field_size(&vpd_data[j]);
15622 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15623 if (j + len > block_end || len != 4 ||
15624 memcmp(&vpd_data[j], "1028", 4))
15625 goto partno;
15627 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15628 PCI_VPD_RO_KEYWORD_VENDOR0);
15629 if (j < 0)
15630 goto partno;
15632 len = pci_vpd_info_field_size(&vpd_data[j]);
15634 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15635 if (j + len > block_end)
15636 goto partno;
15638 if (len >= sizeof(tp->fw_ver))
15639 len = sizeof(tp->fw_ver) - 1;
15640 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15641 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15642 &vpd_data[j]);
15645 partno:
15646 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15647 PCI_VPD_RO_KEYWORD_PARTNO);
15648 if (i < 0)
15649 goto out_not_found;
15651 len = pci_vpd_info_field_size(&vpd_data[i]);
15653 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15654 if (len > TG3_BPN_SIZE ||
15655 (len + i) > vpdlen)
15656 goto out_not_found;
15658 memcpy(tp->board_part_number, &vpd_data[i], len);
15660 out_not_found:
15661 kfree(vpd_data);
15662 if (tp->board_part_number[0])
15663 return;
15665 out_no_vpd:
15666 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15667 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15668 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15669 strcpy(tp->board_part_number, "BCM5717");
15670 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15671 strcpy(tp->board_part_number, "BCM5718");
15672 else
15673 goto nomatch;
15674 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15675 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15676 strcpy(tp->board_part_number, "BCM57780");
15677 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15678 strcpy(tp->board_part_number, "BCM57760");
15679 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15680 strcpy(tp->board_part_number, "BCM57790");
15681 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15682 strcpy(tp->board_part_number, "BCM57788");
15683 else
15684 goto nomatch;
15685 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15686 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15687 strcpy(tp->board_part_number, "BCM57761");
15688 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15689 strcpy(tp->board_part_number, "BCM57765");
15690 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15691 strcpy(tp->board_part_number, "BCM57781");
15692 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15693 strcpy(tp->board_part_number, "BCM57785");
15694 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15695 strcpy(tp->board_part_number, "BCM57791");
15696 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15697 strcpy(tp->board_part_number, "BCM57795");
15698 else
15699 goto nomatch;
15700 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15701 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15702 strcpy(tp->board_part_number, "BCM57762");
15703 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15704 strcpy(tp->board_part_number, "BCM57766");
15705 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15706 strcpy(tp->board_part_number, "BCM57782");
15707 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15708 strcpy(tp->board_part_number, "BCM57786");
15709 else
15710 goto nomatch;
15711 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15712 strcpy(tp->board_part_number, "BCM95906");
15713 } else {
15714 nomatch:
15715 strcpy(tp->board_part_number, "none");
15719 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15721 u32 val;
15723 if (tg3_nvram_read(tp, offset, &val) ||
15724 (val & 0xfc000000) != 0x0c000000 ||
15725 tg3_nvram_read(tp, offset + 4, &val) ||
15726 val != 0)
15727 return 0;
15729 return 1;
15732 static void tg3_read_bc_ver(struct tg3 *tp)
15734 u32 val, offset, start, ver_offset;
15735 int i, dst_off;
15736 bool newver = false;
15738 if (tg3_nvram_read(tp, 0xc, &offset) ||
15739 tg3_nvram_read(tp, 0x4, &start))
15740 return;
15742 offset = tg3_nvram_logical_addr(tp, offset);
15744 if (tg3_nvram_read(tp, offset, &val))
15745 return;
15747 if ((val & 0xfc000000) == 0x0c000000) {
15748 if (tg3_nvram_read(tp, offset + 4, &val))
15749 return;
15751 if (val == 0)
15752 newver = true;
15755 dst_off = strlen(tp->fw_ver);
15757 if (newver) {
15758 if (TG3_VER_SIZE - dst_off < 16 ||
15759 tg3_nvram_read(tp, offset + 8, &ver_offset))
15760 return;
15762 offset = offset + ver_offset - start;
15763 for (i = 0; i < 16; i += 4) {
15764 __be32 v;
15765 if (tg3_nvram_read_be32(tp, offset + i, &v))
15766 return;
15768 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15770 } else {
15771 u32 major, minor;
15773 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15774 return;
15776 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15777 TG3_NVM_BCVER_MAJSFT;
15778 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15779 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15780 "v%d.%02d", major, minor);
15784 static void tg3_read_hwsb_ver(struct tg3 *tp)
15786 u32 val, major, minor;
15788 /* Use native endian representation */
15789 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15790 return;
15792 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15793 TG3_NVM_HWSB_CFG1_MAJSFT;
15794 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15795 TG3_NVM_HWSB_CFG1_MINSFT;
15797 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15800 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15802 u32 offset, major, minor, build;
15804 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15806 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15807 return;
15809 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15810 case TG3_EEPROM_SB_REVISION_0:
15811 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15812 break;
15813 case TG3_EEPROM_SB_REVISION_2:
15814 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15815 break;
15816 case TG3_EEPROM_SB_REVISION_3:
15817 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15818 break;
15819 case TG3_EEPROM_SB_REVISION_4:
15820 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15821 break;
15822 case TG3_EEPROM_SB_REVISION_5:
15823 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15824 break;
15825 case TG3_EEPROM_SB_REVISION_6:
15826 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15827 break;
15828 default:
15829 return;
15832 if (tg3_nvram_read(tp, offset, &val))
15833 return;
15835 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15836 TG3_EEPROM_SB_EDH_BLD_SHFT;
15837 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15838 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15839 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15841 if (minor > 99 || build > 26)
15842 return;
15844 offset = strlen(tp->fw_ver);
15845 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15846 " v%d.%02d", major, minor);
15848 if (build > 0) {
15849 offset = strlen(tp->fw_ver);
15850 if (offset < TG3_VER_SIZE - 1)
15851 tp->fw_ver[offset] = 'a' + build - 1;
15855 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15857 u32 val, offset, start;
15858 int i, vlen;
15860 for (offset = TG3_NVM_DIR_START;
15861 offset < TG3_NVM_DIR_END;
15862 offset += TG3_NVM_DIRENT_SIZE) {
15863 if (tg3_nvram_read(tp, offset, &val))
15864 return;
15866 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15867 break;
15870 if (offset == TG3_NVM_DIR_END)
15871 return;
15873 if (!tg3_flag(tp, 5705_PLUS))
15874 start = 0x08000000;
15875 else if (tg3_nvram_read(tp, offset - 4, &start))
15876 return;
15878 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15879 !tg3_fw_img_is_valid(tp, offset) ||
15880 tg3_nvram_read(tp, offset + 8, &val))
15881 return;
15883 offset += val - start;
15885 vlen = strlen(tp->fw_ver);
15887 tp->fw_ver[vlen++] = ',';
15888 tp->fw_ver[vlen++] = ' ';
15890 for (i = 0; i < 4; i++) {
15891 __be32 v;
15892 if (tg3_nvram_read_be32(tp, offset, &v))
15893 return;
15895 offset += sizeof(v);
15897 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15898 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15899 break;
15902 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15903 vlen += sizeof(v);
15907 static void tg3_probe_ncsi(struct tg3 *tp)
15909 u32 apedata;
15911 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15912 if (apedata != APE_SEG_SIG_MAGIC)
15913 return;
15915 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15916 if (!(apedata & APE_FW_STATUS_READY))
15917 return;
15919 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15920 tg3_flag_set(tp, APE_HAS_NCSI);
15923 static void tg3_read_dash_ver(struct tg3 *tp)
15925 int vlen;
15926 u32 apedata;
15927 char *fwtype;
15929 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15931 if (tg3_flag(tp, APE_HAS_NCSI))
15932 fwtype = "NCSI";
15933 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15934 fwtype = "SMASH";
15935 else
15936 fwtype = "DASH";
15938 vlen = strlen(tp->fw_ver);
15940 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15941 fwtype,
15942 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15943 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15944 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15945 (apedata & APE_FW_VERSION_BLDMSK));
15948 static void tg3_read_otp_ver(struct tg3 *tp)
15950 u32 val, val2;
15952 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15953 return;
15955 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15956 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15957 TG3_OTP_MAGIC0_VALID(val)) {
15958 u64 val64 = (u64) val << 32 | val2;
15959 u32 ver = 0;
15960 int i, vlen;
15962 for (i = 0; i < 7; i++) {
15963 if ((val64 & 0xff) == 0)
15964 break;
15965 ver = val64 & 0xff;
15966 val64 >>= 8;
15968 vlen = strlen(tp->fw_ver);
15969 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15973 static void tg3_read_fw_ver(struct tg3 *tp)
15975 u32 val;
15976 bool vpd_vers = false;
15978 if (tp->fw_ver[0] != 0)
15979 vpd_vers = true;
15981 if (tg3_flag(tp, NO_NVRAM)) {
15982 strcat(tp->fw_ver, "sb");
15983 tg3_read_otp_ver(tp);
15984 return;
15987 if (tg3_nvram_read(tp, 0, &val))
15988 return;
15990 if (val == TG3_EEPROM_MAGIC)
15991 tg3_read_bc_ver(tp);
15992 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15993 tg3_read_sb_ver(tp, val);
15994 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15995 tg3_read_hwsb_ver(tp);
15997 if (tg3_flag(tp, ENABLE_ASF)) {
15998 if (tg3_flag(tp, ENABLE_APE)) {
15999 tg3_probe_ncsi(tp);
16000 if (!vpd_vers)
16001 tg3_read_dash_ver(tp);
16002 } else if (!vpd_vers) {
16003 tg3_read_mgmtfw_ver(tp);
16007 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16010 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16012 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16013 return TG3_RX_RET_MAX_SIZE_5717;
16014 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16015 return TG3_RX_RET_MAX_SIZE_5700;
16016 else
16017 return TG3_RX_RET_MAX_SIZE_5705;
16020 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16021 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16022 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16023 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16024 { },
16027 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16029 struct pci_dev *peer;
16030 unsigned int func, devnr = tp->pdev->devfn & ~7;
16032 for (func = 0; func < 8; func++) {
16033 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16034 if (peer && peer != tp->pdev)
16035 break;
16036 pci_dev_put(peer);
16038 /* 5704 can be configured in single-port mode; set peer to
16039 * tp->pdev in that case.
16040 */
16041 if (!peer) {
16042 peer = tp->pdev;
16043 return peer;
16046 /*
16047 * We don't need to keep the refcount elevated; there's no way
16048 * to remove one half of this device without removing the other
16049 */
16050 pci_dev_put(peer);
16052 return peer;
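/* A note on the loop above: devfn packs the PCI slot and function as
 * (slot << 3) | func, so masking with ~7 keeps the slot and clears
 * the function bits:
 *
 *	devnr = tp->pdev->devfn & ~7;		// function 0 of our slot
 *	pci_get_slot(bus, devnr | func);	// probe func 0..7
 *
 * The first sibling that is not tp->pdev is the 5704's peer port.
 */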
16055 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16057 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16058 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16059 u32 reg;
16061 /* All devices that use the alternate
16062 * ASIC REV location have a CPMU.
16063 */
16064 tg3_flag_set(tp, CPMU_PRESENT);
16066 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16067 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16068 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16069 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16070 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16071 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16072 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16073 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16074 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16075 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16076 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16077 reg = TG3PCI_GEN2_PRODID_ASICREV;
16078 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16079 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16080 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16081 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16082 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16083 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16084 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16085 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16086 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16087 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16088 reg = TG3PCI_GEN15_PRODID_ASICREV;
16089 else
16090 reg = TG3PCI_PRODID_ASICREV;
16092 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16095 /* Wrong chip ID in 5752 A0. This code can be removed later
16096 * as A0 is not in production.
16097 */
16098 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16099 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16101 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16102 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16104 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16105 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16106 tg3_asic_rev(tp) == ASIC_REV_5720)
16107 tg3_flag_set(tp, 5717_PLUS);
16109 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16110 tg3_asic_rev(tp) == ASIC_REV_57766)
16111 tg3_flag_set(tp, 57765_CLASS);
16113 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16114 tg3_asic_rev(tp) == ASIC_REV_5762)
16115 tg3_flag_set(tp, 57765_PLUS);
16117 /* Intentionally exclude ASIC_REV_5906 */
16118 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16119 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16120 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16121 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16122 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16123 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16124 tg3_flag(tp, 57765_PLUS))
16125 tg3_flag_set(tp, 5755_PLUS);
16127 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16128 tg3_asic_rev(tp) == ASIC_REV_5714)
16129 tg3_flag_set(tp, 5780_CLASS);
16131 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16132 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16133 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16134 tg3_flag(tp, 5755_PLUS) ||
16135 tg3_flag(tp, 5780_CLASS))
16136 tg3_flag_set(tp, 5750_PLUS);
16138 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16139 tg3_flag(tp, 5750_PLUS))
16140 tg3_flag_set(tp, 5705_PLUS);
16143 static bool tg3_10_100_only_device(struct tg3 *tp,
16144 const struct pci_device_id *ent)
16146 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16148 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16149 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16150 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16151 return true;
16153 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16154 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16155 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16156 return true;
16157 } else {
16158 return true;
16162 return false;
16165 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16167 u32 misc_ctrl_reg;
16168 u32 pci_state_reg, grc_misc_cfg;
16169 u32 val;
16170 u16 pci_cmd;
16171 int err;
16173 /* Force memory write invalidate off. If we leave it on,
16174 * then on 5700_BX chips we have to enable a workaround.
16175 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16176 * to match the cacheline size. The Broadcom driver has this
16177 * workaround but turns MWI off all the time, so it never uses
16178 * it. This suggests that the workaround is insufficient.
16179 */
16180 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16181 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16182 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
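/* For reference: the PCI core's pci_clear_mwi() helper clears the
 * same PCI_COMMAND_INVALIDATE bit, so the sequence above is roughly
 *
 *	pci_clear_mwi(tp->pdev);
 *
 * The open-coded form also leaves pci_cmd handy for reuse below.
 */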
16184 /* Important! -- Make sure register accesses are byteswapped
16185 * correctly. Also, for those chips that require it, make
16186 * sure that indirect register accesses are enabled before
16187 * the first operation.
16188 */
16189 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16190 &misc_ctrl_reg);
16191 tp->misc_host_ctrl |= (misc_ctrl_reg &
16192 MISC_HOST_CTRL_CHIPREV);
16193 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16194 tp->misc_host_ctrl);
16196 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16198 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16199 * we need to disable memory and use config. cycles
16200 * only to access all registers. The 5702/03 chips
16201 * can mistakenly decode the special cycles from the
16202 * ICH chipsets as memory write cycles, causing corruption
16203 * of register and memory space. Only certain ICH bridges
16204 * will drive special cycles with non-zero data during the
16205 * address phase which can fall within the 5703's address
16206 * range. This is not an ICH bug as the PCI spec allows
16207 * non-zero address during special cycles. However, only
16208 * these ICH bridges are known to drive non-zero addresses
16209 * during special cycles.
16211 * Since special cycles do not cross PCI bridges, we only
16212 * enable this workaround if the 5703 is on the secondary
16213 * bus of these ICH bridges.
16214 */
16215 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16216 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16217 static struct tg3_dev_id {
16218 u32 vendor;
16219 u32 device;
16220 u32 rev;
16221 } ich_chipsets[] = {
16222 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16223 PCI_ANY_ID },
16224 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16225 PCI_ANY_ID },
16226 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16227 0xa },
16228 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16229 PCI_ANY_ID },
16230 { },
16232 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16233 struct pci_dev *bridge = NULL;
16235 while (pci_id->vendor != 0) {
16236 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16237 bridge);
16238 if (!bridge) {
16239 pci_id++;
16240 continue;
16242 if (pci_id->rev != PCI_ANY_ID) {
16243 if (bridge->revision > pci_id->rev)
16244 continue;
16246 if (bridge->subordinate &&
16247 (bridge->subordinate->number ==
16248 tp->pdev->bus->number)) {
16249 tg3_flag_set(tp, ICH_WORKAROUND);
16250 pci_dev_put(bridge);
16251 break;
16256 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16257 static struct tg3_dev_id {
16258 u32 vendor;
16259 u32 device;
16260 } bridge_chipsets[] = {
16261 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16262 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16263 { },
16265 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16266 struct pci_dev *bridge = NULL;
16268 while (pci_id->vendor != 0) {
16269 bridge = pci_get_device(pci_id->vendor,
16270 pci_id->device,
16271 bridge);
16272 if (!bridge) {
16273 pci_id++;
16274 continue;
16276 if (bridge->subordinate &&
16277 (bridge->subordinate->number <=
16278 tp->pdev->bus->number) &&
16279 (bridge->subordinate->busn_res.end >=
16280 tp->pdev->bus->number)) {
16281 tg3_flag_set(tp, 5701_DMA_BUG);
16282 pci_dev_put(bridge);
16283 break;
16288 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16289 * DMA addresses > 40-bit. This bridge may have additional
16290 * 57xx devices behind it in some 4-port NIC designs, for example.
16291 * Any tg3 device found behind the bridge will also need the 40-bit
16292 * DMA workaround.
16293 */
16294 if (tg3_flag(tp, 5780_CLASS)) {
16295 tg3_flag_set(tp, 40BIT_DMA_BUG);
16296 tp->msi_cap = tp->pdev->msi_cap;
16297 } else {
16298 struct pci_dev *bridge = NULL;
16300 do {
16301 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16302 PCI_DEVICE_ID_SERVERWORKS_EPB,
16303 bridge);
16304 if (bridge && bridge->subordinate &&
16305 (bridge->subordinate->number <=
16306 tp->pdev->bus->number) &&
16307 (bridge->subordinate->busn_res.end >=
16308 tp->pdev->bus->number)) {
16309 tg3_flag_set(tp, 40BIT_DMA_BUG);
16310 pci_dev_put(bridge);
16311 break;
16313 } while (bridge);
16316 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16317 tg3_asic_rev(tp) == ASIC_REV_5714)
16318 tp->pdev_peer = tg3_find_peer(tp);
16320 /* Determine TSO capabilities */
16321 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16322 ; /* Do nothing. HW bug. */
16323 else if (tg3_flag(tp, 57765_PLUS))
16324 tg3_flag_set(tp, HW_TSO_3);
16325 else if (tg3_flag(tp, 5755_PLUS) ||
16326 tg3_asic_rev(tp) == ASIC_REV_5906)
16327 tg3_flag_set(tp, HW_TSO_2);
16328 else if (tg3_flag(tp, 5750_PLUS)) {
16329 tg3_flag_set(tp, HW_TSO_1);
16330 tg3_flag_set(tp, TSO_BUG);
16331 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16332 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16333 tg3_flag_clear(tp, TSO_BUG);
16334 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16335 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16336 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16337 tg3_flag_set(tp, FW_TSO);
16338 tg3_flag_set(tp, TSO_BUG);
16339 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16340 tp->fw_needed = FIRMWARE_TG3TSO5;
16341 else
16342 tp->fw_needed = FIRMWARE_TG3TSO;
16345 /* Selectively allow TSO based on operating conditions */
16346 if (tg3_flag(tp, HW_TSO_1) ||
16347 tg3_flag(tp, HW_TSO_2) ||
16348 tg3_flag(tp, HW_TSO_3) ||
16349 tg3_flag(tp, FW_TSO)) {
16350 /* For firmware TSO, assume ASF is disabled.
16351 * We'll disable TSO later if we discover ASF
16352 * is enabled in tg3_get_eeprom_hw_cfg().
16353 */
16354 tg3_flag_set(tp, TSO_CAPABLE);
16355 } else {
16356 tg3_flag_clear(tp, TSO_CAPABLE);
16357 tg3_flag_clear(tp, TSO_BUG);
16358 tp->fw_needed = NULL;
16361 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16362 tp->fw_needed = FIRMWARE_TG3;
16364 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16365 tp->fw_needed = FIRMWARE_TG357766;
16367 tp->irq_max = 1;
16369 if (tg3_flag(tp, 5750_PLUS)) {
16370 tg3_flag_set(tp, SUPPORT_MSI);
16371 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16372 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16373 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16374 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16375 tp->pdev_peer == tp->pdev))
16376 tg3_flag_clear(tp, SUPPORT_MSI);
16378 if (tg3_flag(tp, 5755_PLUS) ||
16379 tg3_asic_rev(tp) == ASIC_REV_5906) {
16380 tg3_flag_set(tp, 1SHOT_MSI);
16383 if (tg3_flag(tp, 57765_PLUS)) {
16384 tg3_flag_set(tp, SUPPORT_MSIX);
16385 tp->irq_max = TG3_IRQ_MAX_VECS;
16389 tp->txq_max = 1;
16390 tp->rxq_max = 1;
16391 if (tp->irq_max > 1) {
16392 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16393 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16395 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16396 tg3_asic_rev(tp) == ASIC_REV_5720)
16397 tp->txq_max = tp->irq_max - 1;
16400 if (tg3_flag(tp, 5755_PLUS) ||
16401 tg3_asic_rev(tp) == ASIC_REV_5906)
16402 tg3_flag_set(tp, SHORT_DMA_BUG);
16404 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16405 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16407 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16408 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16409 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16410 tg3_asic_rev(tp) == ASIC_REV_5762)
16411 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16413 if (tg3_flag(tp, 57765_PLUS) &&
16414 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16415 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16417 if (!tg3_flag(tp, 5705_PLUS) ||
16418 tg3_flag(tp, 5780_CLASS) ||
16419 tg3_flag(tp, USE_JUMBO_BDFLAG))
16420 tg3_flag_set(tp, JUMBO_CAPABLE);
16422 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16423 &pci_state_reg);
16425 if (pci_is_pcie(tp->pdev)) {
16426 u16 lnkctl;
16428 tg3_flag_set(tp, PCI_EXPRESS);
16430 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16431 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16432 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16433 tg3_flag_clear(tp, HW_TSO_2);
16434 tg3_flag_clear(tp, TSO_CAPABLE);
16436 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16437 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16438 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16439 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16440 tg3_flag_set(tp, CLKREQ_BUG);
16441 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16442 tg3_flag_set(tp, L1PLLPD_EN);
16444 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16445 /* BCM5785 devices are effectively PCIe devices, and should
16446 * follow PCIe codepaths, but do not have a PCIe capabilities
16447 * section.
16448 */
16449 tg3_flag_set(tp, PCI_EXPRESS);
16450 } else if (!tg3_flag(tp, 5705_PLUS) ||
16451 tg3_flag(tp, 5780_CLASS)) {
16452 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16453 if (!tp->pcix_cap) {
16454 dev_err(&tp->pdev->dev,
16455 "Cannot find PCI-X capability, aborting\n");
16456 return -EIO;
16459 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16460 tg3_flag_set(tp, PCIX_MODE);
16463 /* If we have an AMD 762 or VIA K8T800 chipset, write
16464 * reordering to the mailbox registers done by the host
16465 * controller can cause major troubles. We read back from
16466 * every mailbox register write to force the writes to be
16467 * posted to the chip in order.
16469 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16470 !tg3_flag(tp, PCI_EXPRESS))
16471 tg3_flag_set(tp, MBOX_WRITE_REORDER);
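	/* The flush is simply a read back of the same register, the pattern
	 * followed by tg3_write_flush_reg32() defined earlier in this file
	 * (sketch):
	 *
	 *	static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
	 *	{
	 *		writel(val, tp->regs + off);
	 *		readl(tp->regs + off);	 <- forces the posted write out
	 *	}
	 */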
16473 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16474 &tp->pci_cacheline_sz);
16475 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16476 &tp->pci_lat_timer);
16477 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16478 tp->pci_lat_timer < 64) {
16479 tp->pci_lat_timer = 64;
16480 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16481 tp->pci_lat_timer);
16484 /* Important! -- It is critical that the PCI-X hw workaround
16485 * situation is decided before the first MMIO register access.
16487 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16488 /* 5700 BX chips need to have their TX producer index
16489 * mailboxes written twice to work around a bug.
16491 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16493 /* If we are in PCI-X mode, enable the register write workaround.
16495 * The workaround is to use indirect register accesses
16496 * for all chip writes except those to mailbox registers.
16498 if (tg3_flag(tp, PCIX_MODE)) {
16499 u32 pm_reg;
16501 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16503 /* The chip can have its power management PCI config
16504 * space registers clobbered due to this bug.
16505 * So explicitly force the chip into D0 here.
16507 pci_read_config_dword(tp->pdev,
16508 tp->pdev->pm_cap + PCI_PM_CTRL,
16509 &pm_reg);
16510 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16511 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16512 pci_write_config_dword(tp->pdev,
16513 tp->pdev->pm_cap + PCI_PM_CTRL,
16514 pm_reg);
16516 /* Also, force SERR#/PERR# in PCI command. */
16517 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16518 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16519 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16523 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16524 tg3_flag_set(tp, PCI_HIGH_SPEED);
16525 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16526 tg3_flag_set(tp, PCI_32BIT);
16528 /* Chip-specific fixup from Broadcom driver */
16529 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16530 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16531 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16532 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16535 /* Default fast path register access methods */
16536 tp->read32 = tg3_read32;
16537 tp->write32 = tg3_write32;
16538 tp->read32_mbox = tg3_read32;
16539 tp->write32_mbox = tg3_write32;
16540 tp->write32_tx_mbox = tg3_write32;
16541 tp->write32_rx_mbox = tg3_write32;
16543 /* Various workaround register access methods */
16544 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16545 tp->write32 = tg3_write_indirect_reg32;
16546 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16547 (tg3_flag(tp, PCI_EXPRESS) &&
16548 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16550 * Back-to-back register writes can cause problems on these
16551 * chips; the workaround is to read back all reg writes
16552 * except those to mailbox regs.
16554 * See tg3_write_indirect_reg32().
16556 tp->write32 = tg3_write_flush_reg32;
16559 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16560 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16561 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16562 tp->write32_rx_mbox = tg3_write_flush_reg32;
16565 if (tg3_flag(tp, ICH_WORKAROUND)) {
16566 tp->read32 = tg3_read_indirect_reg32;
16567 tp->write32 = tg3_write_indirect_reg32;
16568 tp->read32_mbox = tg3_read_indirect_mbox;
16569 tp->write32_mbox = tg3_write_indirect_mbox;
16570 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16571 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16573 iounmap(tp->regs);
16574 tp->regs = NULL;
16576 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16577 pci_cmd &= ~PCI_COMMAND_MEMORY;
16578 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
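	/* Indirect access tunnels MMIO through PCI config space: the register
	 * offset goes into TG3PCI_REG_BASE_ADDR and the data moves through
	 * TG3PCI_REG_DATA, serialized by tp->indirect_lock. Sketch of the
	 * write path used by the helpers selected above:
	 *
	 *	spin_lock_irqsave(&tp->indirect_lock, flags);
	 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	 *	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	 */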
16580 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16581 tp->read32_mbox = tg3_read32_mbox_5906;
16582 tp->write32_mbox = tg3_write32_mbox_5906;
16583 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16584 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16587 if (tp->write32 == tg3_write_indirect_reg32 ||
16588 (tg3_flag(tp, PCIX_MODE) &&
16589 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16590 tg3_asic_rev(tp) == ASIC_REV_5701)))
16591 tg3_flag_set(tp, SRAM_USE_CONFIG);
16593 /* The memory arbiter has to be enabled in order for SRAM accesses
16594 * to succeed. Normally on powerup the tg3 chip firmware will make
16595 * sure it is enabled, but other entities such as system netboot
16596 * code might disable it.
16598 val = tr32(MEMARB_MODE);
16599 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16601 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16602 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16603 tg3_flag(tp, 5780_CLASS)) {
16604 if (tg3_flag(tp, PCIX_MODE)) {
16605 pci_read_config_dword(tp->pdev,
16606 tp->pcix_cap + PCI_X_STATUS,
16607 &val);
16608 tp->pci_fn = val & 0x7;
16610 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16611 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16612 tg3_asic_rev(tp) == ASIC_REV_5720) {
16613 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16614 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16615 val = tr32(TG3_CPMU_STATUS);
16617 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16618 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16619 else
16620 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16621 TG3_CPMU_STATUS_FSHFT_5719;
16624 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16625 tp->write32_tx_mbox = tg3_write_flush_reg32;
16626 tp->write32_rx_mbox = tg3_write_flush_reg32;
16629 /* Get eeprom hw config before calling tg3_set_power_state().
16630 * In particular, the TG3_FLAG_IS_NIC flag must be
16631 * determined before calling tg3_set_power_state() so that
16632 * we know whether or not to switch out of Vaux power.
16633 * When the flag is set, it means that GPIO1 is used for eeprom
16634 * write protect and also implies that it is a LOM where GPIOs
16635 * are not used to switch power.
16637 tg3_get_eeprom_hw_cfg(tp);
16639 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16640 tg3_flag_clear(tp, TSO_CAPABLE);
16641 tg3_flag_clear(tp, TSO_BUG);
16642 tp->fw_needed = NULL;
16645 if (tg3_flag(tp, ENABLE_APE)) {
16646 /* Allow reads and writes to the
16647 * APE register and memory space.
16649 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16650 PCISTATE_ALLOW_APE_SHMEM_WR |
16651 PCISTATE_ALLOW_APE_PSPACE_WR;
16652 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16653 pci_state_reg);
16655 tg3_ape_lock_init(tp);
16658 /* Set up tp->grc_local_ctrl before calling
16659 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16660 * will bring 5700's external PHY out of reset.
16661 * It is also used as eeprom write protect on LOMs.
16663 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16664 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16665 tg3_flag(tp, EEPROM_WRITE_PROT))
16666 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16667 GRC_LCLCTRL_GPIO_OUTPUT1);
16668 /* Unused GPIO3 must be driven as output on 5752 because there
16669 * are no pull-up resistors on unused GPIO pins.
16671 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16672 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16674 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16675 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16676 tg3_flag(tp, 57765_CLASS))
16677 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16679 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16680 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16681 /* Turn off the debug UART. */
16682 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16683 if (tg3_flag(tp, IS_NIC))
16684 /* Keep VMain power. */
16685 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16686 GRC_LCLCTRL_GPIO_OUTPUT0;
16689 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16690 tp->grc_local_ctrl |=
16691 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16693 /* Switch out of Vaux if it is a NIC */
16694 tg3_pwrsrc_switch_to_vmain(tp);
16696 /* Derive initial jumbo mode from MTU assigned in
16697 * ether_setup() via the alloc_etherdev() call
16699 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16700 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16702 /* Determine WakeOnLan speed to use. */
16703 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16704 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16705 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16706 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16707 tg3_flag_clear(tp, WOL_SPEED_100MB);
16708 } else {
16709 tg3_flag_set(tp, WOL_SPEED_100MB);
16712 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16713 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16715 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16716 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16717 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16718 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16719 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16720 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16721 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16722 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16724 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16725 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16726 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16727 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16728 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16730 if (tg3_flag(tp, 5705_PLUS) &&
16731 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16732 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16733 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16734 !tg3_flag(tp, 57765_PLUS)) {
16735 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16736 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16737 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16738 tg3_asic_rev(tp) == ASIC_REV_5761) {
16739 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16740 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16741 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16742 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16743 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16744 } else
16745 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16748 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16749 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16750 tp->phy_otp = tg3_read_otp_phycfg(tp);
16751 if (tp->phy_otp == 0)
16752 tp->phy_otp = TG3_OTP_DEFAULT;
16755 if (tg3_flag(tp, CPMU_PRESENT))
16756 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16757 else
16758 tp->mi_mode = MAC_MI_MODE_BASE;
16760 tp->coalesce_mode = 0;
16761 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16762 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16763 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16765 /* Set these bits to enable statistics workaround. */
16766 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16767 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16768 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16769 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16770 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16771 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16774 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16775 tg3_asic_rev(tp) == ASIC_REV_57780)
16776 tg3_flag_set(tp, USE_PHYLIB);
16778 err = tg3_mdio_init(tp);
16779 if (err)
16780 return err;
16782 /* Initialize data/descriptor byte/word swapping. */
16783 val = tr32(GRC_MODE);
16784 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16785 tg3_asic_rev(tp) == ASIC_REV_5762)
16786 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16787 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16788 GRC_MODE_B2HRX_ENABLE |
16789 GRC_MODE_HTX2B_ENABLE |
16790 GRC_MODE_HOST_STACKUP);
16791 else
16792 val &= GRC_MODE_HOST_STACKUP;
16794 tw32(GRC_MODE, val | tp->grc_mode);
16796 tg3_switch_clocks(tp);
16798 /* Clear this out for sanity. */
16799 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16801 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16802 tw32(TG3PCI_REG_BASE_ADDR, 0);
16804 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16805 &pci_state_reg);
16806 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16807 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16808 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16809 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16810 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16811 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16812 void __iomem *sram_base;
16814 /* Write some dummy words into the SRAM status block
16815 * area and see if they read back correctly. If the return
16816 * value is bad, force-enable the PCIX workaround.
16818 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16820 writel(0x00000000, sram_base);
16821 writel(0x00000000, sram_base + 4);
16822 writel(0xffffffff, sram_base + 4);
16823 if (readl(sram_base) != 0x00000000)
16824 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16828 udelay(50);
16829 tg3_nvram_init(tp);
16831 /* If the device has an NVRAM, no need to load patch firmware */
16832 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16833 !tg3_flag(tp, NO_NVRAM))
16834 tp->fw_needed = NULL;
16836 grc_misc_cfg = tr32(GRC_MISC_CFG);
16837 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16839 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16840 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16841 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16842 tg3_flag_set(tp, IS_5788);
16844 if (!tg3_flag(tp, IS_5788) &&
16845 tg3_asic_rev(tp) != ASIC_REV_5700)
16846 tg3_flag_set(tp, TAGGED_STATUS);
16847 if (tg3_flag(tp, TAGGED_STATUS)) {
16848 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16849 HOSTCC_MODE_CLRTICK_TXBD);
16851 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16852 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16853 tp->misc_host_ctrl);
16856 /* Preserve the APE MAC_MODE bits */
16857 if (tg3_flag(tp, ENABLE_APE))
16858 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16859 else
16860 tp->mac_mode = 0;
16862 if (tg3_10_100_only_device(tp, ent))
16863 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16865 err = tg3_phy_probe(tp);
16866 if (err) {
16867 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16868 /* ... but do not return immediately ... */
16869 tg3_mdio_fini(tp);
16872 tg3_read_vpd(tp);
16873 tg3_read_fw_ver(tp);
16875 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16876 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16877 } else {
16878 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16879 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16880 else
16881 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16884 /* 5700 {AX,BX} chips have a broken status block link
16885 * change bit implementation, so we must use the
16886 * status register in those cases.
16888 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16889 tg3_flag_set(tp, USE_LINKCHG_REG);
16890 else
16891 tg3_flag_clear(tp, USE_LINKCHG_REG);
16893 /* The led_ctrl is set during tg3_phy_probe, here we might
16894 * have to force the link status polling mechanism based
16895 * upon subsystem IDs.
16897 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16898 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16899 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16900 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16901 tg3_flag_set(tp, USE_LINKCHG_REG);
16904 /* For all SERDES we poll the MAC status register. */
16905 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16906 tg3_flag_set(tp, POLL_SERDES);
16907 else
16908 tg3_flag_clear(tp, POLL_SERDES);
16910 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16911 tg3_flag_set(tp, POLL_CPMU_LINK);
16913 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16914 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16915 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16916 tg3_flag(tp, PCIX_MODE)) {
16917 tp->rx_offset = NET_SKB_PAD;
16918 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16919 tp->rx_copy_thresh = ~(u16)0;
16920 #endif
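	/* ~(u16)0 raises the copy threshold above any possible frame size, so
	 * every received packet is copied into a freshly aligned skb on hosts
	 * that trap on unaligned loads.
	 */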
16923 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16924 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16925 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16927 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16929 /* Increment the rx prod index on the rx std ring by at most
16930 * 8 for these chips to work around hw errata.
16932 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16933 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16934 tg3_asic_rev(tp) == ASIC_REV_5755)
16935 tp->rx_std_max_post = 8;
16937 if (tg3_flag(tp, ASPM_WORKAROUND))
16938 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16939 PCIE_PWR_MGMT_L1_THRESH_MSK;
16941 return err;
16944 #ifdef CONFIG_SPARC
16945 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16947 struct net_device *dev = tp->dev;
16948 struct pci_dev *pdev = tp->pdev;
16949 struct device_node *dp = pci_device_to_OF_node(pdev);
16950 const unsigned char *addr;
16951 int len;
16953 addr = of_get_property(dp, "local-mac-address", &len);
16954 if (addr && len == ETH_ALEN) {
16955 memcpy(dev->dev_addr, addr, ETH_ALEN);
16956 return 0;
16958 return -ENODEV;
16961 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16963 struct net_device *dev = tp->dev;
16965 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16966 return 0;
16968 #endif
16970 static int tg3_get_device_address(struct tg3 *tp)
16972 struct net_device *dev = tp->dev;
16973 u32 hi, lo, mac_offset;
16974 int addr_ok = 0;
16975 int err;
16977 #ifdef CONFIG_SPARC
16978 if (!tg3_get_macaddr_sparc(tp))
16979 return 0;
16980 #endif
16982 if (tg3_flag(tp, IS_SSB_CORE)) {
16983 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16984 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16985 return 0;
16988 mac_offset = 0x7c;
16989 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16990 tg3_flag(tp, 5780_CLASS)) {
16991 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16992 mac_offset = 0xcc;
16993 if (tg3_nvram_lock(tp))
16994 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16995 else
16996 tg3_nvram_unlock(tp);
16997 } else if (tg3_flag(tp, 5717_PLUS)) {
16998 if (tp->pci_fn & 1)
16999 mac_offset = 0xcc;
17000 if (tp->pci_fn > 1)
17001 mac_offset += 0x18c;
17002 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17003 mac_offset = 0x10;
17005 /* First try to get it from MAC address mailbox. */
17006 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17007 if ((hi >> 16) == 0x484b) {
17008 dev->dev_addr[0] = (hi >> 8) & 0xff;
17009 dev->dev_addr[1] = (hi >> 0) & 0xff;
17011 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17012 dev->dev_addr[2] = (lo >> 24) & 0xff;
17013 dev->dev_addr[3] = (lo >> 16) & 0xff;
17014 dev->dev_addr[4] = (lo >> 8) & 0xff;
17015 dev->dev_addr[5] = (lo >> 0) & 0xff;
17017 /* Some old bootcode may report a 0 MAC address in SRAM */
17018 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17020 if (!addr_ok) {
17021 /* Next, try NVRAM. */
17022 if (!tg3_flag(tp, NO_NVRAM) &&
17023 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17024 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17025 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17026 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17028 /* Finally just fetch it out of the MAC control regs. */
17029 else {
17030 hi = tr32(MAC_ADDR_0_HIGH);
17031 lo = tr32(MAC_ADDR_0_LOW);
17033 dev->dev_addr[5] = lo & 0xff;
17034 dev->dev_addr[4] = (lo >> 8) & 0xff;
17035 dev->dev_addr[3] = (lo >> 16) & 0xff;
17036 dev->dev_addr[2] = (lo >> 24) & 0xff;
17037 dev->dev_addr[1] = hi & 0xff;
17038 dev->dev_addr[0] = (hi >> 8) & 0xff;
17042 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17043 #ifdef CONFIG_SPARC
17044 if (!tg3_get_default_macaddr_sparc(tp))
17045 return 0;
17046 #endif
17047 return -EINVAL;
17049 return 0;
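/* Hypothetical helper, shown only to make the SRAM layout explicit: the
 * mailbox tags a valid address with 0x484b (ASCII "HK") in the upper half
 * of the high word; the high word then carries octets 0-1 and the low word
 * octets 2-5, MSB first, exactly as unpacked above.
 */
static inline void tg3_unpack_sram_macaddr(u32 hi, u32 lo, u8 *addr)
{
	addr[0] = (hi >>  8) & 0xff;
	addr[1] = (hi >>  0) & 0xff;
	addr[2] = (lo >> 24) & 0xff;
	addr[3] = (lo >> 16) & 0xff;
	addr[4] = (lo >>  8) & 0xff;
	addr[5] = (lo >>  0) & 0xff;
}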
17052 #define BOUNDARY_SINGLE_CACHELINE 1
17053 #define BOUNDARY_MULTI_CACHELINE 2
17055 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17057 int cacheline_size;
17058 u8 byte;
17059 int goal;
17061 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17062 if (byte == 0)
17063 cacheline_size = 1024;
17064 else
17065 cacheline_size = (int) byte * 4;
17067 /* On 5703 and later chips, the boundary bits have no
17068 * effect.
17070 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17071 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17072 !tg3_flag(tp, PCI_EXPRESS))
17073 goto out;
17075 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17076 goal = BOUNDARY_MULTI_CACHELINE;
17077 #else
17078 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17079 goal = BOUNDARY_SINGLE_CACHELINE;
17080 #else
17081 goal = 0;
17082 #endif
17083 #endif
17085 if (tg3_flag(tp, 57765_PLUS)) {
17086 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17087 goto out;
17090 if (!goal)
17091 goto out;
17093 /* PCI controllers on most RISC systems tend to disconnect
17094 * when a device tries to burst across a cache-line boundary.
17095 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17097 * Unfortunately, for PCI-E there are only limited
17098 * write-side controls for this, and thus for reads
17099 * we will still get the disconnects. We'll also waste
17100 * these PCI cycles for both read and write for chips
17101 * other than 5700 and 5701, which do not implement the
17102 * boundary bits.
17104 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17105 switch (cacheline_size) {
17106 case 16:
17107 case 32:
17108 case 64:
17109 case 128:
17110 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17111 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17112 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17113 } else {
17114 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17115 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17117 break;
17119 case 256:
17120 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17121 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17122 break;
17124 default:
17125 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17126 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17127 break;
17129 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17130 switch (cacheline_size) {
17131 case 16:
17132 case 32:
17133 case 64:
17134 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17135 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17136 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17137 break;
17139 /* fallthrough */
17140 case 128:
17141 default:
17142 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17143 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17144 break;
17146 } else {
17147 switch (cacheline_size) {
17148 case 16:
17149 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17150 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17151 DMA_RWCTRL_WRITE_BNDRY_16);
17152 break;
17154 /* fallthrough */
17155 case 32:
17156 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17157 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17158 DMA_RWCTRL_WRITE_BNDRY_32);
17159 break;
17161 /* fallthrough */
17162 case 64:
17163 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17164 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17165 DMA_RWCTRL_WRITE_BNDRY_64);
17166 break;
17168 /* fallthrough */
17169 case 128:
17170 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17171 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17172 DMA_RWCTRL_WRITE_BNDRY_128);
17173 break;
17175 /* fallthrough */
17176 case 256:
17177 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17178 DMA_RWCTRL_WRITE_BNDRY_256);
17179 break;
17180 case 512:
17181 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17182 DMA_RWCTRL_WRITE_BNDRY_512);
17183 break;
17184 case 1024:
17185 default:
17186 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17187 DMA_RWCTRL_WRITE_BNDRY_1024);
17188 break;
17192 out:
17193 return val;
17196 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17197 int size, bool to_device)
17199 struct tg3_internal_buffer_desc test_desc;
17200 u32 sram_dma_descs;
17201 int i, ret;
17203 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17205 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17206 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17207 tw32(RDMAC_STATUS, 0);
17208 tw32(WDMAC_STATUS, 0);
17210 tw32(BUFMGR_MODE, 0);
17211 tw32(FTQ_RESET, 0);
17213 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17214 test_desc.addr_lo = buf_dma & 0xffffffff;
17215 test_desc.nic_mbuf = 0x00002100;
17216 test_desc.len = size;
17219 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17220 * the *second* time the tg3 driver was getting loaded after an
17221 * initial scan.
17223 * Broadcom tells me:
17224 * ...the DMA engine is connected to the GRC block and a DMA
17225 * reset may affect the GRC block in some unpredictable way...
17226 * The behavior of resets to individual blocks has not been tested.
17228 * Broadcom noted the GRC reset will also reset all sub-components.
17230 if (to_device) {
17231 test_desc.cqid_sqid = (13 << 8) | 2;
17233 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17234 udelay(40);
17235 } else {
17236 test_desc.cqid_sqid = (16 << 8) | 7;
17238 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17239 udelay(40);
17241 test_desc.flags = 0x00000005;
17243 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17244 u32 val;
17246 val = *(((u32 *)&test_desc) + i);
17247 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17248 sram_dma_descs + (i * sizeof(u32)));
17249 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17251 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17253 if (to_device)
17254 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17255 else
17256 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17258 ret = -ENODEV;
17259 for (i = 0; i < 40; i++) {
17260 u32 val;
17262 if (to_device)
17263 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17264 else
17265 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17266 if ((val & 0xffff) == sram_dma_descs) {
17267 ret = 0;
17268 break;
17271 udelay(100);
17274 return ret;
17277 #define TEST_BUFFER_SIZE 0x2000
17279 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17280 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17281 { },
17284 static int tg3_test_dma(struct tg3 *tp)
17286 dma_addr_t buf_dma;
17287 u32 *buf, saved_dma_rwctrl;
17288 int ret = 0;
17290 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17291 &buf_dma, GFP_KERNEL);
17292 if (!buf) {
17293 ret = -ENOMEM;
17294 goto out_nofree;
17297 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17298 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17300 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17302 if (tg3_flag(tp, 57765_PLUS))
17303 goto out;
17305 if (tg3_flag(tp, PCI_EXPRESS)) {
17306 /* DMA read watermark not used on PCIE */
17307 tp->dma_rwctrl |= 0x00180000;
17308 } else if (!tg3_flag(tp, PCIX_MODE)) {
17309 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17310 tg3_asic_rev(tp) == ASIC_REV_5750)
17311 tp->dma_rwctrl |= 0x003f0000;
17312 else
17313 tp->dma_rwctrl |= 0x003f000f;
17314 } else {
17315 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17316 tg3_asic_rev(tp) == ASIC_REV_5704) {
17317 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17318 u32 read_water = 0x7;
17320 /* If the 5704 is behind the EPB bridge, we can
17321 * do the less restrictive ONE_DMA workaround for
17322 * better performance.
17324 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17325 tg3_asic_rev(tp) == ASIC_REV_5704)
17326 tp->dma_rwctrl |= 0x8000;
17327 else if (ccval == 0x6 || ccval == 0x7)
17328 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17330 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17331 read_water = 4;
17332 /* Set bit 23 to enable PCIX hw bug fix */
17333 tp->dma_rwctrl |=
17334 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17335 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17336 (1 << 23);
17337 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17338 /* 5780 always in PCIX mode */
17339 tp->dma_rwctrl |= 0x00144000;
17340 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17341 /* 5714 always in PCIX mode */
17342 tp->dma_rwctrl |= 0x00148000;
17343 } else {
17344 tp->dma_rwctrl |= 0x001b000f;
17347 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17348 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17350 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17351 tg3_asic_rev(tp) == ASIC_REV_5704)
17352 tp->dma_rwctrl &= 0xfffffff0;
17354 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17355 tg3_asic_rev(tp) == ASIC_REV_5701) {
17356 /* Remove this if it causes problems for some boards. */
17357 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17359 /* On 5700/5701 chips, we need to set this bit.
17360 * Otherwise the chip will issue cacheline transactions
17361 * to streamable DMA memory with not all the byte
17362 * enables turned on. This is an error on several
17363 * RISC PCI controllers, in particular sparc64.
17365 * On 5703/5704 chips, this bit has been reassigned
17366 * a different meaning. In particular, it is used
17367 * on those chips to enable a PCI-X workaround.
17369 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17372 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17375 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17376 tg3_asic_rev(tp) != ASIC_REV_5701)
17377 goto out;
17379 /* It is best to perform the DMA test with the maximum write burst size
17380 * to expose the 5700/5701 write DMA bug.
17382 saved_dma_rwctrl = tp->dma_rwctrl;
17383 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17384 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17386 while (1) {
17387 u32 *p = buf, i;
17389 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17390 p[i] = i;
17392 /* Send the buffer to the chip. */
17393 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17394 if (ret) {
17395 dev_err(&tp->pdev->dev,
17396 "%s: Buffer write failed. err = %d\n",
17397 __func__, ret);
17398 break;
17401 /* Now read it back. */
17402 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17403 if (ret) {
17404 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17405 "err = %d\n", __func__, ret);
17406 break;
17409 /* Verify it. */
17410 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17411 if (p[i] == i)
17412 continue;
17414 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17415 DMA_RWCTRL_WRITE_BNDRY_16) {
17416 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17417 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17418 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17419 break;
17420 } else {
17421 dev_err(&tp->pdev->dev,
17422 "%s: Buffer corrupted on read back! "
17423 "(%d != %d)\n", __func__, p[i], i);
17424 ret = -ENODEV;
17425 goto out;
17429 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17430 /* Success. */
17431 ret = 0;
17432 break;
17435 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17436 DMA_RWCTRL_WRITE_BNDRY_16) {
17437 /* DMA test passed without adjusting the DMA boundary;
17438 * now look for chipsets that are known to expose the
17439 * DMA bug without failing the test.
17441 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17442 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17443 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17444 } else {
17445 /* Safe to use the calculated DMA boundary. */
17446 tp->dma_rwctrl = saved_dma_rwctrl;
17449 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17452 out:
17453 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17454 out_nofree:
17455 return ret;
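/* Minimal sketch (hypothetical helper, not part of the driver) of the
 * coherent-buffer lifecycle that tg3_test_dma() relies on above: one
 * allocation yields both the CPU view (buf) and the device/bus view
 * (buf_dma) of the same memory.
 */
static int tg3_coherent_buf_example(struct pci_dev *pdev)
{
	dma_addr_t buf_dma;
	u32 *buf;

	buf = dma_alloc_coherent(&pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... hand buf_dma to the device, touch buf from the CPU ... */

	dma_free_coherent(&pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
	return 0;
}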
17458 static void tg3_init_bufmgr_config(struct tg3 *tp)
17460 if (tg3_flag(tp, 57765_PLUS)) {
17461 tp->bufmgr_config.mbuf_read_dma_low_water =
17462 DEFAULT_MB_RDMA_LOW_WATER_5705;
17463 tp->bufmgr_config.mbuf_mac_rx_low_water =
17464 DEFAULT_MB_MACRX_LOW_WATER_57765;
17465 tp->bufmgr_config.mbuf_high_water =
17466 DEFAULT_MB_HIGH_WATER_57765;
17468 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17469 DEFAULT_MB_RDMA_LOW_WATER_5705;
17470 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17471 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17472 tp->bufmgr_config.mbuf_high_water_jumbo =
17473 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17474 } else if (tg3_flag(tp, 5705_PLUS)) {
17475 tp->bufmgr_config.mbuf_read_dma_low_water =
17476 DEFAULT_MB_RDMA_LOW_WATER_5705;
17477 tp->bufmgr_config.mbuf_mac_rx_low_water =
17478 DEFAULT_MB_MACRX_LOW_WATER_5705;
17479 tp->bufmgr_config.mbuf_high_water =
17480 DEFAULT_MB_HIGH_WATER_5705;
17481 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17482 tp->bufmgr_config.mbuf_mac_rx_low_water =
17483 DEFAULT_MB_MACRX_LOW_WATER_5906;
17484 tp->bufmgr_config.mbuf_high_water =
17485 DEFAULT_MB_HIGH_WATER_5906;
17488 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17489 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17490 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17491 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17492 tp->bufmgr_config.mbuf_high_water_jumbo =
17493 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17494 } else {
17495 tp->bufmgr_config.mbuf_read_dma_low_water =
17496 DEFAULT_MB_RDMA_LOW_WATER;
17497 tp->bufmgr_config.mbuf_mac_rx_low_water =
17498 DEFAULT_MB_MACRX_LOW_WATER;
17499 tp->bufmgr_config.mbuf_high_water =
17500 DEFAULT_MB_HIGH_WATER;
17502 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17503 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17504 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17505 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17506 tp->bufmgr_config.mbuf_high_water_jumbo =
17507 DEFAULT_MB_HIGH_WATER_JUMBO;
17510 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17511 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17514 static char *tg3_phy_string(struct tg3 *tp)
17516 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17517 case TG3_PHY_ID_BCM5400: return "5400";
17518 case TG3_PHY_ID_BCM5401: return "5401";
17519 case TG3_PHY_ID_BCM5411: return "5411";
17520 case TG3_PHY_ID_BCM5701: return "5701";
17521 case TG3_PHY_ID_BCM5703: return "5703";
17522 case TG3_PHY_ID_BCM5704: return "5704";
17523 case TG3_PHY_ID_BCM5705: return "5705";
17524 case TG3_PHY_ID_BCM5750: return "5750";
17525 case TG3_PHY_ID_BCM5752: return "5752";
17526 case TG3_PHY_ID_BCM5714: return "5714";
17527 case TG3_PHY_ID_BCM5780: return "5780";
17528 case TG3_PHY_ID_BCM5755: return "5755";
17529 case TG3_PHY_ID_BCM5787: return "5787";
17530 case TG3_PHY_ID_BCM5784: return "5784";
17531 case TG3_PHY_ID_BCM5756: return "5722/5756";
17532 case TG3_PHY_ID_BCM5906: return "5906";
17533 case TG3_PHY_ID_BCM5761: return "5761";
17534 case TG3_PHY_ID_BCM5718C: return "5718C";
17535 case TG3_PHY_ID_BCM5718S: return "5718S";
17536 case TG3_PHY_ID_BCM57765: return "57765";
17537 case TG3_PHY_ID_BCM5719C: return "5719C";
17538 case TG3_PHY_ID_BCM5720C: return "5720C";
17539 case TG3_PHY_ID_BCM5762: return "5762C";
17540 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17541 case 0: return "serdes";
17542 default: return "unknown";
17546 static char *tg3_bus_string(struct tg3 *tp, char *str)
17548 if (tg3_flag(tp, PCI_EXPRESS)) {
17549 strcpy(str, "PCI Express");
17550 return str;
17551 } else if (tg3_flag(tp, PCIX_MODE)) {
17552 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17554 strcpy(str, "PCIX:");
17556 if ((clock_ctrl == 7) ||
17557 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17558 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17559 strcat(str, "133MHz");
17560 else if (clock_ctrl == 0)
17561 strcat(str, "33MHz");
17562 else if (clock_ctrl == 2)
17563 strcat(str, "50MHz");
17564 else if (clock_ctrl == 4)
17565 strcat(str, "66MHz");
17566 else if (clock_ctrl == 6)
17567 strcat(str, "100MHz");
17568 } else {
17569 strcpy(str, "PCI:");
17570 if (tg3_flag(tp, PCI_HIGH_SPEED))
17571 strcat(str, "66MHz");
17572 else
17573 strcat(str, "33MHz");
17575 if (tg3_flag(tp, PCI_32BIT))
17576 strcat(str, ":32-bit");
17577 else
17578 strcat(str, ":64-bit");
17579 return str;
17582 static void tg3_init_coal(struct tg3 *tp)
17584 struct ethtool_coalesce *ec = &tp->coal;
17586 memset(ec, 0, sizeof(*ec));
17587 ec->cmd = ETHTOOL_GCOALESCE;
17588 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17589 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17590 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17591 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17592 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17593 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17594 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17595 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17596 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17598 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17599 HOSTCC_MODE_CLRTICK_TXBD)) {
17600 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17601 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17602 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17603 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17606 if (tg3_flag(tp, 5705_PLUS)) {
17607 ec->rx_coalesce_usecs_irq = 0;
17608 ec->tx_coalesce_usecs_irq = 0;
17609 ec->stats_block_coalesce_usecs = 0;
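/* These defaults are what userspace reads back via the standard ethtool
 * coalescing query (e.g. `ethtool -c ethX`); tg3's get_coalesce handler
 * copies tp->coal straight out.
 */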
17613 static int tg3_init_one(struct pci_dev *pdev,
17614 const struct pci_device_id *ent)
17616 struct net_device *dev;
17617 struct tg3 *tp;
17618 int i, err;
17619 u32 sndmbx, rcvmbx, intmbx;
17620 char str[40];
17621 u64 dma_mask, persist_dma_mask;
17622 netdev_features_t features = 0;
17624 printk_once(KERN_INFO "%s\n", version);
17626 err = pci_enable_device(pdev);
17627 if (err) {
17628 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17629 return err;
17632 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17633 if (err) {
17634 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17635 goto err_out_disable_pdev;
17638 pci_set_master(pdev);
17640 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17641 if (!dev) {
17642 err = -ENOMEM;
17643 goto err_out_free_res;
17646 SET_NETDEV_DEV(dev, &pdev->dev);
17648 tp = netdev_priv(dev);
17649 tp->pdev = pdev;
17650 tp->dev = dev;
17651 tp->rx_mode = TG3_DEF_RX_MODE;
17652 tp->tx_mode = TG3_DEF_TX_MODE;
17653 tp->irq_sync = 1;
17654 tp->pcierr_recovery = false;
17656 if (tg3_debug > 0)
17657 tp->msg_enable = tg3_debug;
17658 else
17659 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17661 if (pdev_is_ssb_gige_core(pdev)) {
17662 tg3_flag_set(tp, IS_SSB_CORE);
17663 if (ssb_gige_must_flush_posted_writes(pdev))
17664 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17665 if (ssb_gige_one_dma_at_once(pdev))
17666 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17667 if (ssb_gige_have_roboswitch(pdev)) {
17668 tg3_flag_set(tp, USE_PHYLIB);
17669 tg3_flag_set(tp, ROBOSWITCH);
17671 if (ssb_gige_is_rgmii(pdev))
17672 tg3_flag_set(tp, RGMII_MODE);
17675 /* The word/byte swap controls here control register access byte
17676 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17677 * setting below.
17679 tp->misc_host_ctrl =
17680 MISC_HOST_CTRL_MASK_PCI_INT |
17681 MISC_HOST_CTRL_WORD_SWAP |
17682 MISC_HOST_CTRL_INDIR_ACCESS |
17683 MISC_HOST_CTRL_PCISTATE_RW;
17685 /* The NONFRM (non-frame) byte/word swap controls take effect
17686 * on descriptor entries, anything which isn't packet data.
17688 * The StrongARM chips on the board (one for tx, one for rx)
17689 * are running in big-endian mode.
17691 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17692 GRC_MODE_WSWAP_NONFRM_DATA);
17693 #ifdef __BIG_ENDIAN
17694 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17695 #endif
17696 spin_lock_init(&tp->lock);
17697 spin_lock_init(&tp->indirect_lock);
17698 INIT_WORK(&tp->reset_task, tg3_reset_task);
17700 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17701 if (!tp->regs) {
17702 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17703 err = -ENOMEM;
17704 goto err_out_free_dev;
17707 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17708 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17709 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17710 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17711 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17712 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17713 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17714 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17715 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17716 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17717 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17718 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17719 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17720 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17721 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17722 tg3_flag_set(tp, ENABLE_APE);
17723 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17724 if (!tp->aperegs) {
17725 dev_err(&pdev->dev,
17726 "Cannot map APE registers, aborting\n");
17727 err = -ENOMEM;
17728 goto err_out_iounmap;
17732 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17733 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17735 dev->ethtool_ops = &tg3_ethtool_ops;
17736 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17737 dev->netdev_ops = &tg3_netdev_ops;
17738 dev->irq = pdev->irq;
17740 err = tg3_get_invariants(tp, ent);
17741 if (err) {
17742 dev_err(&pdev->dev,
17743 "Problem fetching invariants of chip, aborting\n");
17744 goto err_out_apeunmap;
17747 /* The EPB bridge inside 5714, 5715, and 5780 and any
17748 * device behind the EPB cannot support DMA addresses > 40-bit.
17749 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17750 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17751 * do DMA address check in tg3_start_xmit().
17753 if (tg3_flag(tp, IS_5788))
17754 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17755 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17756 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17757 #ifdef CONFIG_HIGHMEM
17758 dma_mask = DMA_BIT_MASK(64);
17759 #endif
17760 } else
17761 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17763 /* Configure DMA attributes. */
17764 if (dma_mask > DMA_BIT_MASK(32)) {
17765 err = pci_set_dma_mask(pdev, dma_mask);
17766 if (!err) {
17767 features |= NETIF_F_HIGHDMA;
17768 err = pci_set_consistent_dma_mask(pdev,
17769 persist_dma_mask);
17770 if (err < 0) {
17771 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17772 "DMA for consistent allocations\n");
17773 goto err_out_apeunmap;
17777 if (err || dma_mask == DMA_BIT_MASK(32)) {
17778 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17779 if (err) {
17780 dev_err(&pdev->dev,
17781 "No usable DMA configuration, aborting\n");
17782 goto err_out_apeunmap;
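	/* On current kernels the legacy pci_set_dma_mask() /
	 * pci_set_consistent_dma_mask() pair used above is spelled through
	 * the generic DMA API; where both masks agree it collapses to
	 * (sketch):
	 *
	 *	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	 */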
17786 tg3_init_bufmgr_config(tp);
17788 /* 5700 B0 chips do not support checksumming correctly due
17789 * to hardware bugs.
17791 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17792 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17794 if (tg3_flag(tp, 5755_PLUS))
17795 features |= NETIF_F_IPV6_CSUM;
17798 /* TSO is on by default on chips that support hardware TSO.
17799 * Firmware TSO on older chips gives lower performance, so it
17800 * is off by default, but can be enabled using ethtool.
17802 if ((tg3_flag(tp, HW_TSO_1) ||
17803 tg3_flag(tp, HW_TSO_2) ||
17804 tg3_flag(tp, HW_TSO_3)) &&
17805 (features & NETIF_F_IP_CSUM))
17806 features |= NETIF_F_TSO;
17807 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17808 if (features & NETIF_F_IPV6_CSUM)
17809 features |= NETIF_F_TSO6;
17810 if (tg3_flag(tp, HW_TSO_3) ||
17811 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17812 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17813 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17814 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17815 tg3_asic_rev(tp) == ASIC_REV_57780)
17816 features |= NETIF_F_TSO_ECN;
17819 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17820 NETIF_F_HW_VLAN_CTAG_RX;
17821 dev->vlan_features |= features;
17824 * Add loopback capability only for a subset of devices that support
17825 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17826 * loopback for the remaining devices.
17828 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17829 !tg3_flag(tp, CPMU_PRESENT))
17830 /* Add the loopback capability */
17831 features |= NETIF_F_LOOPBACK;
17833 dev->hw_features |= features;
17834 dev->priv_flags |= IFF_UNICAST_FLT;
17836 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17837 dev->min_mtu = TG3_MIN_MTU;
17838 dev->max_mtu = TG3_MAX_MTU(tp);
17840 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17841 !tg3_flag(tp, TSO_CAPABLE) &&
17842 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17843 tg3_flag_set(tp, MAX_RXPEND_64);
17844 tp->rx_pending = 63;
17847 err = tg3_get_device_address(tp);
17848 if (err) {
17849 dev_err(&pdev->dev,
17850 "Could not obtain valid ethernet address, aborting\n");
17851 goto err_out_apeunmap;
17854 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17855 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17856 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17857 for (i = 0; i < tp->irq_max; i++) {
17858 struct tg3_napi *tnapi = &tp->napi[i];
17860 tnapi->tp = tp;
17861 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17863 tnapi->int_mbox = intmbx;
17864 if (i <= 4)
17865 intmbx += 0x8;
17866 else
17867 intmbx += 0x4;
17869 tnapi->consmbox = rcvmbx;
17870 tnapi->prodmbox = sndmbx;
17872 if (i)
17873 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17874 else
17875 tnapi->coal_now = HOSTCC_MODE_NOW;
17877 if (!tg3_flag(tp, SUPPORT_MSIX))
17878 break;
17881 * If we support MSIX, we'll be using RSS. If we're using
17882 * RSS, the first vector only handles link interrupts and the
17883 * remaining vectors handle rx and tx interrupts. Reuse the
17884 * mailbox values for the next iteration. The values we set up
17885 * above are still useful for the single vectored mode.
17887 if (!i)
17888 continue;
17890 rcvmbx += 0x8;
17892 if (sndmbx & 0x4)
17893 sndmbx -= 0x4;
17894 else
17895 sndmbx += 0xc;
17899 * Reset chip in case UNDI or EFI driver did not shut it down.
17900 * The DMA self test will enable WDMAC and we'll see (spurious)
17901 * pending DMA on the PCI bus at that point.
17903 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17904 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17905 tg3_full_lock(tp, 0);
17906 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17907 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17908 tg3_full_unlock(tp);
17911 err = tg3_test_dma(tp);
17912 if (err) {
17913 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17914 goto err_out_apeunmap;
17917 tg3_init_coal(tp);
17919 pci_set_drvdata(pdev, dev);
17921 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17922 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17923 tg3_asic_rev(tp) == ASIC_REV_5762)
17924 tg3_flag_set(tp, PTP_CAPABLE);
17926 tg3_timer_init(tp);
17928 tg3_carrier_off(tp);
17930 err = register_netdev(dev);
17931 if (err) {
17932 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17933 goto err_out_apeunmap;
17936 if (tg3_flag(tp, PTP_CAPABLE)) {
17937 tg3_ptp_init(tp);
17938 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17939 &tp->pdev->dev);
17940 if (IS_ERR(tp->ptp_clock))
17941 tp->ptp_clock = NULL;
17944 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17945 tp->board_part_number,
17946 tg3_chip_rev_id(tp),
17947 tg3_bus_string(tp, str),
17948 dev->dev_addr);
17950 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17951 char *ethtype;
17953 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17954 ethtype = "10/100Base-TX";
17955 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17956 ethtype = "1000Base-SX";
17957 else
17958 ethtype = "10/100/1000Base-T";
17960 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17961 "(WireSpeed[%d], EEE[%d])\n",
17962 tg3_phy_string(tp), ethtype,
17963 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17964 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17967 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17968 (dev->features & NETIF_F_RXCSUM) != 0,
17969 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17970 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17971 tg3_flag(tp, ENABLE_ASF) != 0,
17972 tg3_flag(tp, TSO_CAPABLE) != 0);
17973 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17974 tp->dma_rwctrl,
17975 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17976 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17978 pci_save_state(pdev);
17980 return 0;
17982 err_out_apeunmap:
17983 if (tp->aperegs) {
17984 iounmap(tp->aperegs);
17985 tp->aperegs = NULL;
17988 err_out_iounmap:
17989 if (tp->regs) {
17990 iounmap(tp->regs);
17991 tp->regs = NULL;
17994 err_out_free_dev:
17995 free_netdev(dev);
17997 err_out_free_res:
17998 pci_release_regions(pdev);
18000 err_out_disable_pdev:
18001 if (pci_is_enabled(pdev))
18002 pci_disable_device(pdev);
18003 return err;
18006 static void tg3_remove_one(struct pci_dev *pdev)
18008 struct net_device *dev = pci_get_drvdata(pdev);
18010 if (dev) {
18011 struct tg3 *tp = netdev_priv(dev);
18013 tg3_ptp_fini(tp);
18015 release_firmware(tp->fw);
18017 tg3_reset_task_cancel(tp);
18019 if (tg3_flag(tp, USE_PHYLIB)) {
18020 tg3_phy_fini(tp);
18021 tg3_mdio_fini(tp);
18024 unregister_netdev(dev);
18025 if (tp->aperegs) {
18026 iounmap(tp->aperegs);
18027 tp->aperegs = NULL;
18029 if (tp->regs) {
18030 iounmap(tp->regs);
18031 tp->regs = NULL;
18033 free_netdev(dev);
18034 pci_release_regions(pdev);
18035 pci_disable_device(pdev);
18039 #ifdef CONFIG_PM_SLEEP
18040 static int tg3_suspend(struct device *device)
18042 struct pci_dev *pdev = to_pci_dev(device);
18043 struct net_device *dev = pci_get_drvdata(pdev);
18044 struct tg3 *tp = netdev_priv(dev);
18045 int err = 0;
18047 rtnl_lock();
18049 if (!netif_running(dev))
18050 goto unlock;
18052 tg3_reset_task_cancel(tp);
18053 tg3_phy_stop(tp);
18054 tg3_netif_stop(tp);
18056 tg3_timer_stop(tp);
18058 tg3_full_lock(tp, 1);
18059 tg3_disable_ints(tp);
18060 tg3_full_unlock(tp);
18062 netif_device_detach(dev);
18064 tg3_full_lock(tp, 0);
18065 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18066 tg3_flag_clear(tp, INIT_COMPLETE);
18067 tg3_full_unlock(tp);
18069 err = tg3_power_down_prepare(tp);
18070 if (err) {
18071 int err2;
18073 tg3_full_lock(tp, 0);
18075 tg3_flag_set(tp, INIT_COMPLETE);
18076 err2 = tg3_restart_hw(tp, true);
18077 if (err2)
18078 goto out;
18080 tg3_timer_start(tp);
18082 netif_device_attach(dev);
18083 tg3_netif_start(tp);
18085 out:
18086 tg3_full_unlock(tp);
18088 if (!err2)
18089 tg3_phy_start(tp);
18092 unlock:
18093 rtnl_unlock();
18094 return err;
18097 static int tg3_resume(struct device *device)
18099 struct pci_dev *pdev = to_pci_dev(device);
18100 struct net_device *dev = pci_get_drvdata(pdev);
18101 struct tg3 *tp = netdev_priv(dev);
18102 int err = 0;
18104 rtnl_lock();
18106 if (!netif_running(dev))
18107 goto unlock;
18109 netif_device_attach(dev);
18111 tg3_full_lock(tp, 0);
18113 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18115 tg3_flag_set(tp, INIT_COMPLETE);
18116 err = tg3_restart_hw(tp,
18117 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18118 if (err)
18119 goto out;
18121 tg3_timer_start(tp);
18123 tg3_netif_start(tp);
18125 out:
18126 tg3_full_unlock(tp);
18128 if (!err)
18129 tg3_phy_start(tp);
18131 unlock:
18132 rtnl_unlock();
18133 return err;
18135 #endif /* CONFIG_PM_SLEEP */
18137 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
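/* SIMPLE_DEV_PM_OPS() expands to a dev_pm_ops that wires the pair into all
 * of the system sleep entry points, roughly:
 *
 *	static const struct dev_pm_ops tg3_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(tg3_suspend, tg3_resume)
 *	};
 */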
18139 static void tg3_shutdown(struct pci_dev *pdev)
18141 struct net_device *dev = pci_get_drvdata(pdev);
18142 struct tg3 *tp = netdev_priv(dev);
18144 rtnl_lock();
18145 netif_device_detach(dev);
18147 if (netif_running(dev))
18148 dev_close(dev);
18150 if (system_state == SYSTEM_POWER_OFF)
18151 tg3_power_down(tp);
18153 rtnl_unlock();
18157 * tg3_io_error_detected - called when PCI error is detected
18158 * @pdev: Pointer to PCI device
18159 * @state: The current pci connection state
18161 * This function is called after a PCI bus error affecting
18162 * this device has been detected.
18164 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18165 pci_channel_state_t state)
18167 struct net_device *netdev = pci_get_drvdata(pdev);
18168 struct tg3 *tp = netdev_priv(netdev);
18169 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18171 netdev_info(netdev, "PCI I/O error detected\n");
18173 rtnl_lock();
18175 /* We probably don't have netdev yet */
18176 if (!netdev || !netif_running(netdev))
18177 goto done;
18179 /* We needn't recover from a permanent error */
18180 if (state == pci_channel_io_frozen)
18181 tp->pcierr_recovery = true;
18183 tg3_phy_stop(tp);
18185 tg3_netif_stop(tp);
18187 tg3_timer_stop(tp);
18189 /* Want to make sure that the reset task doesn't run */
18190 tg3_reset_task_cancel(tp);
18192 netif_device_detach(netdev);
18194 /* Clean up software state, even if MMIO is blocked */
18195 tg3_full_lock(tp, 0);
18196 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18197 tg3_full_unlock(tp);
18199 done:
18200 if (state == pci_channel_io_perm_failure) {
18201 if (netdev) {
18202 tg3_napi_enable(tp);
18203 dev_close(netdev);
18205 err = PCI_ERS_RESULT_DISCONNECT;
18206 } else {
18207 pci_disable_device(pdev);
18210 rtnl_unlock();
18212 return err;
18216 * tg3_io_slot_reset - called after the pci bus has been reset.
18217 * @pdev: Pointer to PCI device
18219 * Restart the card from scratch, as if from a cold-boot.
18220 * At this point, the card has experienced a hard reset,
18221 * followed by fixups by BIOS, and has its config space
18222 * set up identically to what it was at cold boot.
18224 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18226 struct net_device *netdev = pci_get_drvdata(pdev);
18227 struct tg3 *tp = netdev_priv(netdev);
18228 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18229 int err;
18231 rtnl_lock();
18233 if (pci_enable_device(pdev)) {
18234 dev_err(&pdev->dev,
18235 "Cannot re-enable PCI device after reset.\n");
18236 goto done;
18239 pci_set_master(pdev);
18240 pci_restore_state(pdev);
18241 pci_save_state(pdev);
18243 if (!netdev || !netif_running(netdev)) {
18244 rc = PCI_ERS_RESULT_RECOVERED;
18245 goto done;
18248 err = tg3_power_up(tp);
18249 if (err)
18250 goto done;
18252 rc = PCI_ERS_RESULT_RECOVERED;
18254 done:
18255 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18256 tg3_napi_enable(tp);
18257 dev_close(netdev);
18259 rtnl_unlock();
18261 return rc;
18265 * tg3_io_resume - called when traffic can start flowing again.
18266 * @pdev: Pointer to PCI device
18268 * This callback is called when the error recovery driver tells
18269 * us that it's OK to resume normal operation.
18271 static void tg3_io_resume(struct pci_dev *pdev)
18273 struct net_device *netdev = pci_get_drvdata(pdev);
18274 struct tg3 *tp = netdev_priv(netdev);
18275 int err;
18277 rtnl_lock();
18279 if (!netdev || !netif_running(netdev))
18280 goto done;
18282 tg3_full_lock(tp, 0);
18283 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18284 tg3_flag_set(tp, INIT_COMPLETE);
18285 err = tg3_restart_hw(tp, true);
18286 if (err) {
18287 tg3_full_unlock(tp);
18288 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18289 goto done;
18292 netif_device_attach(netdev);
18294 tg3_timer_start(tp);
18296 tg3_netif_start(tp);
18298 tg3_full_unlock(tp);
18300 tg3_phy_start(tp);
18302 done:
18303 tp->pcierr_recovery = false;
18304 rtnl_unlock();
18307 static const struct pci_error_handlers tg3_err_handler = {
18308 .error_detected = tg3_io_error_detected,
18309 .slot_reset = tg3_io_slot_reset,
18310 .resume = tg3_io_resume
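/* AER recovery invokes these in order: error_detected() quiesces the
 * device, slot_reset() re-initializes it after the bus reset, and
 * resume() restarts traffic.
 */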
18313 static struct pci_driver tg3_driver = {
18314 .name = DRV_MODULE_NAME,
18315 .id_table = tg3_pci_tbl,
18316 .probe = tg3_init_one,
18317 .remove = tg3_remove_one,
18318 .err_handler = &tg3_err_handler,
18319 .driver.pm = &tg3_pm_ops,
18320 .shutdown = tg3_shutdown,
18323 module_pci_driver(tg3_driver);
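/* module_pci_driver() generates the module_init()/module_exit() boilerplate
 * that calls pci_register_driver()/pci_unregister_driver() on tg3_driver.
 */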