tg3: Convert to use hwmon_device_register_with_groups
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif
#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			134
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"Sep 16, 2013"
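
/* Illustrative note (added as a reading aid): with the values above,
 * DRV_MODULE_VERSION expands via __stringify() to the literal "3.134".
 */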
#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
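
/* Illustrative note: because TG3_TX_RING_SIZE is a power of two (512),
 * the mask form above is equivalent to a modulo, e.g.
 * NEXT_TX(511) == (511 + 1) & 511 == 0, exactly what (511 + 1) % 512
 * would give, but without a hardware divide.
 */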
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
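
/* Illustrative arithmetic: with the default TG3_DEF_TX_RING_PENDING of
 * 511 descriptors, the TX queue is woken once 511 / 4 = 127 descriptors
 * are free again.
 */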
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
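
/* Illustrative use (a sketch; the actual call sites appear later in this
 * driver): GPIO power switching goes through the tw32_wait_f() wrapper
 * defined below so that the mandatory settle time is honored, e.g.
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 */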
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
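
/* Illustrative pairing (a sketch, not a call site copied from this file):
 * APE locks are taken and released around accesses to resources shared
 * with the APE firmware, e.g.
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	...access APE shared memory...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */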
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
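
/* Illustrative arithmetic: each busy-wait iteration below delays 10 usec,
 * so PHY_BUSY_LOOPS (5000) bounds a single MDIO transaction at roughly
 * 50 ms before __tg3_readphy()/__tg3_writephy() give up with -EBUSY.
 */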
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}

static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
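
/* Illustrative reading of the resolution above (a reading aid, not driver
 * code): when both ends advertise symmetric pause, the link runs full flow
 * control (TX + RX).  When only asymmetric pause is common, the end that
 * also advertised symmetric pause ends up receiving pause frames
 * (FLOW_CTRL_RX) while its partner sends them (FLOW_CTRL_TX).
 */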
1966 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1968 u8 autoneg;
1969 u8 flowctrl = 0;
1970 u32 old_rx_mode = tp->rx_mode;
1971 u32 old_tx_mode = tp->tx_mode;
1973 if (tg3_flag(tp, USE_PHYLIB))
1974 autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
1975 else
1976 autoneg = tp->link_config.autoneg;
1978 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1979 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1980 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1981 else
1982 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1983 } else
1984 flowctrl = tp->link_config.flowctrl;
1986 tp->link_config.active_flowctrl = flowctrl;
1988 if (flowctrl & FLOW_CTRL_RX)
1989 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1990 else
1991 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1993 if (old_rx_mode != tp->rx_mode)
1994 tw32_f(MAC_RX_MODE, tp->rx_mode);
1996 if (flowctrl & FLOW_CTRL_TX)
1997 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1998 else
1999 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2001 if (old_tx_mode != tp->tx_mode)
2002 tw32_f(MAC_TX_MODE, tp->tx_mode);
2005 static void tg3_adjust_link(struct net_device *dev)
2007 u8 oldflowctrl, linkmesg = 0;
2008 u32 mac_mode, lcl_adv, rmt_adv;
2009 struct tg3 *tp = netdev_priv(dev);
2010 struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2012 spin_lock_bh(&tp->lock);
2014 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2015 MAC_MODE_HALF_DUPLEX);
2017 oldflowctrl = tp->link_config.active_flowctrl;
2019 if (phydev->link) {
2020 lcl_adv = 0;
2021 rmt_adv = 0;
2023 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2024 mac_mode |= MAC_MODE_PORT_MODE_MII;
2025 else if (phydev->speed == SPEED_1000 ||
2026 tg3_asic_rev(tp) != ASIC_REV_5785)
2027 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2028 else
2029 mac_mode |= MAC_MODE_PORT_MODE_MII;
2031 if (phydev->duplex == DUPLEX_HALF)
2032 mac_mode |= MAC_MODE_HALF_DUPLEX;
2033 else {
2034 lcl_adv = mii_advertise_flowctrl(
2035 tp->link_config.flowctrl);
2037 if (phydev->pause)
2038 rmt_adv = LPA_PAUSE_CAP;
2039 if (phydev->asym_pause)
2040 rmt_adv |= LPA_PAUSE_ASYM;
2043 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2044 } else
2045 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2047 if (mac_mode != tp->mac_mode) {
2048 tp->mac_mode = mac_mode;
2049 tw32_f(MAC_MODE, tp->mac_mode);
2050 udelay(40);
2053 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2054 if (phydev->speed == SPEED_10)
2055 tw32(MAC_MI_STAT,
2056 MAC_MI_STAT_10MBPS_MODE |
2057 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2058 else
2059 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2062 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2063 tw32(MAC_TX_LENGTHS,
2064 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2065 (6 << TX_LENGTHS_IPG_SHIFT) |
2066 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2067 else
2068 tw32(MAC_TX_LENGTHS,
2069 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2070 (6 << TX_LENGTHS_IPG_SHIFT) |
2071 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2073 if (phydev->link != tp->old_link ||
2074 phydev->speed != tp->link_config.active_speed ||
2075 phydev->duplex != tp->link_config.active_duplex ||
2076 oldflowctrl != tp->link_config.active_flowctrl)
2077 linkmesg = 1;
2079 tp->old_link = phydev->link;
2080 tp->link_config.active_speed = phydev->speed;
2081 tp->link_config.active_duplex = phydev->duplex;
2083 spin_unlock_bh(&tp->lock);
2085 if (linkmesg)
2086 tg3_link_report(tp);
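/* A note on the calling context (general phylib behavior, not anything
 * tg3-specific): tg3_adjust_link() is the link-change callback handed
 * to phy_connect() in tg3_phy_init() below. phylib's state machine
 * invokes it whenever link, speed or duplex changes, outside tg3's own
 * interrupt paths, which is why the MAC reprogramming above runs under
 * spin_lock_bh().
 */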
2089 static int tg3_phy_init(struct tg3 *tp)
2091 struct phy_device *phydev;
2093 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2094 return 0;
2096 /* Bring the PHY back to a known state. */
2097 tg3_bmcr_reset(tp);
2099 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2101 /* Attach the MAC to the PHY. */
2102 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2103 tg3_adjust_link, phydev->interface);
2104 if (IS_ERR(phydev)) {
2105 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2106 return PTR_ERR(phydev);
2109 /* Mask with MAC supported features. */
2110 switch (phydev->interface) {
2111 case PHY_INTERFACE_MODE_GMII:
2112 case PHY_INTERFACE_MODE_RGMII:
2113 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2114 phydev->supported &= (PHY_GBIT_FEATURES |
2115 SUPPORTED_Pause |
2116 SUPPORTED_Asym_Pause);
2117 break;
2119 /* fallthru */
2120 case PHY_INTERFACE_MODE_MII:
2121 phydev->supported &= (PHY_BASIC_FEATURES |
2122 SUPPORTED_Pause |
2123 SUPPORTED_Asym_Pause);
2124 break;
2125 default:
2126 phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2127 return -EINVAL;
2130 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2132 phydev->advertising = phydev->supported;
2134 return 0;
2137 static void tg3_phy_start(struct tg3 *tp)
2139 struct phy_device *phydev;
2141 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2142 return;
2144 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2146 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2147 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2148 phydev->speed = tp->link_config.speed;
2149 phydev->duplex = tp->link_config.duplex;
2150 phydev->autoneg = tp->link_config.autoneg;
2151 phydev->advertising = tp->link_config.advertising;
2154 phy_start(phydev);
2156 phy_start_aneg(phydev);
2159 static void tg3_phy_stop(struct tg3 *tp)
2161 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2162 return;
2164 phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
2167 static void tg3_phy_fini(struct tg3 *tp)
2169 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2170 phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2171 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2175 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2177 int err;
2178 u32 val;
2180 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2181 return 0;
2183 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2184 /* Cannot do read-modify-write on 5401 */
2185 err = tg3_phy_auxctl_write(tp,
2186 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2187 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2188 0x4c20);
2189 goto done;
2192 err = tg3_phy_auxctl_read(tp,
2193 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2194 if (err)
2195 return err;
2197 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2198 err = tg3_phy_auxctl_write(tp,
2199 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2201 done:
2202 return err;
2205 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2207 u32 phytest;
2209 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2210 u32 phy;
2212 tg3_writephy(tp, MII_TG3_FET_TEST,
2213 phytest | MII_TG3_FET_SHADOW_EN);
2214 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2215 if (enable)
2216 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2217 else
2218 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2219 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2221 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2225 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2227 u32 reg;
2229 if (!tg3_flag(tp, 5705_PLUS) ||
2230 (tg3_flag(tp, 5717_PLUS) &&
2231 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2232 return;
2234 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2235 tg3_phy_fet_toggle_apd(tp, enable);
2236 return;
2239 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2240 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2241 MII_TG3_MISC_SHDW_SCR5_SDTL |
2242 MII_TG3_MISC_SHDW_SCR5_C125OE;
2243 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2244 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2246 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2249 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2250 if (enable)
2251 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2253 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2256 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2258 u32 phy;
2260 if (!tg3_flag(tp, 5705_PLUS) ||
2261 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2262 return;
2264 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2265 u32 ephy;
2267 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2268 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2270 tg3_writephy(tp, MII_TG3_FET_TEST,
2271 ephy | MII_TG3_FET_SHADOW_EN);
2272 if (!tg3_readphy(tp, reg, &phy)) {
2273 if (enable)
2274 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2275 else
2276 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2277 tg3_writephy(tp, reg, phy);
2279 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2281 } else {
2282 int ret;
2284 ret = tg3_phy_auxctl_read(tp,
2285 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2286 if (!ret) {
2287 if (enable)
2288 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2289 else
2290 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2291 tg3_phy_auxctl_write(tp,
2292 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2297 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2299 int ret;
2300 u32 val;
2302 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2303 return;
2305 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2306 if (!ret)
2307 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2308 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2311 static void tg3_phy_apply_otp(struct tg3 *tp)
2313 u32 otp, phy;
2315 if (!tp->phy_otp)
2316 return;
2318 otp = tp->phy_otp;
2320 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2321 return;
2323 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2324 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2325 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2327 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2328 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2329 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2331 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2332 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2333 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2335 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2336 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2338 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2339 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2341 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2342 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2343 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2345 tg3_phy_toggle_auxctl_smdsp(tp, false);
2348 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2350 u32 val;
2351 struct ethtool_eee *dest = &tp->eee;
2353 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2354 return;
2356 if (eee)
2357 dest = eee;
2359 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2360 return;
2362 /* Pull eee_active */
2363 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2364 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2365 dest->eee_active = 1;
2366 } else
2367 dest->eee_active = 0;
2369 /* Pull lp advertised settings */
2370 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2371 return;
2372 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2374 /* Pull advertised and eee_enabled settings */
2375 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2376 return;
2377 dest->eee_enabled = !!val;
2378 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2380 /* Pull tx_lpi_enabled */
2381 val = tr32(TG3_CPMU_EEE_MODE);
2382 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2384 /* Pull lpi timer value */
2385 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2388 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2390 u32 val;
2392 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2393 return;
2395 tp->setlpicnt = 0;
2397 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2398 current_link_up &&
2399 tp->link_config.active_duplex == DUPLEX_FULL &&
2400 (tp->link_config.active_speed == SPEED_100 ||
2401 tp->link_config.active_speed == SPEED_1000)) {
2402 u32 eeectl;
2404 if (tp->link_config.active_speed == SPEED_1000)
2405 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2406 else
2407 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2409 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2411 tg3_eee_pull_config(tp, NULL);
2412 if (tp->eee.eee_active)
2413 tp->setlpicnt = 2;
2416 if (!tp->setlpicnt) {
2417 if (current_link_up &&
2418 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2419 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2420 tg3_phy_toggle_auxctl_smdsp(tp, false);
2423 val = tr32(TG3_CPMU_EEE_MODE);
2424 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2428 static void tg3_phy_eee_enable(struct tg3 *tp)
2430 u32 val;
2432 if (tp->link_config.active_speed == SPEED_1000 &&
2433 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2434 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2435 tg3_flag(tp, 57765_CLASS)) &&
2436 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2437 val = MII_TG3_DSP_TAP26_ALNOKO |
2438 MII_TG3_DSP_TAP26_RMRXSTO;
2439 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2440 tg3_phy_toggle_auxctl_smdsp(tp, false);
2443 val = tr32(TG3_CPMU_EEE_MODE);
2444 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2447 static int tg3_wait_macro_done(struct tg3 *tp)
2449 int limit = 100;
2451 while (limit--) {
2452 u32 tmp32;
2454 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2455 if ((tmp32 & 0x1000) == 0)
2456 break;
2459 if (limit < 0)
2460 return -EBUSY;
2462 return 0;
2465 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2467 static const u32 test_pat[4][6] = {
2468 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2469 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2470 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2471 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2472 };
2473 int chan;
2475 for (chan = 0; chan < 4; chan++) {
2476 int i;
2478 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2479 (chan * 0x2000) | 0x0200);
2480 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2482 for (i = 0; i < 6; i++)
2483 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2484 test_pat[chan][i]);
2486 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2487 if (tg3_wait_macro_done(tp)) {
2488 *resetp = 1;
2489 return -EBUSY;
2492 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2493 (chan * 0x2000) | 0x0200);
2494 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2495 if (tg3_wait_macro_done(tp)) {
2496 *resetp = 1;
2497 return -EBUSY;
2500 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2501 if (tg3_wait_macro_done(tp)) {
2502 *resetp = 1;
2503 return -EBUSY;
2506 for (i = 0; i < 6; i += 2) {
2507 u32 low, high;
2509 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2510 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2511 tg3_wait_macro_done(tp)) {
2512 *resetp = 1;
2513 return -EBUSY;
2515 low &= 0x7fff;
2516 high &= 0x000f;
2517 if (low != test_pat[chan][i] ||
2518 high != test_pat[chan][i+1]) {
2519 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2520 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2521 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2523 return -EBUSY;
2528 return 0;
2531 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2533 int chan;
2535 for (chan = 0; chan < 4; chan++) {
2536 int i;
2538 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2539 (chan * 0x2000) | 0x0200);
2540 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2541 for (i = 0; i < 6; i++)
2542 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2543 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2544 if (tg3_wait_macro_done(tp))
2545 return -EBUSY;
2548 return 0;
2551 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2553 u32 reg32, phy9_orig;
2554 int retries, do_phy_reset, err;
2556 retries = 10;
2557 do_phy_reset = 1;
2558 do {
2559 if (do_phy_reset) {
2560 err = tg3_bmcr_reset(tp);
2561 if (err)
2562 return err;
2563 do_phy_reset = 0;
2566 /* Disable transmitter and interrupt. */
2567 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2568 continue;
2570 reg32 |= 0x3000;
2571 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2573 /* Set full-duplex, 1000 Mbps. */
2574 tg3_writephy(tp, MII_BMCR,
2575 BMCR_FULLDPLX | BMCR_SPEED1000);
2577 /* Set to master mode. */
2578 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2579 continue;
2581 tg3_writephy(tp, MII_CTRL1000,
2582 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2584 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2585 if (err)
2586 return err;
2588 /* Block the PHY control access. */
2589 tg3_phydsp_write(tp, 0x8005, 0x0800);
2591 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2592 if (!err)
2593 break;
2594 } while (--retries);
2596 err = tg3_phy_reset_chanpat(tp);
2597 if (err)
2598 return err;
2600 tg3_phydsp_write(tp, 0x8005, 0x0000);
2602 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2603 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2605 tg3_phy_toggle_auxctl_smdsp(tp, false);
2607 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2609 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2610 reg32 &= ~0x3000;
2611 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2612 } else if (!err)
2613 err = -EBUSY;
2615 return err;
2618 static void tg3_carrier_off(struct tg3 *tp)
2620 netif_carrier_off(tp->dev);
2621 tp->link_up = false;
2624 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2626 if (tg3_flag(tp, ENABLE_ASF))
2627 netdev_warn(tp->dev,
2628 "Management side-band traffic will be interrupted during phy settings change\n");
2631 /* This will reset the tigon3 PHY if there is no valid
2632 * link unless the FORCE argument is non-zero.
2633 */
2634 static int tg3_phy_reset(struct tg3 *tp)
2636 u32 val, cpmuctrl;
2637 int err;
2639 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2640 val = tr32(GRC_MISC_CFG);
2641 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2642 udelay(40);
2644 err = tg3_readphy(tp, MII_BMSR, &val);
2645 err |= tg3_readphy(tp, MII_BMSR, &val);
2646 if (err != 0)
2647 return -EBUSY;
2649 if (netif_running(tp->dev) && tp->link_up) {
2650 netif_carrier_off(tp->dev);
2651 tg3_link_report(tp);
2654 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2655 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2656 tg3_asic_rev(tp) == ASIC_REV_5705) {
2657 err = tg3_phy_reset_5703_4_5(tp);
2658 if (err)
2659 return err;
2660 goto out;
2663 cpmuctrl = 0;
2664 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2665 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2666 cpmuctrl = tr32(TG3_CPMU_CTRL);
2667 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2668 tw32(TG3_CPMU_CTRL,
2669 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2672 err = tg3_bmcr_reset(tp);
2673 if (err)
2674 return err;
2676 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2677 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2678 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2680 tw32(TG3_CPMU_CTRL, cpmuctrl);
2683 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2684 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2685 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2686 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2687 CPMU_LSPD_1000MB_MACCLK_12_5) {
2688 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2689 udelay(40);
2690 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2694 if (tg3_flag(tp, 5717_PLUS) &&
2695 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2696 return 0;
2698 tg3_phy_apply_otp(tp);
2700 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2701 tg3_phy_toggle_apd(tp, true);
2702 else
2703 tg3_phy_toggle_apd(tp, false);
2705 out:
2706 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2707 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2708 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2709 tg3_phydsp_write(tp, 0x000a, 0x0323);
2710 tg3_phy_toggle_auxctl_smdsp(tp, false);
2713 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2714 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2715 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2718 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2719 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2720 tg3_phydsp_write(tp, 0x000a, 0x310b);
2721 tg3_phydsp_write(tp, 0x201f, 0x9506);
2722 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2723 tg3_phy_toggle_auxctl_smdsp(tp, false);
2725 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2726 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2727 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2728 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2729 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2730 tg3_writephy(tp, MII_TG3_TEST1,
2731 MII_TG3_TEST1_TRIM_EN | 0x4);
2732 } else
2733 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2735 tg3_phy_toggle_auxctl_smdsp(tp, false);
2739 /* Set Extended packet length bit (bit 14) on all chips that */
2740 /* support jumbo frames */
2741 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2742 /* Cannot do read-modify-write on 5401 */
2743 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2744 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2745 /* Set bit 14 with read-modify-write to preserve other bits */
2746 err = tg3_phy_auxctl_read(tp,
2747 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2748 if (!err)
2749 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2750 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2753 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2754 * jumbo frames transmission.
2755 */
2756 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2757 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2758 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2759 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2762 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2763 /* adjust output voltage */
2764 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2767 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2768 tg3_phydsp_write(tp, 0xffb, 0x4000);
2770 tg3_phy_toggle_automdix(tp, true);
2771 tg3_phy_set_wirespeed(tp);
2772 return 0;
2775 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2776 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2777 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2778 TG3_GPIO_MSG_NEED_VAUX)
2779 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2780 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2781 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2782 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2783 (TG3_GPIO_MSG_DRVR_PRES << 12))
2785 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2786 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2787 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2788 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2789 (TG3_GPIO_MSG_NEED_VAUX << 12))
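/* A sketch of the layout these masks imply: the GPIO message word
 * carries one small status field per PCI function, spaced four bits
 * apart (shifts 0, 4, 8, 12), each holding the two TG3_GPIO_MSG_* bits.
 * For example, a raw field value of 0x00000011 would mean functions 0
 * and 1 both assert DRVR_PRES and neither requests Vaux. In the actual
 * register the whole field is additionally offset by
 * TG3_APE_GPIO_MSG_SHIFT, as tg3_set_function_status() below shows.
 */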
2791 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2793 u32 status, shift;
2795 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2796 tg3_asic_rev(tp) == ASIC_REV_5719)
2797 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2798 else
2799 status = tr32(TG3_CPMU_DRV_STATUS);
2801 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2802 status &= ~(TG3_GPIO_MSG_MASK << shift);
2803 status |= (newstat << shift);
2805 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2806 tg3_asic_rev(tp) == ASIC_REV_5719)
2807 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2808 else
2809 tw32(TG3_CPMU_DRV_STATUS, status);
2811 return status >> TG3_APE_GPIO_MSG_SHIFT;
2814 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2816 if (!tg3_flag(tp, IS_NIC))
2817 return 0;
2819 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2820 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2821 tg3_asic_rev(tp) == ASIC_REV_5720) {
2822 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2823 return -EIO;
2825 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2827 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2828 TG3_GRC_LCLCTL_PWRSW_DELAY);
2830 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2831 } else {
2832 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2833 TG3_GRC_LCLCTL_PWRSW_DELAY);
2836 return 0;
2839 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2841 u32 grc_local_ctrl;
2843 if (!tg3_flag(tp, IS_NIC) ||
2844 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2845 tg3_asic_rev(tp) == ASIC_REV_5701)
2846 return;
2848 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2850 tw32_wait_f(GRC_LOCAL_CTRL,
2851 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2852 TG3_GRC_LCLCTL_PWRSW_DELAY);
2854 tw32_wait_f(GRC_LOCAL_CTRL,
2855 grc_local_ctrl,
2856 TG3_GRC_LCLCTL_PWRSW_DELAY);
2858 tw32_wait_f(GRC_LOCAL_CTRL,
2859 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2860 TG3_GRC_LCLCTL_PWRSW_DELAY);
2863 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2865 if (!tg3_flag(tp, IS_NIC))
2866 return;
2868 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2869 tg3_asic_rev(tp) == ASIC_REV_5701) {
2870 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2871 (GRC_LCLCTRL_GPIO_OE0 |
2872 GRC_LCLCTRL_GPIO_OE1 |
2873 GRC_LCLCTRL_GPIO_OE2 |
2874 GRC_LCLCTRL_GPIO_OUTPUT0 |
2875 GRC_LCLCTRL_GPIO_OUTPUT1),
2876 TG3_GRC_LCLCTL_PWRSW_DELAY);
2877 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2878 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2879 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2880 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2881 GRC_LCLCTRL_GPIO_OE1 |
2882 GRC_LCLCTRL_GPIO_OE2 |
2883 GRC_LCLCTRL_GPIO_OUTPUT0 |
2884 GRC_LCLCTRL_GPIO_OUTPUT1 |
2885 tp->grc_local_ctrl;
2886 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2887 TG3_GRC_LCLCTL_PWRSW_DELAY);
2889 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2890 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2891 TG3_GRC_LCLCTL_PWRSW_DELAY);
2893 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2894 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2895 TG3_GRC_LCLCTL_PWRSW_DELAY);
2896 } else {
2897 u32 no_gpio2;
2898 u32 grc_local_ctrl = 0;
2900 /* Workaround to prevent overdrawing Amps. */
2901 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2902 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2903 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2904 grc_local_ctrl,
2905 TG3_GRC_LCLCTL_PWRSW_DELAY);
2908 /* On 5753 and variants, GPIO2 cannot be used. */
2909 no_gpio2 = tp->nic_sram_data_cfg &
2910 NIC_SRAM_DATA_CFG_NO_GPIO2;
2912 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2913 GRC_LCLCTRL_GPIO_OE1 |
2914 GRC_LCLCTRL_GPIO_OE2 |
2915 GRC_LCLCTRL_GPIO_OUTPUT1 |
2916 GRC_LCLCTRL_GPIO_OUTPUT2;
2917 if (no_gpio2) {
2918 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2919 GRC_LCLCTRL_GPIO_OUTPUT2);
2921 tw32_wait_f(GRC_LOCAL_CTRL,
2922 tp->grc_local_ctrl | grc_local_ctrl,
2923 TG3_GRC_LCLCTL_PWRSW_DELAY);
2925 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2927 tw32_wait_f(GRC_LOCAL_CTRL,
2928 tp->grc_local_ctrl | grc_local_ctrl,
2929 TG3_GRC_LCLCTL_PWRSW_DELAY);
2931 if (!no_gpio2) {
2932 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2933 tw32_wait_f(GRC_LOCAL_CTRL,
2934 tp->grc_local_ctrl | grc_local_ctrl,
2935 TG3_GRC_LCLCTL_PWRSW_DELAY);
2940 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2942 u32 msg = 0;
2944 /* Serialize power state transitions */
2945 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2946 return;
2948 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2949 msg = TG3_GPIO_MSG_NEED_VAUX;
2951 msg = tg3_set_function_status(tp, msg);
2953 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2954 goto done;
2956 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2957 tg3_pwrsrc_switch_to_vaux(tp);
2958 else
2959 tg3_pwrsrc_die_with_vmain(tp);
2961 done:
2962 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2965 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2967 bool need_vaux = false;
2969 /* The GPIOs do something completely different on 57765. */
2970 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2971 return;
2973 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2974 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2975 tg3_asic_rev(tp) == ASIC_REV_5720) {
2976 tg3_frob_aux_power_5717(tp, include_wol ?
2977 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2978 return;
2981 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2982 struct net_device *dev_peer;
2984 dev_peer = pci_get_drvdata(tp->pdev_peer);
2986 /* remove_one() may have been run on the peer. */
2987 if (dev_peer) {
2988 struct tg3 *tp_peer = netdev_priv(dev_peer);
2990 if (tg3_flag(tp_peer, INIT_COMPLETE))
2991 return;
2993 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2994 tg3_flag(tp_peer, ENABLE_ASF))
2995 need_vaux = true;
2999 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3000 tg3_flag(tp, ENABLE_ASF))
3001 need_vaux = true;
3003 if (need_vaux)
3004 tg3_pwrsrc_switch_to_vaux(tp);
3005 else
3006 tg3_pwrsrc_die_with_vmain(tp);
3009 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3011 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3012 return 1;
3013 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3014 if (speed != SPEED_10)
3015 return 1;
3016 } else if (speed == SPEED_10)
3017 return 1;
3019 return 0;
3022 static bool tg3_phy_power_bug(struct tg3 *tp)
3024 switch (tg3_asic_rev(tp)) {
3025 case ASIC_REV_5700:
3026 case ASIC_REV_5704:
3027 return true;
3028 case ASIC_REV_5780:
3029 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3030 return true;
3031 return false;
3032 case ASIC_REV_5717:
3033 if (!tp->pci_fn)
3034 return true;
3035 return false;
3036 case ASIC_REV_5719:
3037 case ASIC_REV_5720:
3038 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3039 !tp->pci_fn)
3040 return true;
3041 return false;
3044 return false;
3047 static bool tg3_phy_led_bug(struct tg3 *tp)
3049 switch (tg3_asic_rev(tp)) {
3050 case ASIC_REV_5719:
3051 case ASIC_REV_5720:
3052 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3053 !tp->pci_fn)
3054 return true;
3055 return false;
3058 return false;
3061 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3063 u32 val;
3065 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3066 return;
3068 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3069 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3070 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3071 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3073 sg_dig_ctrl |=
3074 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3075 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3076 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3078 return;
3081 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3082 tg3_bmcr_reset(tp);
3083 val = tr32(GRC_MISC_CFG);
3084 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3085 udelay(40);
3086 return;
3087 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3088 u32 phytest;
3089 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3090 u32 phy;
3092 tg3_writephy(tp, MII_ADVERTISE, 0);
3093 tg3_writephy(tp, MII_BMCR,
3094 BMCR_ANENABLE | BMCR_ANRESTART);
3096 tg3_writephy(tp, MII_TG3_FET_TEST,
3097 phytest | MII_TG3_FET_SHADOW_EN);
3098 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3099 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3100 tg3_writephy(tp,
3101 MII_TG3_FET_SHDW_AUXMODE4,
3102 phy);
3104 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3106 return;
3107 } else if (do_low_power) {
3108 if (!tg3_phy_led_bug(tp))
3109 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3110 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3112 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3113 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3114 MII_TG3_AUXCTL_PCTL_VREG_11V;
3115 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3118 /* The PHY should not be powered down on some chips because
3119 * of bugs.
3120 */
3121 if (tg3_phy_power_bug(tp))
3122 return;
3124 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3125 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3126 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3127 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3128 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3129 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3132 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3135 /* tp->lock is held. */
3136 static int tg3_nvram_lock(struct tg3 *tp)
3138 if (tg3_flag(tp, NVRAM)) {
3139 int i;
3141 if (tp->nvram_lock_cnt == 0) {
3142 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3143 for (i = 0; i < 8000; i++) {
3144 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3145 break;
3146 udelay(20);
3148 if (i == 8000) {
3149 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3150 return -ENODEV;
3153 tp->nvram_lock_cnt++;
3155 return 0;
3158 /* tp->lock is held. */
3159 static void tg3_nvram_unlock(struct tg3 *tp)
3161 if (tg3_flag(tp, NVRAM)) {
3162 if (tp->nvram_lock_cnt > 0)
3163 tp->nvram_lock_cnt--;
3164 if (tp->nvram_lock_cnt == 0)
3165 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3169 /* tp->lock is held. */
3170 static void tg3_enable_nvram_access(struct tg3 *tp)
3172 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3173 u32 nvaccess = tr32(NVRAM_ACCESS);
3175 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3179 /* tp->lock is held. */
3180 static void tg3_disable_nvram_access(struct tg3 *tp)
3182 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3183 u32 nvaccess = tr32(NVRAM_ACCESS);
3185 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3189 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3190 u32 offset, u32 *val)
3192 u32 tmp;
3193 int i;
3195 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3196 return -EINVAL;
3198 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3199 EEPROM_ADDR_DEVID_MASK |
3200 EEPROM_ADDR_READ);
3201 tw32(GRC_EEPROM_ADDR,
3202 tmp |
3203 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3204 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3205 EEPROM_ADDR_ADDR_MASK) |
3206 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3208 for (i = 0; i < 1000; i++) {
3209 tmp = tr32(GRC_EEPROM_ADDR);
3211 if (tmp & EEPROM_ADDR_COMPLETE)
3212 break;
3213 msleep(1);
3215 if (!(tmp & EEPROM_ADDR_COMPLETE))
3216 return -EBUSY;
3218 tmp = tr32(GRC_EEPROM_DATA);
3220 /*
3221 * The data will always be opposite the native endian
3222 * format. Perform a blind byteswap to compensate.
3223 */
3224 *val = swab32(tmp);
3226 return 0;
3229 #define NVRAM_CMD_TIMEOUT 10000
3231 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3233 int i;
3235 tw32(NVRAM_CMD, nvram_cmd);
3236 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3237 udelay(10);
3238 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3239 udelay(10);
3240 break;
3244 if (i == NVRAM_CMD_TIMEOUT)
3245 return -EBUSY;
3247 return 0;
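/* Back-of-the-envelope bound implied by the loop above: up to
 * NVRAM_CMD_TIMEOUT polls at 10 us apiece, i.e. roughly
 * 10000 * 10 us = 100 ms, before the command is declared stuck
 * and -EBUSY is returned.
 */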
3250 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3252 if (tg3_flag(tp, NVRAM) &&
3253 tg3_flag(tp, NVRAM_BUFFERED) &&
3254 tg3_flag(tp, FLASH) &&
3255 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3256 (tp->nvram_jedecnum == JEDEC_ATMEL))
3258 addr = ((addr / tp->nvram_pagesize) <<
3259 ATMEL_AT45DB0X1B_PAGE_POS) +
3260 (addr % tp->nvram_pagesize);
3262 return addr;
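/* A worked example of the translation above, assuming the 264-byte
 * page size these Atmel parts use (the actual value comes from
 * tp->nvram_pagesize, set elsewhere): logical addr 1000 gives page
 * 1000 / 264 = 3 and offset 1000 % 264 = 208, so the physical addr is
 * (3 << ATMEL_AT45DB0X1B_PAGE_POS) + 208 = (3 << 9) + 208 = 1744.
 * tg3_nvram_logical_addr() below is the inverse mapping.
 */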
3265 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3267 if (tg3_flag(tp, NVRAM) &&
3268 tg3_flag(tp, NVRAM_BUFFERED) &&
3269 tg3_flag(tp, FLASH) &&
3270 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3271 (tp->nvram_jedecnum == JEDEC_ATMEL))
3273 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3274 tp->nvram_pagesize) +
3275 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3277 return addr;
3280 /* NOTE: Data read in from NVRAM is byteswapped according to
3281 * the byteswapping settings for all other register accesses.
3282 * tg3 devices are BE devices, so on a BE machine, the data
3283 * returned will be exactly as it is seen in NVRAM. On a LE
3284 * machine, the 32-bit value will be byteswapped.
3285 */
3286 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3288 int ret;
3290 if (!tg3_flag(tp, NVRAM))
3291 return tg3_nvram_read_using_eeprom(tp, offset, val);
3293 offset = tg3_nvram_phys_addr(tp, offset);
3295 if (offset > NVRAM_ADDR_MSK)
3296 return -EINVAL;
3298 ret = tg3_nvram_lock(tp);
3299 if (ret)
3300 return ret;
3302 tg3_enable_nvram_access(tp);
3304 tw32(NVRAM_ADDR, offset);
3305 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3306 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3308 if (ret == 0)
3309 *val = tr32(NVRAM_RDDATA);
3311 tg3_disable_nvram_access(tp);
3313 tg3_nvram_unlock(tp);
3315 return ret;
3318 /* Ensures NVRAM data is in bytestream format. */
3319 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3321 u32 v;
3322 int res = tg3_nvram_read(tp, offset, &v);
3323 if (!res)
3324 *val = cpu_to_be32(v);
3325 return res;
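/* A bytestream worked example (illustrative bytes): if NVRAM holds the
 * byte sequence 12 34 56 78, the __be32 produced here stores those same
 * bytes into the destination on both big- and little-endian hosts, and
 * be32_to_cpu() on the result yields 0x12345678 either way. Callers
 * that parse NVRAM contents as a byte stream depend on exactly this.
 */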
3328 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3329 u32 offset, u32 len, u8 *buf)
3331 int i, j, rc = 0;
3332 u32 val;
3334 for (i = 0; i < len; i += 4) {
3335 u32 addr;
3336 __be32 data;
3338 addr = offset + i;
3340 memcpy(&data, buf + i, 4);
3342 /*
3343 * The SEEPROM interface expects the data to always be opposite
3344 * the native endian format. We accomplish this by reversing
3345 * all the operations that would have been performed on the
3346 * data from a call to tg3_nvram_read_be32().
3347 */
3348 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3350 val = tr32(GRC_EEPROM_ADDR);
3351 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3353 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3354 EEPROM_ADDR_READ);
3355 tw32(GRC_EEPROM_ADDR, val |
3356 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3357 (addr & EEPROM_ADDR_ADDR_MASK) |
3358 EEPROM_ADDR_START |
3359 EEPROM_ADDR_WRITE);
3361 for (j = 0; j < 1000; j++) {
3362 val = tr32(GRC_EEPROM_ADDR);
3364 if (val & EEPROM_ADDR_COMPLETE)
3365 break;
3366 msleep(1);
3368 if (!(val & EEPROM_ADDR_COMPLETE)) {
3369 rc = -EBUSY;
3370 break;
3374 return rc;
3377 /* offset and length are dword aligned */
3378 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3379 u8 *buf)
3381 int ret = 0;
3382 u32 pagesize = tp->nvram_pagesize;
3383 u32 pagemask = pagesize - 1;
3384 u32 nvram_cmd;
3385 u8 *tmp;
3387 tmp = kmalloc(pagesize, GFP_KERNEL);
3388 if (tmp == NULL)
3389 return -ENOMEM;
3391 while (len) {
3392 int j;
3393 u32 phy_addr, page_off, size;
3395 phy_addr = offset & ~pagemask;
3397 for (j = 0; j < pagesize; j += 4) {
3398 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3399 (__be32 *) (tmp + j));
3400 if (ret)
3401 break;
3403 if (ret)
3404 break;
3406 page_off = offset & pagemask;
3407 size = pagesize;
3408 if (len < size)
3409 size = len;
3411 len -= size;
3413 memcpy(tmp + page_off, buf, size);
3415 offset = offset + (pagesize - page_off);
3417 tg3_enable_nvram_access(tp);
3419 /*
3420 * Before we can erase the flash page, we need
3421 * to issue a special "write enable" command.
3422 */
3423 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3425 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3426 break;
3428 /* Erase the target page */
3429 tw32(NVRAM_ADDR, phy_addr);
3431 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3432 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3434 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3435 break;
3437 /* Issue another write enable to start the write. */
3438 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3440 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3441 break;
3443 for (j = 0; j < pagesize; j += 4) {
3444 __be32 data;
3446 data = *((__be32 *) (tmp + j));
3448 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3450 tw32(NVRAM_ADDR, phy_addr + j);
3452 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3453 NVRAM_CMD_WR;
3455 if (j == 0)
3456 nvram_cmd |= NVRAM_CMD_FIRST;
3457 else if (j == (pagesize - 4))
3458 nvram_cmd |= NVRAM_CMD_LAST;
3460 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3461 if (ret)
3462 break;
3464 if (ret)
3465 break;
3468 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3469 tg3_nvram_exec_cmd(tp, nvram_cmd);
3471 kfree(tmp);
3473 return ret;
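/* A sketch of one pass through the loop above, assuming a power-of-two
 * 256-byte page (the offset & ~pagemask arithmetic requires a power of
 * two): writing 8 bytes at offset 0x104 reads the whole page at
 * phy_addr 0x100 into tmp, patches tmp at page_off 0x04, issues write
 * enable, erases the page, then rewrites all 256 bytes with the first
 * word tagged NVRAM_CMD_FIRST and the last NVRAM_CMD_LAST.
 */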
3476 /* offset and length are dword aligned */
3477 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3478 u8 *buf)
3480 int i, ret = 0;
3482 for (i = 0; i < len; i += 4, offset += 4) {
3483 u32 page_off, phy_addr, nvram_cmd;
3484 __be32 data;
3486 memcpy(&data, buf + i, 4);
3487 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3489 page_off = offset % tp->nvram_pagesize;
3491 phy_addr = tg3_nvram_phys_addr(tp, offset);
3493 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3495 if (page_off == 0 || i == 0)
3496 nvram_cmd |= NVRAM_CMD_FIRST;
3497 if (page_off == (tp->nvram_pagesize - 4))
3498 nvram_cmd |= NVRAM_CMD_LAST;
3500 if (i == (len - 4))
3501 nvram_cmd |= NVRAM_CMD_LAST;
3503 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3504 !tg3_flag(tp, FLASH) ||
3505 !tg3_flag(tp, 57765_PLUS))
3506 tw32(NVRAM_ADDR, phy_addr);
3508 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3509 !tg3_flag(tp, 5755_PLUS) &&
3510 (tp->nvram_jedecnum == JEDEC_ST) &&
3511 (nvram_cmd & NVRAM_CMD_FIRST)) {
3512 u32 cmd;
3514 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3515 ret = tg3_nvram_exec_cmd(tp, cmd);
3516 if (ret)
3517 break;
3519 if (!tg3_flag(tp, FLASH)) {
3520 /* We always do complete word writes to eeprom. */
3521 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3524 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3525 if (ret)
3526 break;
3528 return ret;
3531 /* offset and length are dword aligned */
3532 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3534 int ret;
3536 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3537 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3538 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3539 udelay(40);
3542 if (!tg3_flag(tp, NVRAM)) {
3543 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3544 } else {
3545 u32 grc_mode;
3547 ret = tg3_nvram_lock(tp);
3548 if (ret)
3549 return ret;
3551 tg3_enable_nvram_access(tp);
3552 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3553 tw32(NVRAM_WRITE1, 0x406);
3555 grc_mode = tr32(GRC_MODE);
3556 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3558 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3559 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3560 buf);
3561 } else {
3562 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3563 buf);
3566 grc_mode = tr32(GRC_MODE);
3567 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3569 tg3_disable_nvram_access(tp);
3570 tg3_nvram_unlock(tp);
3573 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3574 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3575 udelay(40);
3578 return ret;
3581 #define RX_CPU_SCRATCH_BASE 0x30000
3582 #define RX_CPU_SCRATCH_SIZE 0x04000
3583 #define TX_CPU_SCRATCH_BASE 0x34000
3584 #define TX_CPU_SCRATCH_SIZE 0x04000
3586 /* tp->lock is held. */
3587 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3589 int i;
3590 const int iters = 10000;
3592 for (i = 0; i < iters; i++) {
3593 tw32(cpu_base + CPU_STATE, 0xffffffff);
3594 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3595 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3596 break;
3597 if (pci_channel_offline(tp->pdev))
3598 return -EBUSY;
3601 return (i == iters) ? -EBUSY : 0;
3604 /* tp->lock is held. */
3605 static int tg3_rxcpu_pause(struct tg3 *tp)
3607 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3609 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3610 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3611 udelay(10);
3613 return rc;
3616 /* tp->lock is held. */
3617 static int tg3_txcpu_pause(struct tg3 *tp)
3619 return tg3_pause_cpu(tp, TX_CPU_BASE);
3622 /* tp->lock is held. */
3623 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3625 tw32(cpu_base + CPU_STATE, 0xffffffff);
3626 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3629 /* tp->lock is held. */
3630 static void tg3_rxcpu_resume(struct tg3 *tp)
3632 tg3_resume_cpu(tp, RX_CPU_BASE);
3635 /* tp->lock is held. */
3636 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3638 int rc;
3640 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3642 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3643 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3645 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3646 return 0;
3648 if (cpu_base == RX_CPU_BASE) {
3649 rc = tg3_rxcpu_pause(tp);
3650 } else {
3651 /*
3652 * There is only an Rx CPU for the 5750 derivative in the
3653 * BCM4785.
3654 */
3655 if (tg3_flag(tp, IS_SSB_CORE))
3656 return 0;
3658 rc = tg3_txcpu_pause(tp);
3661 if (rc) {
3662 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3663 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3664 return -ENODEV;
3667 /* Clear firmware's nvram arbitration. */
3668 if (tg3_flag(tp, NVRAM))
3669 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3670 return 0;
3673 static int tg3_fw_data_len(struct tg3 *tp,
3674 const struct tg3_firmware_hdr *fw_hdr)
3676 int fw_len;
3678 /* Non-fragmented firmware has one firmware header followed by a
3679 * contiguous chunk of data to be written. The length field in that
3680 * header is not the length of the data to be written but the complete
3681 * length of the bss. The data length is therefore determined from
3682 * tp->fw->size minus the headers.
3683 *
3684 * Fragmented firmware has a main header followed by multiple
3685 * fragments. Each fragment is identical to non-fragmented firmware,
3686 * with a firmware header followed by a contiguous chunk of data. In
3687 * the main header, the length field is unused and set to 0xffffffff.
3688 * In each fragment header the length is the entire size of that
3689 * fragment, i.e. fragment data plus header length. The data length is
3690 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3691 */
3692 if (tp->fw_len == 0xffffffff)
3693 fw_len = be32_to_cpu(fw_hdr->len);
3694 else
3695 fw_len = tp->fw->size;
3697 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
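/* A worked example of the length math above (hypothetical sizes, with
 * the 12-byte TG3_FW_HDR_LEN covering the three header fields): a
 * non-fragmented image takes the else branch, so a 1024-byte
 * tp->fw->size yields (1024 - 12) / 4 = 253 data words. A fragment
 * instead uses its own header's len field, again minus the header,
 * divided into words.
 */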
3700 /* tp->lock is held. */
3701 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3702 u32 cpu_scratch_base, int cpu_scratch_size,
3703 const struct tg3_firmware_hdr *fw_hdr)
3705 int err, i;
3706 void (*write_op)(struct tg3 *, u32, u32);
3707 int total_len = tp->fw->size;
3709 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3710 netdev_err(tp->dev,
3711 "%s: Trying to load TX cpu firmware which is 5705\n",
3712 __func__);
3713 return -EINVAL;
3716 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3717 write_op = tg3_write_mem;
3718 else
3719 write_op = tg3_write_indirect_reg32;
3721 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3722 /* It is possible that bootcode is still loading at this point.
3723 * Get the nvram lock first before halting the cpu.
3724 */
3725 int lock_err = tg3_nvram_lock(tp);
3726 err = tg3_halt_cpu(tp, cpu_base);
3727 if (!lock_err)
3728 tg3_nvram_unlock(tp);
3729 if (err)
3730 goto out;
3732 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3733 write_op(tp, cpu_scratch_base + i, 0);
3734 tw32(cpu_base + CPU_STATE, 0xffffffff);
3735 tw32(cpu_base + CPU_MODE,
3736 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3737 } else {
3738 /* Subtract additional main header for fragmented firmware and
3739 * advance to the first fragment.
3740 */
3741 total_len -= TG3_FW_HDR_LEN;
3742 fw_hdr++;
3745 do {
3746 u32 *fw_data = (u32 *)(fw_hdr + 1);
3747 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3748 write_op(tp, cpu_scratch_base +
3749 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3750 (i * sizeof(u32)),
3751 be32_to_cpu(fw_data[i]));
3753 total_len -= be32_to_cpu(fw_hdr->len);
3755 /* Advance to next fragment */
3756 fw_hdr = (struct tg3_firmware_hdr *)
3757 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3758 } while (total_len > 0);
3760 err = 0;
3762 out:
3763 return err;
3766 /* tp->lock is held. */
3767 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3769 int i;
3770 const int iters = 5;
3772 tw32(cpu_base + CPU_STATE, 0xffffffff);
3773 tw32_f(cpu_base + CPU_PC, pc);
3775 for (i = 0; i < iters; i++) {
3776 if (tr32(cpu_base + CPU_PC) == pc)
3777 break;
3778 tw32(cpu_base + CPU_STATE, 0xffffffff);
3779 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3780 tw32_f(cpu_base + CPU_PC, pc);
3781 udelay(1000);
3784 return (i == iters) ? -EBUSY : 0;
3787 /* tp->lock is held. */
3788 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3790 const struct tg3_firmware_hdr *fw_hdr;
3791 int err;
3793 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3795 /* The firmware blob starts with version numbers, followed by
3796 the start address and length. We set the complete length here:
3797 length = end_address_of_bss - start_address_of_text.
3798 The remainder is the blob to be loaded contiguously
3799 from the start address. */
3801 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3802 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3803 fw_hdr);
3804 if (err)
3805 return err;
3807 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3808 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3809 fw_hdr);
3810 if (err)
3811 return err;
3813 /* Now start up only the RX CPU. */
3814 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3815 be32_to_cpu(fw_hdr->base_addr));
3816 if (err) {
3817 netdev_err(tp->dev, "%s: failed to set RX CPU PC; is %08x, "
3818 "should be %08x\n", __func__,
3819 tr32(RX_CPU_BASE + CPU_PC),
3820 be32_to_cpu(fw_hdr->base_addr));
3821 return -ENODEV;
3824 tg3_rxcpu_resume(tp);
3826 return 0;
3829 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3831 const int iters = 1000;
3832 int i;
3833 u32 val;
3835 /* Wait for boot code to complete initialization and enter service
3836 * loop. It is then safe to download service patches.
3837 */
3838 for (i = 0; i < iters; i++) {
3839 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3840 break;
3842 udelay(10);
3845 if (i == iters) {
3846 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3847 return -EBUSY;
3850 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3851 if (val & 0xff) {
3852 netdev_warn(tp->dev,
3853 "Other patches exist. Not downloading EEE patch\n");
3854 return -EEXIST;
3857 return 0;
3860 /* tp->lock is held. */
3861 static void tg3_load_57766_firmware(struct tg3 *tp)
3863 struct tg3_firmware_hdr *fw_hdr;
3865 if (!tg3_flag(tp, NO_NVRAM))
3866 return;
3868 if (tg3_validate_rxcpu_state(tp))
3869 return;
3871 if (!tp->fw)
3872 return;
3874 /* This firmware blob has a different format from older firmware
3875 * releases, as described below. The main difference is that we have
3876 * fragmented data to be written to non-contiguous locations.
3877 *
3878 * In the beginning we have a firmware header identical to other
3879 * firmware, consisting of version, base addr and length. The length
3880 * here is unused and set to 0xffffffff.
3881 *
3882 * This is followed by a series of firmware fragments which are
3883 * individually identical to previous firmware, i.e. they have a
3884 * firmware header followed by the data for that fragment. The version
3885 * field of the individual fragment header is unused.
3886 */
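/* An illustrative sketch of that layout (fragment sizes hypothetical):
 *
 * [ main hdr: version | base_addr | len = 0xffffffff ]
 * [ frag hdr: version (unused) | base_addr | len = N1 ][ N1 - hdr bytes of data ]
 * [ frag hdr: version (unused) | base_addr | len = N2 ][ N2 - hdr bytes of data ]
 *
 * tg3_load_firmware_cpu() consumes this by skipping the main header and
 * then advancing fw_hdr by each fragment's len until total_len reaches 0.
 */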
3888 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3889 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3890 return;
3892 if (tg3_rxcpu_pause(tp))
3893 return;
3895 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3896 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3898 tg3_rxcpu_resume(tp);
3901 /* tp->lock is held. */
3902 static int tg3_load_tso_firmware(struct tg3 *tp)
3904 const struct tg3_firmware_hdr *fw_hdr;
3905 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3906 int err;
3908 if (!tg3_flag(tp, FW_TSO))
3909 return 0;
3911 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3913 /* The firmware blob starts with version numbers, followed by
3914 the start address and length. We set the complete length here:
3915 length = end_address_of_bss - start_address_of_text.
3916 The remainder is the blob to be loaded contiguously
3917 from the start address. */
3919 cpu_scratch_size = tp->fw_len;
3921 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3922 cpu_base = RX_CPU_BASE;
3923 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3924 } else {
3925 cpu_base = TX_CPU_BASE;
3926 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3927 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3930 err = tg3_load_firmware_cpu(tp, cpu_base,
3931 cpu_scratch_base, cpu_scratch_size,
3932 fw_hdr);
3933 if (err)
3934 return err;
3936 /* Now start up the CPU. */
3937 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3938 be32_to_cpu(fw_hdr->base_addr));
3939 if (err) {
3940 netdev_err(tp->dev,
3941 "%s fails to set CPU PC, is %08x should be %08x\n",
3942 __func__, tr32(cpu_base + CPU_PC),
3943 be32_to_cpu(fw_hdr->base_addr));
3944 return -ENODEV;
3947 tg3_resume_cpu(tp, cpu_base);
3948 return 0;
3952 /* tp->lock is held. */
3953 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3955 u32 addr_high, addr_low;
3956 int i;
3958 addr_high = ((tp->dev->dev_addr[0] << 8) |
3959 tp->dev->dev_addr[1]);
3960 addr_low = ((tp->dev->dev_addr[2] << 24) |
3961 (tp->dev->dev_addr[3] << 16) |
3962 (tp->dev->dev_addr[4] << 8) |
3963 (tp->dev->dev_addr[5] << 0));
3964 for (i = 0; i < 4; i++) {
3965 if (i == 1 && skip_mac_1)
3966 continue;
3967 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3968 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3971 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3972 tg3_asic_rev(tp) == ASIC_REV_5704) {
3973 for (i = 0; i < 12; i++) {
3974 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3975 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3979 addr_high = (tp->dev->dev_addr[0] +
3980 tp->dev->dev_addr[1] +
3981 tp->dev->dev_addr[2] +
3982 tp->dev->dev_addr[3] +
3983 tp->dev->dev_addr[4] +
3984 tp->dev->dev_addr[5]) &
3985 TX_BACKOFF_SEED_MASK;
3986 tw32(MAC_TX_BACKOFF_SEED, addr_high);
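/* A worked example of the packing above (illustrative address): for
 * dev_addr 00:10:18:aa:bb:cc, addr_high = 0x0010 and addr_low =
 * 0x18aabbcc, mirrored into all four MAC_ADDR_* slots (and the twelve
 * extended slots on 5703/5704); the TX backoff seed is the byte sum
 * 0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc = 0x259, masked with
 * TX_BACKOFF_SEED_MASK.
 */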
3989 static void tg3_enable_register_access(struct tg3 *tp)
3991 /*
3992 * Make sure register accesses (indirect or otherwise) will function
3993 * correctly.
3994 */
3995 pci_write_config_dword(tp->pdev,
3996 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3999 static int tg3_power_up(struct tg3 *tp)
4001 int err;
4003 tg3_enable_register_access(tp);
4005 err = pci_set_power_state(tp->pdev, PCI_D0);
4006 if (!err) {
4007 /* Switch out of Vaux if it is a NIC */
4008 tg3_pwrsrc_switch_to_vmain(tp);
4009 } else {
4010 netdev_err(tp->dev, "Transition to D0 failed\n");
4013 return err;
4016 static int tg3_setup_phy(struct tg3 *, bool);
4018 static int tg3_power_down_prepare(struct tg3 *tp)
4020 u32 misc_host_ctrl;
4021 bool device_should_wake, do_low_power;
4023 tg3_enable_register_access(tp);
4025 /* Restore the CLKREQ setting. */
4026 if (tg3_flag(tp, CLKREQ_BUG))
4027 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4028 PCI_EXP_LNKCTL_CLKREQ_EN);
4030 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4031 tw32(TG3PCI_MISC_HOST_CTRL,
4032 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4034 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4035 tg3_flag(tp, WOL_ENABLE);
4037 if (tg3_flag(tp, USE_PHYLIB)) {
4038 do_low_power = false;
4039 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4040 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4041 struct phy_device *phydev;
4042 u32 phyid, advertising;
4044 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
4046 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4048 tp->link_config.speed = phydev->speed;
4049 tp->link_config.duplex = phydev->duplex;
4050 tp->link_config.autoneg = phydev->autoneg;
4051 tp->link_config.advertising = phydev->advertising;
4053 advertising = ADVERTISED_TP |
4054 ADVERTISED_Pause |
4055 ADVERTISED_Autoneg |
4056 ADVERTISED_10baseT_Half;
4058 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4059 if (tg3_flag(tp, WOL_SPEED_100MB))
4060 advertising |=
4061 ADVERTISED_100baseT_Half |
4062 ADVERTISED_100baseT_Full |
4063 ADVERTISED_10baseT_Full;
4064 else
4065 advertising |= ADVERTISED_10baseT_Full;
4068 phydev->advertising = advertising;
4070 phy_start_aneg(phydev);
4072 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4073 if (phyid != PHY_ID_BCMAC131) {
4074 phyid &= PHY_BCM_OUI_MASK;
4075 if (phyid == PHY_BCM_OUI_1 ||
4076 phyid == PHY_BCM_OUI_2 ||
4077 phyid == PHY_BCM_OUI_3)
4078 do_low_power = true;
4081 } else {
4082 do_low_power = true;
4084 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4085 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4087 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4088 tg3_setup_phy(tp, false);
4091 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4092 u32 val;
4094 val = tr32(GRC_VCPU_EXT_CTRL);
4095 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4096 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4097 int i;
4098 u32 val;
4100 for (i = 0; i < 200; i++) {
4101 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4102 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4103 break;
4104 msleep(1);
4107 if (tg3_flag(tp, WOL_CAP))
4108 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4109 WOL_DRV_STATE_SHUTDOWN |
4110 WOL_DRV_WOL |
4111 WOL_SET_MAGIC_PKT);
4113 if (device_should_wake) {
4114 u32 mac_mode;
4116 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4117 if (do_low_power &&
4118 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4119 tg3_phy_auxctl_write(tp,
4120 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4121 MII_TG3_AUXCTL_PCTL_WOL_EN |
4122 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4123 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4124 udelay(40);
4127 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4128 mac_mode = MAC_MODE_PORT_MODE_GMII;
4129 else if (tp->phy_flags &
4130 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4131 if (tp->link_config.active_speed == SPEED_1000)
4132 mac_mode = MAC_MODE_PORT_MODE_GMII;
4133 else
4134 mac_mode = MAC_MODE_PORT_MODE_MII;
4135 } else
4136 mac_mode = MAC_MODE_PORT_MODE_MII;
4138 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4139 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4140 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4141 SPEED_100 : SPEED_10;
4142 if (tg3_5700_link_polarity(tp, speed))
4143 mac_mode |= MAC_MODE_LINK_POLARITY;
4144 else
4145 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4147 } else {
4148 mac_mode = MAC_MODE_PORT_MODE_TBI;
4151 if (!tg3_flag(tp, 5750_PLUS))
4152 tw32(MAC_LED_CTRL, tp->led_ctrl);
4154 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4155 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4156 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4157 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4159 if (tg3_flag(tp, ENABLE_APE))
4160 mac_mode |= MAC_MODE_APE_TX_EN |
4161 MAC_MODE_APE_RX_EN |
4162 MAC_MODE_TDE_ENABLE;
4164 tw32_f(MAC_MODE, mac_mode);
4165 udelay(100);
4167 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4168 udelay(10);
4171 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4172 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4173 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4174 u32 base_val;
4176 base_val = tp->pci_clock_ctrl;
4177 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4178 CLOCK_CTRL_TXCLK_DISABLE);
4180 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4181 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4182 } else if (tg3_flag(tp, 5780_CLASS) ||
4183 tg3_flag(tp, CPMU_PRESENT) ||
4184 tg3_asic_rev(tp) == ASIC_REV_5906) {
4185 /* do nothing */
4186 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4187 u32 newbits1, newbits2;
4189 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4190 tg3_asic_rev(tp) == ASIC_REV_5701) {
4191 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4192 CLOCK_CTRL_TXCLK_DISABLE |
4193 CLOCK_CTRL_ALTCLK);
4194 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4195 } else if (tg3_flag(tp, 5705_PLUS)) {
4196 newbits1 = CLOCK_CTRL_625_CORE;
4197 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4198 } else {
4199 newbits1 = CLOCK_CTRL_ALTCLK;
4200 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4203 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4204 40);
4206 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4207 40);
4209 if (!tg3_flag(tp, 5705_PLUS)) {
4210 u32 newbits3;
4212 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4213 tg3_asic_rev(tp) == ASIC_REV_5701) {
4214 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4215 CLOCK_CTRL_TXCLK_DISABLE |
4216 CLOCK_CTRL_44MHZ_CORE);
4217 } else {
4218 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4221 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4222 tp->pci_clock_ctrl | newbits3, 40);
4226 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4227 tg3_power_down_phy(tp, do_low_power);
4229 tg3_frob_aux_power(tp, true);
4231 /* Workaround for unstable PLL clock */
4232 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4233 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4234 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4235 u32 val = tr32(0x7d00);
4237 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4238 tw32(0x7d00, val);
4239 if (!tg3_flag(tp, ENABLE_ASF)) {
4240 int err;
4242 err = tg3_nvram_lock(tp);
4243 tg3_halt_cpu(tp, RX_CPU_BASE);
4244 if (!err)
4245 tg3_nvram_unlock(tp);
4249 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4251 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4253 return 0;
4256 static void tg3_power_down(struct tg3 *tp)
4258 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4259 pci_set_power_state(tp->pdev, PCI_D3hot);
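/* WOL_ENABLE is normally armed ahead of time from user space (for
 * example "ethtool -s <iface> wol g" reaching the driver's ethtool
 * wake-on-LAN handler), so at this point the flag only decides whether
 * the device stays wake-capable once it is dropped into D3hot.
 */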
4262 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4264 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4265 case MII_TG3_AUX_STAT_10HALF:
4266 *speed = SPEED_10;
4267 *duplex = DUPLEX_HALF;
4268 break;
4270 case MII_TG3_AUX_STAT_10FULL:
4271 *speed = SPEED_10;
4272 *duplex = DUPLEX_FULL;
4273 break;
4275 case MII_TG3_AUX_STAT_100HALF:
4276 *speed = SPEED_100;
4277 *duplex = DUPLEX_HALF;
4278 break;
4280 case MII_TG3_AUX_STAT_100FULL:
4281 *speed = SPEED_100;
4282 *duplex = DUPLEX_FULL;
4283 break;
4285 case MII_TG3_AUX_STAT_1000HALF:
4286 *speed = SPEED_1000;
4287 *duplex = DUPLEX_HALF;
4288 break;
4290 case MII_TG3_AUX_STAT_1000FULL:
4291 *speed = SPEED_1000;
4292 *duplex = DUPLEX_FULL;
4293 break;
4295 default:
4296 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4297 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4298 SPEED_10;
4299 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4300 DUPLEX_HALF;
4301 break;
4303 *speed = SPEED_UNKNOWN;
4304 *duplex = DUPLEX_UNKNOWN;
4305 break;
4309 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4311 int err = 0;
4312 u32 val, new_adv;
4314 new_adv = ADVERTISE_CSMA;
4315 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4316 new_adv |= mii_advertise_flowctrl(flowctrl);
4318 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4319 if (err)
4320 goto done;
4322 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4323 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4325 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4326 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4327 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4329 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4330 if (err)
4331 goto done;
4334 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4335 goto done;
4337 tw32(TG3_CPMU_EEE_MODE,
4338 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4340 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4341 if (!err) {
4342 u32 err2;
4344 val = 0;
4345 /* Advertise 100-BaseTX EEE ability */
4346 if (advertise & ADVERTISED_100baseT_Full)
4347 val |= MDIO_AN_EEE_ADV_100TX;
4348 /* Advertise 1000-BaseT EEE ability */
4349 if (advertise & ADVERTISED_1000baseT_Full)
4350 val |= MDIO_AN_EEE_ADV_1000T;
4352 if (!tp->eee.eee_enabled) {
4353 val = 0;
4354 tp->eee.advertised = 0;
4355 } else {
4356 tp->eee.advertised = advertise &
4357 (ADVERTISED_100baseT_Full |
4358 ADVERTISED_1000baseT_Full);
4361 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4362 if (err)
4363 val = 0;
4365 switch (tg3_asic_rev(tp)) {
4366 case ASIC_REV_5717:
4367 case ASIC_REV_57765:
4368 case ASIC_REV_57766:
4369 case ASIC_REV_5719:
4370 /* If we advertised any EEE abilities above... */
4371 if (val)
4372 val = MII_TG3_DSP_TAP26_ALNOKO |
4373 MII_TG3_DSP_TAP26_RMRXSTO |
4374 MII_TG3_DSP_TAP26_OPCSINPT;
4375 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4376 /* Fall through */
4377 case ASIC_REV_5720:
4378 case ASIC_REV_5762:
4379 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4380 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4381 MII_TG3_DSP_CH34TP2_HIBW01);
4384 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4385 if (!err)
4386 err = err2;
4389 done:
4390 return err;
4393 static void tg3_phy_copper_begin(struct tg3 *tp)
4395 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4396 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4397 u32 adv, fc;
4399 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4400 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4401 adv = ADVERTISED_10baseT_Half |
4402 ADVERTISED_10baseT_Full;
4403 if (tg3_flag(tp, WOL_SPEED_100MB))
4404 adv |= ADVERTISED_100baseT_Half |
4405 ADVERTISED_100baseT_Full;
4406 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4407 adv |= ADVERTISED_1000baseT_Half |
4408 ADVERTISED_1000baseT_Full;
4410 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4411 } else {
4412 adv = tp->link_config.advertising;
4413 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4414 adv &= ~(ADVERTISED_1000baseT_Half |
4415 ADVERTISED_1000baseT_Full);
4417 fc = tp->link_config.flowctrl;
4420 tg3_phy_autoneg_cfg(tp, adv, fc);
4422 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4423 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4424 /* Normally during power down we want to autonegotiate
4425 * the lowest possible speed for WOL. However, to avoid
4426 * link flap, we leave it untouched.
4427 */
4428 return;
4431 tg3_writephy(tp, MII_BMCR,
4432 BMCR_ANENABLE | BMCR_ANRESTART);
4433 } else {
4434 int i;
4435 u32 bmcr, orig_bmcr;
4437 tp->link_config.active_speed = tp->link_config.speed;
4438 tp->link_config.active_duplex = tp->link_config.duplex;
4440 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4441 /* With autoneg disabled, 5715 only links up when the
4442 * advertisement register has the configured speed
4443 * enabled.
4444 */
4445 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4448 bmcr = 0;
4449 switch (tp->link_config.speed) {
4450 default:
4451 case SPEED_10:
4452 break;
4454 case SPEED_100:
4455 bmcr |= BMCR_SPEED100;
4456 break;
4458 case SPEED_1000:
4459 bmcr |= BMCR_SPEED1000;
4460 break;
4463 if (tp->link_config.duplex == DUPLEX_FULL)
4464 bmcr |= BMCR_FULLDPLX;
4466 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4467 (bmcr != orig_bmcr)) {
4468 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4469 for (i = 0; i < 1500; i++) {
4470 u32 tmp;
4472 udelay(10);
4473 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4474 tg3_readphy(tp, MII_BMSR, &tmp))
4475 continue;
4476 if (!(tmp & BMSR_LSTATUS)) {
4477 udelay(40);
4478 break;
4481 tg3_writephy(tp, MII_BMCR, bmcr);
4482 udelay(40);
4487 static int tg3_phy_pull_config(struct tg3 *tp)
4489 int err;
4490 u32 val;
4492 err = tg3_readphy(tp, MII_BMCR, &val);
4493 if (err)
4494 goto done;
4496 if (!(val & BMCR_ANENABLE)) {
4497 tp->link_config.autoneg = AUTONEG_DISABLE;
4498 tp->link_config.advertising = 0;
4499 tg3_flag_clear(tp, PAUSE_AUTONEG);
4501 err = -EIO;
4503 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4504 case 0:
4505 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4506 goto done;
4508 tp->link_config.speed = SPEED_10;
4509 break;
4510 case BMCR_SPEED100:
4511 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4512 goto done;
4514 tp->link_config.speed = SPEED_100;
4515 break;
4516 case BMCR_SPEED1000:
4517 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4518 tp->link_config.speed = SPEED_1000;
4519 break;
4521 /* Fall through */
4522 default:
4523 goto done;
4526 if (val & BMCR_FULLDPLX)
4527 tp->link_config.duplex = DUPLEX_FULL;
4528 else
4529 tp->link_config.duplex = DUPLEX_HALF;
4531 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4533 err = 0;
4534 goto done;
4537 tp->link_config.autoneg = AUTONEG_ENABLE;
4538 tp->link_config.advertising = ADVERTISED_Autoneg;
4539 tg3_flag_set(tp, PAUSE_AUTONEG);
4541 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4542 u32 adv;
4544 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4545 if (err)
4546 goto done;
4548 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4549 tp->link_config.advertising |= adv | ADVERTISED_TP;
4551 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4552 } else {
4553 tp->link_config.advertising |= ADVERTISED_FIBRE;
4556 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4557 u32 adv;
4559 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4560 err = tg3_readphy(tp, MII_CTRL1000, &val);
4561 if (err)
4562 goto done;
4564 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4565 } else {
4566 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4567 if (err)
4568 goto done;
4570 adv = tg3_decode_flowctrl_1000X(val);
4571 tp->link_config.flowctrl = adv;
4573 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4574 adv = mii_adv_to_ethtool_adv_x(val);
4577 tp->link_config.advertising |= adv;
4580 done:
4581 return err;
4584 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4586 int err;
4588 /* Turn off tap power management. */
4589 /* Set Extended packet length bit */
4590 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4592 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4593 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4594 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4595 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4596 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4598 udelay(40);
4600 return err;
4603 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4605 struct ethtool_eee eee;
4607 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4608 return true;
4610 tg3_eee_pull_config(tp, &eee);
4612 if (tp->eee.eee_enabled) {
4613 if (tp->eee.advertised != eee.advertised ||
4614 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4615 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4616 return false;
4617 } else {
4618 /* EEE is disabled but we're advertising */
4619 if (eee.advertised)
4620 return false;
4623 return true;
4626 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4628 u32 advmsk, tgtadv, advertising;
4630 advertising = tp->link_config.advertising;
4631 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4633 advmsk = ADVERTISE_ALL;
4634 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4635 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4636 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4639 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4640 return false;
4642 if ((*lcladv & advmsk) != tgtadv)
4643 return false;
4645 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4646 u32 tg3_ctrl;
4648 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4650 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4651 return false;
4653 if (tgtadv &&
4654 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4655 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4656 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4657 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4658 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4659 } else {
4660 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4663 if (tg3_ctrl != tgtadv)
4664 return false;
4667 return true;
4670 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4672 u32 lpeth = 0;
4674 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4675 u32 val;
4677 if (tg3_readphy(tp, MII_STAT1000, &val))
4678 return false;
4680 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4683 if (tg3_readphy(tp, MII_LPA, rmtadv))
4684 return false;
4686 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4687 tp->link_config.rmt_adv = lpeth;
4689 return true;
4692 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4694 if (curr_link_up != tp->link_up) {
4695 if (curr_link_up) {
4696 netif_carrier_on(tp->dev);
4697 } else {
4698 netif_carrier_off(tp->dev);
4699 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4700 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4703 tg3_link_report(tp);
4704 return true;
4707 return false;
4710 static void tg3_clear_mac_status(struct tg3 *tp)
4712 tw32(MAC_EVENT, 0);
4714 tw32_f(MAC_STATUS,
4715 MAC_STATUS_SYNC_CHANGED |
4716 MAC_STATUS_CFG_CHANGED |
4717 MAC_STATUS_MI_COMPLETION |
4718 MAC_STATUS_LNKSTATE_CHANGED);
4719 udelay(40);
4722 static void tg3_setup_eee(struct tg3 *tp)
4724 u32 val;
4726 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4727 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4728 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4729 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4731 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4733 tw32_f(TG3_CPMU_EEE_CTRL,
4734 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4736 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4737 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4738 TG3_CPMU_EEEMD_LPI_IN_RX |
4739 TG3_CPMU_EEEMD_EEE_ENABLE;
4741 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4742 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4744 if (tg3_flag(tp, ENABLE_APE))
4745 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4747 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4749 tw32_f(TG3_CPMU_EEE_DBTMR1,
4750 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4751 (tp->eee.tx_lpi_timer & 0xffff));
4753 tw32_f(TG3_CPMU_EEE_DBTMR2,
4754 TG3_CPMU_DBTMR2_APE_TX_2047US |
4755 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4758 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4760 bool current_link_up;
4761 u32 bmsr, val;
4762 u32 lcl_adv, rmt_adv;
4763 u16 current_speed;
4764 u8 current_duplex;
4765 int i, err;
4767 tg3_clear_mac_status(tp);
4769 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4770 tw32_f(MAC_MI_MODE,
4771 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4772 udelay(80);
4775 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4777 /* Some third-party PHYs need to be reset on link going
4778 * down.
4779 */
4780 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4781 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4782 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4783 tp->link_up) {
4784 tg3_readphy(tp, MII_BMSR, &bmsr);
4785 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4786 !(bmsr & BMSR_LSTATUS))
4787 force_reset = true;
4789 if (force_reset)
4790 tg3_phy_reset(tp);
4792 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4793 tg3_readphy(tp, MII_BMSR, &bmsr);
4794 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4795 !tg3_flag(tp, INIT_COMPLETE))
4796 bmsr = 0;
4798 if (!(bmsr & BMSR_LSTATUS)) {
4799 err = tg3_init_5401phy_dsp(tp);
4800 if (err)
4801 return err;
4803 tg3_readphy(tp, MII_BMSR, &bmsr);
4804 for (i = 0; i < 1000; i++) {
4805 udelay(10);
4806 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4807 (bmsr & BMSR_LSTATUS)) {
4808 udelay(40);
4809 break;
4813 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4814 TG3_PHY_REV_BCM5401_B0 &&
4815 !(bmsr & BMSR_LSTATUS) &&
4816 tp->link_config.active_speed == SPEED_1000) {
4817 err = tg3_phy_reset(tp);
4818 if (!err)
4819 err = tg3_init_5401phy_dsp(tp);
4820 if (err)
4821 return err;
4824 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4825 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4826 /* 5701 {A0,B0} CRC bug workaround */
4827 tg3_writephy(tp, 0x15, 0x0a75);
4828 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4829 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4830 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4833 /* Clear pending interrupts... */
4834 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4835 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4837 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4838 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4839 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4840 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4842 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4843 tg3_asic_rev(tp) == ASIC_REV_5701) {
4844 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4845 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4846 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4847 else
4848 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4851 current_link_up = false;
4852 current_speed = SPEED_UNKNOWN;
4853 current_duplex = DUPLEX_UNKNOWN;
4854 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4855 tp->link_config.rmt_adv = 0;
4857 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4858 err = tg3_phy_auxctl_read(tp,
4859 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4860 &val);
4861 if (!err && !(val & (1 << 10))) {
4862 tg3_phy_auxctl_write(tp,
4863 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4864 val | (1 << 10));
4865 goto relink;
4869 bmsr = 0;
4870 for (i = 0; i < 100; i++) {
4871 tg3_readphy(tp, MII_BMSR, &bmsr);
4872 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4873 (bmsr & BMSR_LSTATUS))
4874 break;
4875 udelay(40);
4878 if (bmsr & BMSR_LSTATUS) {
4879 u32 aux_stat, bmcr;
4881 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4882 for (i = 0; i < 2000; i++) {
4883 udelay(10);
4884 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4885 aux_stat)
4886 break;
4889 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4890 &current_speed,
4891 &current_duplex);
4893 bmcr = 0;
4894 for (i = 0; i < 200; i++) {
4895 tg3_readphy(tp, MII_BMCR, &bmcr);
4896 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4897 continue;
4898 if (bmcr && bmcr != 0x7fff)
4899 break;
4900 udelay(10);
4903 lcl_adv = 0;
4904 rmt_adv = 0;
4906 tp->link_config.active_speed = current_speed;
4907 tp->link_config.active_duplex = current_duplex;
4909 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4910 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4912 if ((bmcr & BMCR_ANENABLE) &&
4913 eee_config_ok &&
4914 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4915 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4916 current_link_up = true;
4918 /* EEE settings changes take effect only after a phy
4919 * reset. If we have skipped a reset due to Link Flap
4920 * Avoidance being enabled, do it now.
4921 */
4922 if (!eee_config_ok &&
4923 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4924 !force_reset) {
4925 tg3_setup_eee(tp);
4926 tg3_phy_reset(tp);
4928 } else {
4929 if (!(bmcr & BMCR_ANENABLE) &&
4930 tp->link_config.speed == current_speed &&
4931 tp->link_config.duplex == current_duplex) {
4932 current_link_up = true;
4936 if (current_link_up &&
4937 tp->link_config.active_duplex == DUPLEX_FULL) {
4938 u32 reg, bit;
4940 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4941 reg = MII_TG3_FET_GEN_STAT;
4942 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4943 } else {
4944 reg = MII_TG3_EXT_STAT;
4945 bit = MII_TG3_EXT_STAT_MDIX;
4948 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4949 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4951 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4955 relink:
4956 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4957 tg3_phy_copper_begin(tp);
4959 if (tg3_flag(tp, ROBOSWITCH)) {
4960 current_link_up = true;
4961 /* FIXME: use 100 Mbit/s when a BCM5325 switch is used */
4962 current_speed = SPEED_1000;
4963 current_duplex = DUPLEX_FULL;
4964 tp->link_config.active_speed = current_speed;
4965 tp->link_config.active_duplex = current_duplex;
4968 tg3_readphy(tp, MII_BMSR, &bmsr);
4969 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4970 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4971 current_link_up = true;
4974 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4975 if (current_link_up) {
4976 if (tp->link_config.active_speed == SPEED_100 ||
4977 tp->link_config.active_speed == SPEED_10)
4978 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4979 else
4980 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4981 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4982 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4983 else
4984 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4986 /* In order for the 5750 core in BCM4785 chip to work properly
4987 * in RGMII mode, the Led Control Register must be set up.
4988 */
4989 if (tg3_flag(tp, RGMII_MODE)) {
4990 u32 led_ctrl = tr32(MAC_LED_CTRL);
4991 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4993 if (tp->link_config.active_speed == SPEED_10)
4994 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4995 else if (tp->link_config.active_speed == SPEED_100)
4996 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4997 LED_CTRL_100MBPS_ON);
4998 else if (tp->link_config.active_speed == SPEED_1000)
4999 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5000 LED_CTRL_1000MBPS_ON);
5002 tw32(MAC_LED_CTRL, led_ctrl);
5003 udelay(40);
5006 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5007 if (tp->link_config.active_duplex == DUPLEX_HALF)
5008 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5010 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5011 if (current_link_up &&
5012 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5013 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5014 else
5015 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5018 /* ??? Without this setting Netgear GA302T PHY does not
5019 * ??? send/receive packets...
5020 */
5021 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5022 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5023 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5024 tw32_f(MAC_MI_MODE, tp->mi_mode);
5025 udelay(80);
5028 tw32_f(MAC_MODE, tp->mac_mode);
5029 udelay(40);
5031 tg3_phy_eee_adjust(tp, current_link_up);
5033 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5034 /* Polled via timer. */
5035 tw32_f(MAC_EVENT, 0);
5036 } else {
5037 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5039 udelay(40);
5041 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5042 current_link_up &&
5043 tp->link_config.active_speed == SPEED_1000 &&
5044 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5045 udelay(120);
5046 tw32_f(MAC_STATUS,
5047 (MAC_STATUS_SYNC_CHANGED |
5048 MAC_STATUS_CFG_CHANGED));
5049 udelay(40);
5050 tg3_write_mem(tp,
5051 NIC_SRAM_FIRMWARE_MBOX,
5052 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5055 /* Prevent send BD corruption. */
5056 if (tg3_flag(tp, CLKREQ_BUG)) {
5057 if (tp->link_config.active_speed == SPEED_100 ||
5058 tp->link_config.active_speed == SPEED_10)
5059 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5060 PCI_EXP_LNKCTL_CLKREQ_EN);
5061 else
5062 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5063 PCI_EXP_LNKCTL_CLKREQ_EN);
5066 tg3_test_and_report_link_chg(tp, current_link_up);
5068 return 0;
5071 struct tg3_fiber_aneginfo {
5072 int state;
5073 #define ANEG_STATE_UNKNOWN 0
5074 #define ANEG_STATE_AN_ENABLE 1
5075 #define ANEG_STATE_RESTART_INIT 2
5076 #define ANEG_STATE_RESTART 3
5077 #define ANEG_STATE_DISABLE_LINK_OK 4
5078 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5079 #define ANEG_STATE_ABILITY_DETECT 6
5080 #define ANEG_STATE_ACK_DETECT_INIT 7
5081 #define ANEG_STATE_ACK_DETECT 8
5082 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5083 #define ANEG_STATE_COMPLETE_ACK 10
5084 #define ANEG_STATE_IDLE_DETECT_INIT 11
5085 #define ANEG_STATE_IDLE_DETECT 12
5086 #define ANEG_STATE_LINK_OK 13
5087 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5088 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5090 u32 flags;
5091 #define MR_AN_ENABLE 0x00000001
5092 #define MR_RESTART_AN 0x00000002
5093 #define MR_AN_COMPLETE 0x00000004
5094 #define MR_PAGE_RX 0x00000008
5095 #define MR_NP_LOADED 0x00000010
5096 #define MR_TOGGLE_TX 0x00000020
5097 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5098 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5099 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5100 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5101 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5102 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5103 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5104 #define MR_TOGGLE_RX 0x00002000
5105 #define MR_NP_RX 0x00004000
5107 #define MR_LINK_OK 0x80000000
5109 unsigned long link_time, cur_time;
5111 u32 ability_match_cfg;
5112 int ability_match_count;
5114 char ability_match, idle_match, ack_match;
5116 u32 txconfig, rxconfig;
5117 #define ANEG_CFG_NP 0x00000080
5118 #define ANEG_CFG_ACK 0x00000040
5119 #define ANEG_CFG_RF2 0x00000020
5120 #define ANEG_CFG_RF1 0x00000010
5121 #define ANEG_CFG_PS2 0x00000001
5122 #define ANEG_CFG_PS1 0x00008000
5123 #define ANEG_CFG_HD 0x00004000
5124 #define ANEG_CFG_FD 0x00002000
5125 #define ANEG_CFG_INVAL 0x00001f06
5128 #define ANEG_OK 0
5129 #define ANEG_DONE 1
5130 #define ANEG_TIMER_ENAB 2
5131 #define ANEG_FAILED -1
5133 #define ANEG_STATE_SETTLE_TIME 10000
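/* Note: cur_time advances once per state-machine invocation, and
 * fiber_autoneg() below polls with udelay(1) between invocations, so
 * ANEG_STATE_SETTLE_TIME corresponds to roughly 10 ms of real time.
 */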
5135 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5136 struct tg3_fiber_aneginfo *ap)
5138 u16 flowctrl;
5139 unsigned long delta;
5140 u32 rx_cfg_reg;
5141 int ret;
5143 if (ap->state == ANEG_STATE_UNKNOWN) {
5144 ap->rxconfig = 0;
5145 ap->link_time = 0;
5146 ap->cur_time = 0;
5147 ap->ability_match_cfg = 0;
5148 ap->ability_match_count = 0;
5149 ap->ability_match = 0;
5150 ap->idle_match = 0;
5151 ap->ack_match = 0;
5153 ap->cur_time++;
5155 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5156 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5158 if (rx_cfg_reg != ap->ability_match_cfg) {
5159 ap->ability_match_cfg = rx_cfg_reg;
5160 ap->ability_match = 0;
5161 ap->ability_match_count = 0;
5162 } else {
5163 if (++ap->ability_match_count > 1) {
5164 ap->ability_match = 1;
5165 ap->ability_match_cfg = rx_cfg_reg;
5168 if (rx_cfg_reg & ANEG_CFG_ACK)
5169 ap->ack_match = 1;
5170 else
5171 ap->ack_match = 0;
5173 ap->idle_match = 0;
5174 } else {
5175 ap->idle_match = 1;
5176 ap->ability_match_cfg = 0;
5177 ap->ability_match_count = 0;
5178 ap->ability_match = 0;
5179 ap->ack_match = 0;
5181 rx_cfg_reg = 0;
5184 ap->rxconfig = rx_cfg_reg;
5185 ret = ANEG_OK;
5187 switch (ap->state) {
5188 case ANEG_STATE_UNKNOWN:
5189 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5190 ap->state = ANEG_STATE_AN_ENABLE;
5192 /* fallthru */
5193 case ANEG_STATE_AN_ENABLE:
5194 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5195 if (ap->flags & MR_AN_ENABLE) {
5196 ap->link_time = 0;
5197 ap->cur_time = 0;
5198 ap->ability_match_cfg = 0;
5199 ap->ability_match_count = 0;
5200 ap->ability_match = 0;
5201 ap->idle_match = 0;
5202 ap->ack_match = 0;
5204 ap->state = ANEG_STATE_RESTART_INIT;
5205 } else {
5206 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5208 break;
5210 case ANEG_STATE_RESTART_INIT:
5211 ap->link_time = ap->cur_time;
5212 ap->flags &= ~(MR_NP_LOADED);
5213 ap->txconfig = 0;
5214 tw32(MAC_TX_AUTO_NEG, 0);
5215 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5216 tw32_f(MAC_MODE, tp->mac_mode);
5217 udelay(40);
5219 ret = ANEG_TIMER_ENAB;
5220 ap->state = ANEG_STATE_RESTART;
5222 /* fallthru */
5223 case ANEG_STATE_RESTART:
5224 delta = ap->cur_time - ap->link_time;
5225 if (delta > ANEG_STATE_SETTLE_TIME)
5226 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5227 else
5228 ret = ANEG_TIMER_ENAB;
5229 break;
5231 case ANEG_STATE_DISABLE_LINK_OK:
5232 ret = ANEG_DONE;
5233 break;
5235 case ANEG_STATE_ABILITY_DETECT_INIT:
5236 ap->flags &= ~(MR_TOGGLE_TX);
5237 ap->txconfig = ANEG_CFG_FD;
5238 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5239 if (flowctrl & ADVERTISE_1000XPAUSE)
5240 ap->txconfig |= ANEG_CFG_PS1;
5241 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5242 ap->txconfig |= ANEG_CFG_PS2;
5243 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5244 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5245 tw32_f(MAC_MODE, tp->mac_mode);
5246 udelay(40);
5248 ap->state = ANEG_STATE_ABILITY_DETECT;
5249 break;
5251 case ANEG_STATE_ABILITY_DETECT:
5252 if (ap->ability_match != 0 && ap->rxconfig != 0)
5253 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5254 break;
5256 case ANEG_STATE_ACK_DETECT_INIT:
5257 ap->txconfig |= ANEG_CFG_ACK;
5258 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5259 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5260 tw32_f(MAC_MODE, tp->mac_mode);
5261 udelay(40);
5263 ap->state = ANEG_STATE_ACK_DETECT;
5265 /* fallthru */
5266 case ANEG_STATE_ACK_DETECT:
5267 if (ap->ack_match != 0) {
5268 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5269 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5270 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5271 } else {
5272 ap->state = ANEG_STATE_AN_ENABLE;
5274 } else if (ap->ability_match != 0 &&
5275 ap->rxconfig == 0) {
5276 ap->state = ANEG_STATE_AN_ENABLE;
5278 break;
5280 case ANEG_STATE_COMPLETE_ACK_INIT:
5281 if (ap->rxconfig & ANEG_CFG_INVAL) {
5282 ret = ANEG_FAILED;
5283 break;
5285 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5286 MR_LP_ADV_HALF_DUPLEX |
5287 MR_LP_ADV_SYM_PAUSE |
5288 MR_LP_ADV_ASYM_PAUSE |
5289 MR_LP_ADV_REMOTE_FAULT1 |
5290 MR_LP_ADV_REMOTE_FAULT2 |
5291 MR_LP_ADV_NEXT_PAGE |
5292 MR_TOGGLE_RX |
5293 MR_NP_RX);
5294 if (ap->rxconfig & ANEG_CFG_FD)
5295 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5296 if (ap->rxconfig & ANEG_CFG_HD)
5297 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5298 if (ap->rxconfig & ANEG_CFG_PS1)
5299 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5300 if (ap->rxconfig & ANEG_CFG_PS2)
5301 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5302 if (ap->rxconfig & ANEG_CFG_RF1)
5303 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5304 if (ap->rxconfig & ANEG_CFG_RF2)
5305 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5306 if (ap->rxconfig & ANEG_CFG_NP)
5307 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5309 ap->link_time = ap->cur_time;
5311 ap->flags ^= (MR_TOGGLE_TX);
5312 if (ap->rxconfig & 0x0008)
5313 ap->flags |= MR_TOGGLE_RX;
5314 if (ap->rxconfig & ANEG_CFG_NP)
5315 ap->flags |= MR_NP_RX;
5316 ap->flags |= MR_PAGE_RX;
5318 ap->state = ANEG_STATE_COMPLETE_ACK;
5319 ret = ANEG_TIMER_ENAB;
5320 break;
5322 case ANEG_STATE_COMPLETE_ACK:
5323 if (ap->ability_match != 0 &&
5324 ap->rxconfig == 0) {
5325 ap->state = ANEG_STATE_AN_ENABLE;
5326 break;
5328 delta = ap->cur_time - ap->link_time;
5329 if (delta > ANEG_STATE_SETTLE_TIME) {
5330 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5331 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5332 } else {
5333 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5334 !(ap->flags & MR_NP_RX)) {
5335 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5336 } else {
5337 ret = ANEG_FAILED;
5341 break;
5343 case ANEG_STATE_IDLE_DETECT_INIT:
5344 ap->link_time = ap->cur_time;
5345 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5346 tw32_f(MAC_MODE, tp->mac_mode);
5347 udelay(40);
5349 ap->state = ANEG_STATE_IDLE_DETECT;
5350 ret = ANEG_TIMER_ENAB;
5351 break;
5353 case ANEG_STATE_IDLE_DETECT:
5354 if (ap->ability_match != 0 &&
5355 ap->rxconfig == 0) {
5356 ap->state = ANEG_STATE_AN_ENABLE;
5357 break;
5359 delta = ap->cur_time - ap->link_time;
5360 if (delta > ANEG_STATE_SETTLE_TIME) {
5361 /* XXX another gem from the Broadcom driver :( */
5362 ap->state = ANEG_STATE_LINK_OK;
5364 break;
5366 case ANEG_STATE_LINK_OK:
5367 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5368 ret = ANEG_DONE;
5369 break;
5371 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5372 /* ??? unimplemented */
5373 break;
5375 case ANEG_STATE_NEXT_PAGE_WAIT:
5376 /* ??? unimplemented */
5377 break;
5379 default:
5380 ret = ANEG_FAILED;
5381 break;
5384 return ret;
5387 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5389 int res = 0;
5390 struct tg3_fiber_aneginfo aninfo;
5391 int status = ANEG_FAILED;
5392 unsigned int tick;
5393 u32 tmp;
5395 tw32_f(MAC_TX_AUTO_NEG, 0);
5397 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5398 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5399 udelay(40);
5401 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5402 udelay(40);
5404 memset(&aninfo, 0, sizeof(aninfo));
5405 aninfo.flags |= MR_AN_ENABLE;
5406 aninfo.state = ANEG_STATE_UNKNOWN;
5407 aninfo.cur_time = 0;
5408 tick = 0;
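/* Poll the autoneg state machine for at most 195000 ticks of udelay(1)
 * apiece, i.e. roughly 195 ms plus per-iteration overhead, before
 * giving up.
 */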
5409 while (++tick < 195000) {
5410 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5411 if (status == ANEG_DONE || status == ANEG_FAILED)
5412 break;
5414 udelay(1);
5417 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5418 tw32_f(MAC_MODE, tp->mac_mode);
5419 udelay(40);
5421 *txflags = aninfo.txconfig;
5422 *rxflags = aninfo.flags;
5424 if (status == ANEG_DONE &&
5425 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5426 MR_LP_ADV_FULL_DUPLEX)))
5427 res = 1;
5429 return res;
5432 static void tg3_init_bcm8002(struct tg3 *tp)
5434 u32 mac_status = tr32(MAC_STATUS);
5435 int i;
5437 /* Reset when initializing for the first time or when we have a link. */
5438 if (tg3_flag(tp, INIT_COMPLETE) &&
5439 !(mac_status & MAC_STATUS_PCS_SYNCED))
5440 return;
5442 /* Set PLL lock range. */
5443 tg3_writephy(tp, 0x16, 0x8007);
5445 /* SW reset */
5446 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5448 /* Wait for reset to complete. */
5449 /* XXX schedule_timeout() ... */
5450 for (i = 0; i < 500; i++)
5451 udelay(10);
5453 /* Config mode; select PMA/Ch 1 regs. */
5454 tg3_writephy(tp, 0x10, 0x8411);
5456 /* Enable auto-lock and comdet, select txclk for tx. */
5457 tg3_writephy(tp, 0x11, 0x0a10);
5459 tg3_writephy(tp, 0x18, 0x00a0);
5460 tg3_writephy(tp, 0x16, 0x41ff);
5462 /* Assert and deassert POR. */
5463 tg3_writephy(tp, 0x13, 0x0400);
5464 udelay(40);
5465 tg3_writephy(tp, 0x13, 0x0000);
5467 tg3_writephy(tp, 0x11, 0x0a50);
5468 udelay(40);
5469 tg3_writephy(tp, 0x11, 0x0a10);
5471 /* Wait for signal to stabilize */
5472 /* XXX schedule_timeout() ... */
5473 for (i = 0; i < 15000; i++)
5474 udelay(10);
5476 /* Deselect the channel register so we can read the PHYID
5477 * later.
5478 */
5479 tg3_writephy(tp, 0x10, 0x8011);
5482 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5484 u16 flowctrl;
5485 bool current_link_up;
5486 u32 sg_dig_ctrl, sg_dig_status;
5487 u32 serdes_cfg, expected_sg_dig_ctrl;
5488 int workaround, port_a;
5490 serdes_cfg = 0;
5491 expected_sg_dig_ctrl = 0;
5492 workaround = 0;
5493 port_a = 1;
5494 current_link_up = false;
5496 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5497 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5498 workaround = 1;
5499 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5500 port_a = 0;
5502 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5503 /* preserve bits 20-23 for voltage regulator */
5504 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5507 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5509 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5510 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5511 if (workaround) {
5512 u32 val = serdes_cfg;
5514 if (port_a)
5515 val |= 0xc010000;
5516 else
5517 val |= 0x4010000;
5518 tw32_f(MAC_SERDES_CFG, val);
5521 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5523 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5524 tg3_setup_flow_control(tp, 0, 0);
5525 current_link_up = true;
5527 goto out;
5530 /* Want auto-negotiation. */
5531 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5533 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5534 if (flowctrl & ADVERTISE_1000XPAUSE)
5535 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5536 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5537 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5539 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5540 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5541 tp->serdes_counter &&
5542 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5543 MAC_STATUS_RCVD_CFG)) ==
5544 MAC_STATUS_PCS_SYNCED)) {
5545 tp->serdes_counter--;
5546 current_link_up = true;
5547 goto out;
5549 restart_autoneg:
5550 if (workaround)
5551 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5552 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5553 udelay(5);
5554 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5556 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5557 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5558 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5559 MAC_STATUS_SIGNAL_DET)) {
5560 sg_dig_status = tr32(SG_DIG_STATUS);
5561 mac_status = tr32(MAC_STATUS);
5563 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5564 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5565 u32 local_adv = 0, remote_adv = 0;
5567 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5568 local_adv |= ADVERTISE_1000XPAUSE;
5569 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5570 local_adv |= ADVERTISE_1000XPSE_ASYM;
5572 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5573 remote_adv |= LPA_1000XPAUSE;
5574 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5575 remote_adv |= LPA_1000XPAUSE_ASYM;
5577 tp->link_config.rmt_adv =
5578 mii_adv_to_ethtool_adv_x(remote_adv);
5580 tg3_setup_flow_control(tp, local_adv, remote_adv);
5581 current_link_up = true;
5582 tp->serdes_counter = 0;
5583 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5584 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5585 if (tp->serdes_counter)
5586 tp->serdes_counter--;
5587 else {
5588 if (workaround) {
5589 u32 val = serdes_cfg;
5591 if (port_a)
5592 val |= 0xc010000;
5593 else
5594 val |= 0x4010000;
5596 tw32_f(MAC_SERDES_CFG, val);
5599 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5600 udelay(40);
5602 /* Link parallel detection - link is up */
5603 /* only if we have PCS_SYNC and not */
5604 /* receiving config code words */
5605 mac_status = tr32(MAC_STATUS);
5606 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5607 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5608 tg3_setup_flow_control(tp, 0, 0);
5609 current_link_up = true;
5610 tp->phy_flags |=
5611 TG3_PHYFLG_PARALLEL_DETECT;
5612 tp->serdes_counter =
5613 SERDES_PARALLEL_DET_TIMEOUT;
5614 } else
5615 goto restart_autoneg;
5618 } else {
5619 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5620 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5623 out:
5624 return current_link_up;
5627 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5629 bool current_link_up = false;
5631 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5632 goto out;
5634 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5635 u32 txflags, rxflags;
5636 int i;
5638 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5639 u32 local_adv = 0, remote_adv = 0;
5641 if (txflags & ANEG_CFG_PS1)
5642 local_adv |= ADVERTISE_1000XPAUSE;
5643 if (txflags & ANEG_CFG_PS2)
5644 local_adv |= ADVERTISE_1000XPSE_ASYM;
5646 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5647 remote_adv |= LPA_1000XPAUSE;
5648 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5649 remote_adv |= LPA_1000XPAUSE_ASYM;
5651 tp->link_config.rmt_adv =
5652 mii_adv_to_ethtool_adv_x(remote_adv);
5654 tg3_setup_flow_control(tp, local_adv, remote_adv);
5656 current_link_up = true;
5658 for (i = 0; i < 30; i++) {
5659 udelay(20);
5660 tw32_f(MAC_STATUS,
5661 (MAC_STATUS_SYNC_CHANGED |
5662 MAC_STATUS_CFG_CHANGED));
5663 udelay(40);
5664 if ((tr32(MAC_STATUS) &
5665 (MAC_STATUS_SYNC_CHANGED |
5666 MAC_STATUS_CFG_CHANGED)) == 0)
5667 break;
5670 mac_status = tr32(MAC_STATUS);
5671 if (!current_link_up &&
5672 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5673 !(mac_status & MAC_STATUS_RCVD_CFG))
5674 current_link_up = true;
5675 } else {
5676 tg3_setup_flow_control(tp, 0, 0);
5678 /* Forcing 1000FD link up. */
5679 current_link_up = true;
5681 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5682 udelay(40);
5684 tw32_f(MAC_MODE, tp->mac_mode);
5685 udelay(40);
5688 out:
5689 return current_link_up;
5692 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5694 u32 orig_pause_cfg;
5695 u16 orig_active_speed;
5696 u8 orig_active_duplex;
5697 u32 mac_status;
5698 bool current_link_up;
5699 int i;
5701 orig_pause_cfg = tp->link_config.active_flowctrl;
5702 orig_active_speed = tp->link_config.active_speed;
5703 orig_active_duplex = tp->link_config.active_duplex;
5705 if (!tg3_flag(tp, HW_AUTONEG) &&
5706 tp->link_up &&
5707 tg3_flag(tp, INIT_COMPLETE)) {
5708 mac_status = tr32(MAC_STATUS);
5709 mac_status &= (MAC_STATUS_PCS_SYNCED |
5710 MAC_STATUS_SIGNAL_DET |
5711 MAC_STATUS_CFG_CHANGED |
5712 MAC_STATUS_RCVD_CFG);
5713 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5714 MAC_STATUS_SIGNAL_DET)) {
5715 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5716 MAC_STATUS_CFG_CHANGED));
5717 return 0;
5721 tw32_f(MAC_TX_AUTO_NEG, 0);
5723 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5724 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5725 tw32_f(MAC_MODE, tp->mac_mode);
5726 udelay(40);
5728 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5729 tg3_init_bcm8002(tp);
5731 /* Enable link change event even when serdes polling. */
5732 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5733 udelay(40);
5735 current_link_up = false;
5736 tp->link_config.rmt_adv = 0;
5737 mac_status = tr32(MAC_STATUS);
5739 if (tg3_flag(tp, HW_AUTONEG))
5740 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5741 else
5742 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5744 tp->napi[0].hw_status->status =
5745 (SD_STATUS_UPDATED |
5746 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5748 for (i = 0; i < 100; i++) {
5749 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5750 MAC_STATUS_CFG_CHANGED));
5751 udelay(5);
5752 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5753 MAC_STATUS_CFG_CHANGED |
5754 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5755 break;
5758 mac_status = tr32(MAC_STATUS);
5759 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5760 current_link_up = false;
5761 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5762 tp->serdes_counter == 0) {
5763 tw32_f(MAC_MODE, (tp->mac_mode |
5764 MAC_MODE_SEND_CONFIGS));
5765 udelay(1);
5766 tw32_f(MAC_MODE, tp->mac_mode);
5770 if (current_link_up) {
5771 tp->link_config.active_speed = SPEED_1000;
5772 tp->link_config.active_duplex = DUPLEX_FULL;
5773 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5774 LED_CTRL_LNKLED_OVERRIDE |
5775 LED_CTRL_1000MBPS_ON));
5776 } else {
5777 tp->link_config.active_speed = SPEED_UNKNOWN;
5778 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5779 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5780 LED_CTRL_LNKLED_OVERRIDE |
5781 LED_CTRL_TRAFFIC_OVERRIDE));
5784 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5785 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5786 if (orig_pause_cfg != now_pause_cfg ||
5787 orig_active_speed != tp->link_config.active_speed ||
5788 orig_active_duplex != tp->link_config.active_duplex)
5789 tg3_link_report(tp);
5792 return 0;
5795 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5797 int err = 0;
5798 u32 bmsr, bmcr;
5799 u16 current_speed = SPEED_UNKNOWN;
5800 u8 current_duplex = DUPLEX_UNKNOWN;
5801 bool current_link_up = false;
5802 u32 local_adv, remote_adv, sgsr;
5804 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5805 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5806 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5807 (sgsr & SERDES_TG3_SGMII_MODE)) {
5809 if (force_reset)
5810 tg3_phy_reset(tp);
5812 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5814 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5815 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5816 } else {
5817 current_link_up = true;
5818 if (sgsr & SERDES_TG3_SPEED_1000) {
5819 current_speed = SPEED_1000;
5820 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5821 } else if (sgsr & SERDES_TG3_SPEED_100) {
5822 current_speed = SPEED_100;
5823 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5824 } else {
5825 current_speed = SPEED_10;
5826 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5829 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5830 current_duplex = DUPLEX_FULL;
5831 else
5832 current_duplex = DUPLEX_HALF;
5835 tw32_f(MAC_MODE, tp->mac_mode);
5836 udelay(40);
5838 tg3_clear_mac_status(tp);
5840 goto fiber_setup_done;
5843 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5844 tw32_f(MAC_MODE, tp->mac_mode);
5845 udelay(40);
5847 tg3_clear_mac_status(tp);
5849 if (force_reset)
5850 tg3_phy_reset(tp);
5852 tp->link_config.rmt_adv = 0;
5854 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5855 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5856 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5857 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5858 bmsr |= BMSR_LSTATUS;
5859 else
5860 bmsr &= ~BMSR_LSTATUS;
5863 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5865 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5866 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5867 /* do nothing, just check for link up at the end */
5868 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5869 u32 adv, newadv;
5871 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5872 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5873 ADVERTISE_1000XPAUSE |
5874 ADVERTISE_1000XPSE_ASYM |
5875 ADVERTISE_SLCT);
5877 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5878 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5880 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5881 tg3_writephy(tp, MII_ADVERTISE, newadv);
5882 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5883 tg3_writephy(tp, MII_BMCR, bmcr);
5885 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5886 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5887 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5889 return err;
5891 } else {
5892 u32 new_bmcr;
5894 bmcr &= ~BMCR_SPEED1000;
5895 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5897 if (tp->link_config.duplex == DUPLEX_FULL)
5898 new_bmcr |= BMCR_FULLDPLX;
5900 if (new_bmcr != bmcr) {
5901 /* BMCR_SPEED1000 is a reserved bit that needs
5902 * to be set on write.
5903 */
5904 new_bmcr |= BMCR_SPEED1000;
5906 /* Force a linkdown */
5907 if (tp->link_up) {
5908 u32 adv;
5910 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5911 adv &= ~(ADVERTISE_1000XFULL |
5912 ADVERTISE_1000XHALF |
5913 ADVERTISE_SLCT);
5914 tg3_writephy(tp, MII_ADVERTISE, adv);
5915 tg3_writephy(tp, MII_BMCR, bmcr |
5916 BMCR_ANRESTART |
5917 BMCR_ANENABLE);
5918 udelay(10);
5919 tg3_carrier_off(tp);
5921 tg3_writephy(tp, MII_BMCR, new_bmcr);
5922 bmcr = new_bmcr;
5923 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5924 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5925 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5926 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5927 bmsr |= BMSR_LSTATUS;
5928 else
5929 bmsr &= ~BMSR_LSTATUS;
5931 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5935 if (bmsr & BMSR_LSTATUS) {
5936 current_speed = SPEED_1000;
5937 current_link_up = true;
5938 if (bmcr & BMCR_FULLDPLX)
5939 current_duplex = DUPLEX_FULL;
5940 else
5941 current_duplex = DUPLEX_HALF;
5943 local_adv = 0;
5944 remote_adv = 0;
5946 if (bmcr & BMCR_ANENABLE) {
5947 u32 common;
5949 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5950 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5951 common = local_adv & remote_adv;
5952 if (common & (ADVERTISE_1000XHALF |
5953 ADVERTISE_1000XFULL)) {
5954 if (common & ADVERTISE_1000XFULL)
5955 current_duplex = DUPLEX_FULL;
5956 else
5957 current_duplex = DUPLEX_HALF;
5959 tp->link_config.rmt_adv =
5960 mii_adv_to_ethtool_adv_x(remote_adv);
5961 } else if (!tg3_flag(tp, 5780_CLASS)) {
5962 /* Link is up via parallel detect */
5963 } else {
5964 current_link_up = false;
5969 fiber_setup_done:
5970 if (current_link_up && current_duplex == DUPLEX_FULL)
5971 tg3_setup_flow_control(tp, local_adv, remote_adv);
5973 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5974 if (tp->link_config.active_duplex == DUPLEX_HALF)
5975 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5977 tw32_f(MAC_MODE, tp->mac_mode);
5978 udelay(40);
5980 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5982 tp->link_config.active_speed = current_speed;
5983 tp->link_config.active_duplex = current_duplex;
5985 tg3_test_and_report_link_chg(tp, current_link_up);
5986 return err;
5989 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5991 if (tp->serdes_counter) {
5992 /* Give autoneg time to complete. */
5993 tp->serdes_counter--;
5994 return;
5997 if (!tp->link_up &&
5998 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5999 u32 bmcr;
6001 tg3_readphy(tp, MII_BMCR, &bmcr);
6002 if (bmcr & BMCR_ANENABLE) {
6003 u32 phy1, phy2;
6005 /* Select shadow register 0x1f */
6006 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6007 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6009 /* Select expansion interrupt status register */
6010 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6011 MII_TG3_DSP_EXP1_INT_STAT);
6012 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6013 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6015 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6016 /* We have signal detect and not receiving
6017 * config code words, link is up by parallel
6018 * detection.
6019 */
6021 bmcr &= ~BMCR_ANENABLE;
6022 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6023 tg3_writephy(tp, MII_BMCR, bmcr);
6024 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6027 } else if (tp->link_up &&
6028 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6029 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6030 u32 phy2;
6032 /* Select expansion interrupt status register */
6033 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6034 MII_TG3_DSP_EXP1_INT_STAT);
6035 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6036 if (phy2 & 0x20) {
6037 u32 bmcr;
6039 /* Config code words received, turn on autoneg. */
6040 tg3_readphy(tp, MII_BMCR, &bmcr);
6041 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6043 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6049 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6051 u32 val;
6052 int err;
6054 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6055 err = tg3_setup_fiber_phy(tp, force_reset);
6056 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6057 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6058 else
6059 err = tg3_setup_copper_phy(tp, force_reset);
6061 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6062 u32 scale;
6064 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6065 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6066 scale = 65;
6067 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6068 scale = 6;
6069 else
6070 scale = 12;
6072 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6073 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6074 tw32(GRC_MISC_CFG, val);
6077 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6078 (6 << TX_LENGTHS_IPG_SHIFT);
6079 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6080 tg3_asic_rev(tp) == ASIC_REV_5762)
6081 val |= tr32(MAC_TX_LENGTHS) &
6082 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6083 TX_LENGTHS_CNT_DWN_VAL_MSK);
6085 if (tp->link_config.active_speed == SPEED_1000 &&
6086 tp->link_config.active_duplex == DUPLEX_HALF)
6087 tw32(MAC_TX_LENGTHS, val |
6088 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6089 else
6090 tw32(MAC_TX_LENGTHS, val |
6091 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6093 if (!tg3_flag(tp, 5705_PLUS)) {
6094 if (tp->link_up) {
6095 tw32(HOSTCC_STAT_COAL_TICKS,
6096 tp->coal.stats_block_coalesce_usecs);
6097 } else {
6098 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6102 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6103 val = tr32(PCIE_PWR_MGMT_THRESH);
6104 if (!tp->link_up)
6105 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6106 tp->pwrmgmt_thresh;
6107 else
6108 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6109 tw32(PCIE_PWR_MGMT_THRESH, val);
6112 return err;
6115 /* tp->lock must be held */
6116 static u64 tg3_refclk_read(struct tg3 *tp)
6118 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6119 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
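/* The write side below stops the reference clock, loads both 32-bit
 * halves, and only then resumes counting, presumably so a torn value is
 * never counted from.
 */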
6122 /* tp->lock must be held */
6123 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6125 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6127 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6128 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6129 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6130 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6133 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6134 static inline void tg3_full_unlock(struct tg3 *tp);
6135 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6137 struct tg3 *tp = netdev_priv(dev);
6139 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6140 SOF_TIMESTAMPING_RX_SOFTWARE |
6141 SOF_TIMESTAMPING_SOFTWARE;
6143 if (tg3_flag(tp, PTP_CAPABLE)) {
6144 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6145 SOF_TIMESTAMPING_RX_HARDWARE |
6146 SOF_TIMESTAMPING_RAW_HARDWARE;
6149 if (tp->ptp_clock)
6150 info->phc_index = ptp_clock_index(tp->ptp_clock);
6151 else
6152 info->phc_index = -1;
6154 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6156 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6157 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6158 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6159 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6160 return 0;
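/* Hedged user-space sketch (not driver code): the consumer side of the
 * capability report above, enabling hardware timestamps with one of the
 * advertised filters via the standard SIOCSHWTSTAMP ioctl. "eth0" is a
 * placeholder interface name.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;	/* matches tx_types above */
	/* one of the rx_filters advertised above */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	return 0;
}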
6163 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6165 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6166 bool neg_adj = false;
6167 u32 correction = 0;
6169 if (ppb < 0) {
6170 neg_adj = true;
6171 ppb = -ppb;
6174 /* Frequency adjustment is performed using hardware with a 24 bit
6175 * accumulator and a programmable correction value. On each clk, the
6176 * correction value gets added to the accumulator and when it
6177 * overflows, the time counter is incremented/decremented.
6178 *
6179 * So conversion from ppb to correction value is
6180 * ppb * (1 << 24) / 1000000000
6181 */
6182 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6183 TG3_EAV_REF_CLK_CORRECT_MASK;
6185 tg3_full_lock(tp, 0);
6187 if (correction)
6188 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6189 TG3_EAV_REF_CLK_CORRECT_EN |
6190 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6191 else
6192 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6194 tg3_full_unlock(tp);
6196 return 0;
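/* Illustrative sketch (not driver code): the ppb-to-correction math
 * above redone with plain integer arithmetic so the truncation is easy
 * to check. EXAMPLE_CORRECT_MASK is a hypothetical stand-in assumed to
 * match the 24-bit TG3_EAV_REF_CLK_CORRECT_MASK.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_CORRECT_MASK 0xffffffu	/* assumption: low 24 bits */

static uint32_t example_ppb_to_correction(int32_t ppb)
{
	uint64_t mag = ppb < 0 ? (uint64_t)(-(int64_t)ppb) : (uint64_t)ppb;

	/* correction = |ppb| * 2^24 / 10^9, truncated toward zero */
	return (uint32_t)((mag << 24) / 1000000000ULL) & EXAMPLE_CORRECT_MASK;
}

int main(void)
{
	/* 1000 ppb: 1000 * 16777216 / 10^9 = 16.77... -> 16 */
	printf("%u\n", example_ppb_to_correction(1000));
	/* 250000000 ppb (the max_adj advertised below) -> 4194304 */
	printf("%u\n", example_ppb_to_correction(250000000));
	return 0;
}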
6199 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6201 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6203 tg3_full_lock(tp, 0);
6204 tp->ptp_adjust += delta;
6205 tg3_full_unlock(tp);
6207 return 0;
6210 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6212 u64 ns;
6213 u32 remainder;
6214 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6216 tg3_full_lock(tp, 0);
6217 ns = tg3_refclk_read(tp);
6218 ns += tp->ptp_adjust;
6219 tg3_full_unlock(tp);
6221 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6222 ts->tv_nsec = remainder;
6224 return 0;
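/* Worked example: ns = 1234567890 splits via div_u64_rem() above into
 * tv_sec = 1 and tv_nsec = 234567890.
 */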
6227 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6228 const struct timespec *ts)
6230 u64 ns;
6231 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6233 ns = timespec_to_ns(ts);
6235 tg3_full_lock(tp, 0);
6236 tg3_refclk_write(tp, ns);
6237 tp->ptp_adjust = 0;
6238 tg3_full_unlock(tp);
6240 return 0;
6243 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6244 struct ptp_clock_request *rq, int on)
6246 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6247 u32 clock_ctl;
6248 int rval = 0;
6250 switch (rq->type) {
6251 case PTP_CLK_REQ_PEROUT:
6252 if (rq->perout.index != 0)
6253 return -EINVAL;
6255 tg3_full_lock(tp, 0);
6256 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6257 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6259 if (on) {
6260 u64 nsec;
6262 nsec = rq->perout.start.sec * 1000000000ULL +
6263 rq->perout.start.nsec;
6265 if (rq->perout.period.sec || rq->perout.period.nsec) {
6266 netdev_warn(tp->dev,
6267 "Device supports only a one-shot timesync output, period must be 0\n");
6268 rval = -EINVAL;
6269 goto err_out;
6272 if (nsec & (1ULL << 63)) {
6273 netdev_warn(tp->dev,
6274 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6275 rval = -EINVAL;
6276 goto err_out;
6279 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6280 tw32(TG3_EAV_WATCHDOG0_MSB,
6281 TG3_EAV_WATCHDOG0_EN |
6282 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6284 tw32(TG3_EAV_REF_CLCK_CTL,
6285 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6286 } else {
6287 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6288 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6291 err_out:
6292 tg3_full_unlock(tp);
6293 return rval;
6295 default:
6296 break;
6299 return -EOPNOTSUPP;
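/* Hedged user-space sketch (not driver code): arming the one-shot
 * timesync output handled above through the standard PTP character
 * device API. "/dev/ptp0" and the start time are placeholder
 * assumptions; the period stays zeroed because the handler above
 * rejects anything periodic.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_perout_request req;
	int fd = open("/dev/ptp0", O_RDWR);	/* placeholder device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.index = 0;			/* the caps below expose one output */
	req.start.sec = 1500000000;	/* arbitrary example start (sec) */
	req.start.nsec = 0;
	/* req.period left 0/0: tg3 only supports one-shot output */

	if (ioctl(fd, PTP_PEROUT_REQUEST, &req) < 0)
		perror("PTP_PEROUT_REQUEST");
	return 0;
}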
6302 static const struct ptp_clock_info tg3_ptp_caps = {
6303 .owner = THIS_MODULE,
6304 .name = "tg3 clock",
6305 .max_adj = 250000000,
6306 .n_alarm = 0,
6307 .n_ext_ts = 0,
6308 .n_per_out = 1,
6309 .pps = 0,
6310 .adjfreq = tg3_ptp_adjfreq,
6311 .adjtime = tg3_ptp_adjtime,
6312 .gettime = tg3_ptp_gettime,
6313 .settime = tg3_ptp_settime,
6314 .enable = tg3_ptp_enable,
6317 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6318 struct skb_shared_hwtstamps *timestamp)
6320 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6321 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6322 tp->ptp_adjust);
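/* The raw hardware stamp is masked to TG3_TSTAMP_MASK and shifted by
 * the same software offset (ptp_adjust) that adjtime() maintains,
 * keeping packet timestamps in step with what gettime() reports.
 */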
6325 /* tp->lock must be held */
6326 static void tg3_ptp_init(struct tg3 *tp)
6328 if (!tg3_flag(tp, PTP_CAPABLE))
6329 return;
6331 /* Initialize the hardware clock to the system time. */
6332 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6333 tp->ptp_adjust = 0;
6334 tp->ptp_info = tg3_ptp_caps;
6337 /* tp->lock must be held */
6338 static void tg3_ptp_resume(struct tg3 *tp)
6340 if (!tg3_flag(tp, PTP_CAPABLE))
6341 return;
6343 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6344 tp->ptp_adjust = 0;
6347 static void tg3_ptp_fini(struct tg3 *tp)
6349 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6350 return;
6352 ptp_clock_unregister(tp->ptp_clock);
6353 tp->ptp_clock = NULL;
6354 tp->ptp_adjust = 0;
6357 static inline int tg3_irq_sync(struct tg3 *tp)
6359 return tp->irq_sync;
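/* tg3_rd32_loop() first biases dst by off bytes so the dump buffer is
 * indexed by register offset; callers can therefore hand the same base
 * buffer to every register-block capture below.
 */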
6362 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6364 int i;
6366 dst = (u32 *)((u8 *)dst + off);
6367 for (i = 0; i < len; i += sizeof(u32))
6368 *dst++ = tr32(off + i);
6371 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6373 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6374 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6375 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6376 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6377 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6378 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6379 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6380 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6381 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6382 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6383 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6384 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6385 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6386 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6387 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6388 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6389 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6390 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6391 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6393 if (tg3_flag(tp, SUPPORT_MSIX))
6394 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6396 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6397 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6398 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6399 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6400 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6401 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6402 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6403 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6405 if (!tg3_flag(tp, 5705_PLUS)) {
6406 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6407 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6408 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6411 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6412 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6413 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6414 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6415 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6417 if (tg3_flag(tp, NVRAM))
6418 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6421 static void tg3_dump_state(struct tg3 *tp)
6423 int i;
6424 u32 *regs;
6426 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6427 if (!regs)
6428 return;
6430 if (tg3_flag(tp, PCI_EXPRESS)) {
6431 /* Read up to but not including private PCI registers */
6432 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6433 regs[i / sizeof(u32)] = tr32(i);
6434 } else
6435 tg3_dump_legacy_regs(tp, regs);
6437 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6438 if (!regs[i + 0] && !regs[i + 1] &&
6439 !regs[i + 2] && !regs[i + 3])
6440 continue;
6442 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6443 i * 4,
6444 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6447 kfree(regs);
6449 for (i = 0; i < tp->irq_cnt; i++) {
6450 struct tg3_napi *tnapi = &tp->napi[i];
6452 /* SW status block */
6453 netdev_err(tp->dev,
6454 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6456 tnapi->hw_status->status,
6457 tnapi->hw_status->status_tag,
6458 tnapi->hw_status->rx_jumbo_consumer,
6459 tnapi->hw_status->rx_consumer,
6460 tnapi->hw_status->rx_mini_consumer,
6461 tnapi->hw_status->idx[0].rx_producer,
6462 tnapi->hw_status->idx[0].tx_consumer);
6464 netdev_err(tp->dev,
6465 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6467 tnapi->last_tag, tnapi->last_irq_tag,
6468 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6469 tnapi->rx_rcb_ptr,
6470 tnapi->prodring.rx_std_prod_idx,
6471 tnapi->prodring.rx_std_cons_idx,
6472 tnapi->prodring.rx_jmb_prod_idx,
6473 tnapi->prodring.rx_jmb_cons_idx);
6477 /* This is called whenever we suspect that the system chipset is re-
6478 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6479 * is bogus tx completions. We try to recover by setting the
6480 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6481 * in the workqueue.
6483 static void tg3_tx_recover(struct tg3 *tp)
6485 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6486 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6488 netdev_warn(tp->dev,
6489 "The system may be re-ordering memory-mapped I/O "
6490 "cycles to the network device, attempting to recover. "
6491 "Please report the problem to the driver maintainer "
6492 "and include system chipset information.\n");
6494 tg3_flag_set(tp, TX_RECOVERY_PENDING);
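/* Sketch of the recovery idea (hypothetical helper, not this driver's
 * exact code): once TG3_FLAG_MBOX_WRITE_REORDER is set, mailbox writes
 * go through an indirect path; the simplest form of such a fix is to
 * read the register back so the chipset cannot reorder the write.
 */
#if 0
static void demo_mbox_write_flushed(void __iomem *mbox, u32 val)
{
	writel(val, mbox);
	readl(mbox);	/* posted-write flush forces ordering */
}
#endif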
6497 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6499 /* Tell compiler to fetch tx indices from memory. */
6500 barrier();
6501 return tnapi->tx_pending -
6502 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
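/* Worked example (standalone sketch): tx_prod and tx_cons are free-
 * running indices, so their unsigned difference masked by ring_size - 1
 * counts in-flight descriptors correctly even across a wrap. Assumes a
 * 512-entry power-of-two ring, like TG3_TX_RING_SIZE.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define DEMO_RING_SIZE 512u

static uint32_t demo_tx_avail(uint32_t pending, uint32_t prod, uint32_t cons)
{
	return pending - ((prod - cons) & (DEMO_RING_SIZE - 1));
}

int main(void)
{
	assert(demo_tx_avail(512, 10, 0) == 502);  /* no wrap */
	assert(demo_tx_avail(512, 2, 508) == 506); /* prod wrapped */
	return 0;
}
#endif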
6505 /* Tigon3 never reports partial packet sends. So we do not
6506 * need special logic to handle SKBs that have not had all
6507 * of their frags sent yet, like SunGEM does.
6509 static void tg3_tx(struct tg3_napi *tnapi)
6511 struct tg3 *tp = tnapi->tp;
6512 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6513 u32 sw_idx = tnapi->tx_cons;
6514 struct netdev_queue *txq;
6515 int index = tnapi - tp->napi;
6516 unsigned int pkts_compl = 0, bytes_compl = 0;
6518 if (tg3_flag(tp, ENABLE_TSS))
6519 index--;
6521 txq = netdev_get_tx_queue(tp->dev, index);
6523 while (sw_idx != hw_idx) {
6524 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6525 struct sk_buff *skb = ri->skb;
6526 int i, tx_bug = 0;
6528 if (unlikely(skb == NULL)) {
6529 tg3_tx_recover(tp);
6530 return;
6533 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6534 struct skb_shared_hwtstamps timestamp;
6535 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6536 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6538 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6540 skb_tstamp_tx(skb, &timestamp);
6543 pci_unmap_single(tp->pdev,
6544 dma_unmap_addr(ri, mapping),
6545 skb_headlen(skb),
6546 PCI_DMA_TODEVICE);
6548 ri->skb = NULL;
6550 while (ri->fragmented) {
6551 ri->fragmented = false;
6552 sw_idx = NEXT_TX(sw_idx);
6553 ri = &tnapi->tx_buffers[sw_idx];
6556 sw_idx = NEXT_TX(sw_idx);
6558 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6559 ri = &tnapi->tx_buffers[sw_idx];
6560 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6561 tx_bug = 1;
6563 pci_unmap_page(tp->pdev,
6564 dma_unmap_addr(ri, mapping),
6565 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6566 PCI_DMA_TODEVICE);
6568 while (ri->fragmented) {
6569 ri->fragmented = false;
6570 sw_idx = NEXT_TX(sw_idx);
6571 ri = &tnapi->tx_buffers[sw_idx];
6574 sw_idx = NEXT_TX(sw_idx);
6577 pkts_compl++;
6578 bytes_compl += skb->len;
6580 dev_kfree_skb(skb);
6582 if (unlikely(tx_bug)) {
6583 tg3_tx_recover(tp);
6584 return;
6588 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6590 tnapi->tx_cons = sw_idx;
6592 /* Need to make the tx_cons update visible to tg3_start_xmit()
6593 * before checking for netif_queue_stopped(). Without the
6594 * memory barrier, there is a small possibility that tg3_start_xmit()
6595 * will miss it and cause the queue to be stopped forever.
6597 smp_mb();
6599 if (unlikely(netif_tx_queue_stopped(txq) &&
6600 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6601 __netif_tx_lock(txq, smp_processor_id());
6602 if (netif_tx_queue_stopped(txq) &&
6603 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6604 netif_tx_wake_queue(txq);
6605 __netif_tx_unlock(txq);
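/* Toy model of the smp_mb() pairing above (illustrative, userspace C11;
 * names are invented). The consumer publishes its progress and then
 * checks the stopped flag; the producer sets the stopped flag and then
 * re-checks progress. With both fences, at least one side must observe
 * the other's store, so a stopped queue with free slots cannot persist.
 */
#if 0
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint cons_idx;
static atomic_bool stopped;
static atomic_bool woken;

static void *consumer(void *arg)
{
	atomic_store_explicit(&cons_idx, 32, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	if (atomic_load_explicit(&stopped, memory_order_relaxed))
		atomic_store(&woken, 1);		/* wake the queue */
	return NULL;
}

static void *producer(void *arg)
{
	atomic_store_explicit(&stopped, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	if (atomic_load_explicit(&cons_idx, memory_order_relaxed) >= 32)
		atomic_store(&woken, 1);		/* wake ourselves */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, consumer, NULL);
	pthread_create(&b, NULL, producer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("queue woken: %d\n", (int)atomic_load(&woken)); /* always 1 */
	return 0;
}
#endif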
6609 static void tg3_frag_free(bool is_frag, void *data)
6611 if (is_frag)
6612 put_page(virt_to_head_page(data));
6613 else
6614 kfree(data);
6617 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6619 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6620 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6622 if (!ri->data)
6623 return;
6625 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6626 map_sz, PCI_DMA_FROMDEVICE);
6627 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6628 ri->data = NULL;
6632 /* Returns size of skb allocated or < 0 on error.
6634 * We only need to fill in the address because the other members
6635 * of the RX descriptor are invariant, see tg3_init_rings.
6637 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6638 * posting buffers we only dirty the first cache line of the RX
6639 * descriptor (containing the address). Whereas for the RX status
6640 * buffers the cpu only reads the last cacheline of the RX descriptor
6641 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6643 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6644 u32 opaque_key, u32 dest_idx_unmasked,
6645 unsigned int *frag_size)
6647 struct tg3_rx_buffer_desc *desc;
6648 struct ring_info *map;
6649 u8 *data;
6650 dma_addr_t mapping;
6651 int skb_size, data_size, dest_idx;
6653 switch (opaque_key) {
6654 case RXD_OPAQUE_RING_STD:
6655 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6656 desc = &tpr->rx_std[dest_idx];
6657 map = &tpr->rx_std_buffers[dest_idx];
6658 data_size = tp->rx_pkt_map_sz;
6659 break;
6661 case RXD_OPAQUE_RING_JUMBO:
6662 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6663 desc = &tpr->rx_jmb[dest_idx].std;
6664 map = &tpr->rx_jmb_buffers[dest_idx];
6665 data_size = TG3_RX_JMB_MAP_SZ;
6666 break;
6668 default:
6669 return -EINVAL;
6672 /* Do not overwrite any of the map or rp information
6673 * until we are sure we can commit to a new buffer.
6675 * Callers depend upon this behavior and assume that
6676 * we leave everything unchanged if we fail.
6678 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6679 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6680 if (skb_size <= PAGE_SIZE) {
6681 data = netdev_alloc_frag(skb_size);
6682 *frag_size = skb_size;
6683 } else {
6684 data = kmalloc(skb_size, GFP_ATOMIC);
6685 *frag_size = 0;
6687 if (!data)
6688 return -ENOMEM;
6690 mapping = pci_map_single(tp->pdev,
6691 data + TG3_RX_OFFSET(tp),
6692 data_size,
6693 PCI_DMA_FROMDEVICE);
6694 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6695 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6696 return -EIO;
6699 map->data = data;
6700 dma_unmap_addr_set(map, mapping, mapping);
6702 desc->addr_hi = ((u64)mapping >> 32);
6703 desc->addr_lo = ((u64)mapping & 0xffffffff);
6705 return data_size;
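/* Sizing sketch (userspace analogue of the skb_size computation above;
 * the constants are assumptions, not the kernel's real values): only
 * when the aligned payload plus the shared-info footer still fits one
 * page is the page-frag allocator used instead of kmalloc.
 */
#if 0
#include <stdio.h>

#define DEMO_ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long page = 4096, cacheline = 64, shinfo = 320;
	unsigned long data_size = 1536, rx_offset = 2;
	unsigned long total = DEMO_ALIGN(data_size + rx_offset, cacheline) +
			      DEMO_ALIGN(shinfo, cacheline);

	printf("%lu bytes -> %s\n", total,
	       total <= page ? "netdev_alloc_frag" : "kmalloc");
	return 0;
}
#endif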
6708 /* We only need to copy the address over because the other
6709 * members of the RX descriptor are invariant. See notes above
6710 * tg3_alloc_rx_data for full details.
6712 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6713 struct tg3_rx_prodring_set *dpr,
6714 u32 opaque_key, int src_idx,
6715 u32 dest_idx_unmasked)
6717 struct tg3 *tp = tnapi->tp;
6718 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6719 struct ring_info *src_map, *dest_map;
6720 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6721 int dest_idx;
6723 switch (opaque_key) {
6724 case RXD_OPAQUE_RING_STD:
6725 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6726 dest_desc = &dpr->rx_std[dest_idx];
6727 dest_map = &dpr->rx_std_buffers[dest_idx];
6728 src_desc = &spr->rx_std[src_idx];
6729 src_map = &spr->rx_std_buffers[src_idx];
6730 break;
6732 case RXD_OPAQUE_RING_JUMBO:
6733 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6734 dest_desc = &dpr->rx_jmb[dest_idx].std;
6735 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6736 src_desc = &spr->rx_jmb[src_idx].std;
6737 src_map = &spr->rx_jmb_buffers[src_idx];
6738 break;
6740 default:
6741 return;
6744 dest_map->data = src_map->data;
6745 dma_unmap_addr_set(dest_map, mapping,
6746 dma_unmap_addr(src_map, mapping));
6747 dest_desc->addr_hi = src_desc->addr_hi;
6748 dest_desc->addr_lo = src_desc->addr_lo;
6750 /* Ensure that the update to the skb happens after the physical
6751 * addresses have been transferred to the new BD location.
6753 smp_wmb();
6755 src_map->data = NULL;
6758 /* The RX ring scheme is composed of multiple rings which post fresh
6759 * buffers to the chip, and one special ring the chip uses to report
6760 * status back to the host.
6762 * The special ring reports the status of received packets to the
6763 * host. The chip does not write into the original descriptor the
6764 * RX buffer was obtained from. The chip simply takes the original
6765 * descriptor as provided by the host, updates the status and length
6766 * field, then writes this into the next status ring entry.
6768 * Each ring the host uses to post buffers to the chip is described
6769 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6770 * it is first placed into the on-chip ram. When the packet's length
6771 * is known, it walks down the TG3_BDINFO entries to select the ring.
6772 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6773 * which is within the range of the new packet's length is chosen.
6775 * The "separate ring for rx status" scheme may sound queer, but it makes
6776 * sense from a cache coherency perspective. If only the host writes
6777 * to the buffer post rings, and only the chip writes to the rx status
6778 * rings, then cache lines never move beyond shared-modified state.
6779 * If both the host and chip were to write into the same ring, cache line
6780 * eviction could occur since both entities want it in an exclusive state.
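/* Minimal sketch of that split-ownership layout (hypothetical types,
 * for illustration only): the host-owned descriptor carries the buffer
 * address plus an opaque cookie; the chip-owned return descriptor
 * echoes the cookie with length and error bits, so no cache line is
 * written by both sides.
 */
#if 0
#include <stdint.h>

struct demo_post_bd {		/* host writes, chip reads */
	uint64_t dma_addr;
	uint32_t opaque;	/* ring id + index, echoed on completion */
};

struct demo_return_bd {		/* chip writes, host reads */
	uint32_t opaque;	/* locates the posted buffer */
	uint16_t len;
	uint16_t err_flags;
};
#endif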
6782 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6784 struct tg3 *tp = tnapi->tp;
6785 u32 work_mask, rx_std_posted = 0;
6786 u32 std_prod_idx, jmb_prod_idx;
6787 u32 sw_idx = tnapi->rx_rcb_ptr;
6788 u16 hw_idx;
6789 int received;
6790 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6792 hw_idx = *(tnapi->rx_rcb_prod_idx);
6794 * We need to order the read of hw_idx and the read of
6795 * the opaque cookie.
6797 rmb();
6798 work_mask = 0;
6799 received = 0;
6800 std_prod_idx = tpr->rx_std_prod_idx;
6801 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6802 while (sw_idx != hw_idx && budget > 0) {
6803 struct ring_info *ri;
6804 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6805 unsigned int len;
6806 struct sk_buff *skb;
6807 dma_addr_t dma_addr;
6808 u32 opaque_key, desc_idx, *post_ptr;
6809 u8 *data;
6810 u64 tstamp = 0;
6812 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6813 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6814 if (opaque_key == RXD_OPAQUE_RING_STD) {
6815 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6816 dma_addr = dma_unmap_addr(ri, mapping);
6817 data = ri->data;
6818 post_ptr = &std_prod_idx;
6819 rx_std_posted++;
6820 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6821 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6822 dma_addr = dma_unmap_addr(ri, mapping);
6823 data = ri->data;
6824 post_ptr = &jmb_prod_idx;
6825 } else
6826 goto next_pkt_nopost;
6828 work_mask |= opaque_key;
6830 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6831 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6832 drop_it:
6833 tg3_recycle_rx(tnapi, tpr, opaque_key,
6834 desc_idx, *post_ptr);
6835 drop_it_no_recycle:
6836 /* Other statistics are tracked by the card itself. */
6837 tp->rx_dropped++;
6838 goto next_pkt;
6841 prefetch(data + TG3_RX_OFFSET(tp));
6842 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6843 ETH_FCS_LEN;
6845 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6846 RXD_FLAG_PTPSTAT_PTPV1 ||
6847 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6848 RXD_FLAG_PTPSTAT_PTPV2) {
6849 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6850 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6853 if (len > TG3_RX_COPY_THRESH(tp)) {
6854 int skb_size;
6855 unsigned int frag_size;
6857 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6858 *post_ptr, &frag_size);
6859 if (skb_size < 0)
6860 goto drop_it;
6862 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6863 PCI_DMA_FROMDEVICE);
6865 /* Ensure that the update to the data happens
6866 * after the usage of the old DMA mapping.
6868 smp_wmb();
6870 ri->data = NULL;
6872 skb = build_skb(data, frag_size);
6873 if (!skb) {
6874 tg3_frag_free(frag_size != 0, data);
6875 goto drop_it_no_recycle;
6877 skb_reserve(skb, TG3_RX_OFFSET(tp));
6878 } else {
6879 tg3_recycle_rx(tnapi, tpr, opaque_key,
6880 desc_idx, *post_ptr);
6882 skb = netdev_alloc_skb(tp->dev,
6883 len + TG3_RAW_IP_ALIGN);
6884 if (skb == NULL)
6885 goto drop_it_no_recycle;
6887 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6888 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6889 memcpy(skb->data,
6890 data + TG3_RX_OFFSET(tp),
6891 len);
6892 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6895 skb_put(skb, len);
6896 if (tstamp)
6897 tg3_hwclock_to_timestamp(tp, tstamp,
6898 skb_hwtstamps(skb));
6900 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6901 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6902 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6903 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6904 skb->ip_summed = CHECKSUM_UNNECESSARY;
6905 else
6906 skb_checksum_none_assert(skb);
6908 skb->protocol = eth_type_trans(skb, tp->dev);
6910 if (len > (tp->dev->mtu + ETH_HLEN) &&
6911 skb->protocol != htons(ETH_P_8021Q)) {
6912 dev_kfree_skb(skb);
6913 goto drop_it_no_recycle;
6916 if (desc->type_flags & RXD_FLAG_VLAN &&
6917 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6918 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6919 desc->err_vlan & RXD_VLAN_MASK);
6921 napi_gro_receive(&tnapi->napi, skb);
6923 received++;
6924 budget--;
6926 next_pkt:
6927 (*post_ptr)++;
6929 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6930 tpr->rx_std_prod_idx = std_prod_idx &
6931 tp->rx_std_ring_mask;
6932 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6933 tpr->rx_std_prod_idx);
6934 work_mask &= ~RXD_OPAQUE_RING_STD;
6935 rx_std_posted = 0;
6937 next_pkt_nopost:
6938 sw_idx++;
6939 sw_idx &= tp->rx_ret_ring_mask;
6941 /* Refresh hw_idx to see if there is new work */
6942 if (sw_idx == hw_idx) {
6943 hw_idx = *(tnapi->rx_rcb_prod_idx);
6944 rmb();
6948 /* ACK the status ring. */
6949 tnapi->rx_rcb_ptr = sw_idx;
6950 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6952 /* Refill RX ring(s). */
6953 if (!tg3_flag(tp, ENABLE_RSS)) {
6954 /* Sync BD data before updating mailbox */
6955 wmb();
6957 if (work_mask & RXD_OPAQUE_RING_STD) {
6958 tpr->rx_std_prod_idx = std_prod_idx &
6959 tp->rx_std_ring_mask;
6960 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6961 tpr->rx_std_prod_idx);
6963 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6964 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6965 tp->rx_jmb_ring_mask;
6966 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6967 tpr->rx_jmb_prod_idx);
6969 mmiowb();
6970 } else if (work_mask) {
6971 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6972 * updated before the producer indices can be updated.
6974 smp_wmb();
6976 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6977 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6979 if (tnapi != &tp->napi[1]) {
6980 tp->rx_refill = true;
6981 napi_schedule(&tp->napi[1].napi);
6985 return received;
6988 static void tg3_poll_link(struct tg3 *tp)
6990 /* handle link change and other phy events */
6991 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6992 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6994 if (sblk->status & SD_STATUS_LINK_CHG) {
6995 sblk->status = SD_STATUS_UPDATED |
6996 (sblk->status & ~SD_STATUS_LINK_CHG);
6997 spin_lock(&tp->lock);
6998 if (tg3_flag(tp, USE_PHYLIB)) {
6999 tw32_f(MAC_STATUS,
7000 (MAC_STATUS_SYNC_CHANGED |
7001 MAC_STATUS_CFG_CHANGED |
7002 MAC_STATUS_MI_COMPLETION |
7003 MAC_STATUS_LNKSTATE_CHANGED));
7004 udelay(40);
7005 } else
7006 tg3_setup_phy(tp, false);
7007 spin_unlock(&tp->lock);
7012 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7013 struct tg3_rx_prodring_set *dpr,
7014 struct tg3_rx_prodring_set *spr)
7016 u32 si, di, cpycnt, src_prod_idx;
7017 int i, err = 0;
7019 while (1) {
7020 src_prod_idx = spr->rx_std_prod_idx;
7022 /* Make sure updates to the rx_std_buffers[] entries and the
7023 * standard producer index are seen in the correct order.
7025 smp_rmb();
7027 if (spr->rx_std_cons_idx == src_prod_idx)
7028 break;
7030 if (spr->rx_std_cons_idx < src_prod_idx)
7031 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7032 else
7033 cpycnt = tp->rx_std_ring_mask + 1 -
7034 spr->rx_std_cons_idx;
7036 cpycnt = min(cpycnt,
7037 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7039 si = spr->rx_std_cons_idx;
7040 di = dpr->rx_std_prod_idx;
7042 for (i = di; i < di + cpycnt; i++) {
7043 if (dpr->rx_std_buffers[i].data) {
7044 cpycnt = i - di;
7045 err = -ENOSPC;
7046 break;
7050 if (!cpycnt)
7051 break;
7053 /* Ensure that updates to the rx_std_buffers ring and the
7054 * shadowed hardware producer ring from tg3_recycle_skb() are
7055 * ordered correctly WRT the skb check above.
7057 smp_rmb();
7059 memcpy(&dpr->rx_std_buffers[di],
7060 &spr->rx_std_buffers[si],
7061 cpycnt * sizeof(struct ring_info));
7063 for (i = 0; i < cpycnt; i++, di++, si++) {
7064 struct tg3_rx_buffer_desc *sbd, *dbd;
7065 sbd = &spr->rx_std[si];
7066 dbd = &dpr->rx_std[di];
7067 dbd->addr_hi = sbd->addr_hi;
7068 dbd->addr_lo = sbd->addr_lo;
7071 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7072 tp->rx_std_ring_mask;
7073 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7074 tp->rx_std_ring_mask;
7077 while (1) {
7078 src_prod_idx = spr->rx_jmb_prod_idx;
7080 /* Make sure updates to the rx_jmb_buffers[] entries and
7081 * the jumbo producer index are seen in the correct order.
7083 smp_rmb();
7085 if (spr->rx_jmb_cons_idx == src_prod_idx)
7086 break;
7088 if (spr->rx_jmb_cons_idx < src_prod_idx)
7089 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7090 else
7091 cpycnt = tp->rx_jmb_ring_mask + 1 -
7092 spr->rx_jmb_cons_idx;
7094 cpycnt = min(cpycnt,
7095 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7097 si = spr->rx_jmb_cons_idx;
7098 di = dpr->rx_jmb_prod_idx;
7100 for (i = di; i < di + cpycnt; i++) {
7101 if (dpr->rx_jmb_buffers[i].data) {
7102 cpycnt = i - di;
7103 err = -ENOSPC;
7104 break;
7108 if (!cpycnt)
7109 break;
7111 /* Ensure that updates to the rx_jmb_buffers ring and the
7112 * shadowed hardware producer ring from tg3_recycle_skb() are
7113 * ordered correctly WRT the skb check above.
7115 smp_rmb();
7117 memcpy(&dpr->rx_jmb_buffers[di],
7118 &spr->rx_jmb_buffers[si],
7119 cpycnt * sizeof(struct ring_info));
7121 for (i = 0; i < cpycnt; i++, di++, si++) {
7122 struct tg3_rx_buffer_desc *sbd, *dbd;
7123 sbd = &spr->rx_jmb[si].std;
7124 dbd = &dpr->rx_jmb[di].std;
7125 dbd->addr_hi = sbd->addr_hi;
7126 dbd->addr_lo = sbd->addr_lo;
7129 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7130 tp->rx_jmb_ring_mask;
7131 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7132 tp->rx_jmb_ring_mask;
7135 return err;
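/* Worked example (sketch with invented values) of the cpycnt clamping
 * above: with a 512-entry ring, a wrapped source (cons 500, prod 20)
 * first copies only the 12 entries up to the end of the ring; the
 * destination-space clamp is applied on top of that.
 */
#if 0
#include <assert.h>

#define DEMO_MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int mask = 511;	/* 512-entry ring */
	unsigned int cons = 500, prod = 20, dst_prod = 100;
	unsigned int cpycnt;

	cpycnt = (cons < prod) ? prod - cons : mask + 1 - cons;
	cpycnt = DEMO_MIN(cpycnt, mask + 1 - dst_prod);
	assert(cpycnt == 12);		/* entries 500..511 */
	return 0;
}
#endif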
7138 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7140 struct tg3 *tp = tnapi->tp;
7142 /* run TX completion thread */
7143 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7144 tg3_tx(tnapi);
7145 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7146 return work_done;
7149 if (!tnapi->rx_rcb_prod_idx)
7150 return work_done;
7152 /* run RX thread, within the bounds set by NAPI.
7153 * All RX "locking" is done by ensuring outside
7154 * code synchronizes with tg3->napi.poll()
7156 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7157 work_done += tg3_rx(tnapi, budget - work_done);
7159 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7160 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7161 int i, err = 0;
7162 u32 std_prod_idx = dpr->rx_std_prod_idx;
7163 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7165 tp->rx_refill = false;
7166 for (i = 1; i <= tp->rxq_cnt; i++)
7167 err |= tg3_rx_prodring_xfer(tp, dpr,
7168 &tp->napi[i].prodring);
7170 wmb();
7172 if (std_prod_idx != dpr->rx_std_prod_idx)
7173 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7174 dpr->rx_std_prod_idx);
7176 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7177 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7178 dpr->rx_jmb_prod_idx);
7180 mmiowb();
7182 if (err)
7183 tw32_f(HOSTCC_MODE, tp->coal_now);
7186 return work_done;
7189 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7191 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7192 schedule_work(&tp->reset_task);
7195 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7197 cancel_work_sync(&tp->reset_task);
7198 tg3_flag_clear(tp, RESET_TASK_PENDING);
7199 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7202 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7204 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7205 struct tg3 *tp = tnapi->tp;
7206 int work_done = 0;
7207 struct tg3_hw_status *sblk = tnapi->hw_status;
7209 while (1) {
7210 work_done = tg3_poll_work(tnapi, work_done, budget);
7212 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7213 goto tx_recovery;
7215 if (unlikely(work_done >= budget))
7216 break;
7218 /* tp->last_tag is used in tg3_int_reenable() below
7219 * to tell the hw how much work has been processed,
7220 * so we must read it before checking for more work.
7222 tnapi->last_tag = sblk->status_tag;
7223 tnapi->last_irq_tag = tnapi->last_tag;
7224 rmb();
7226 /* check for RX/TX work to do */
7227 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7228 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7230 /* This test is not race-free, but it will reduce
7231 * the number of interrupts by looping again.
7233 if (tnapi == &tp->napi[1] && tp->rx_refill)
7234 continue;
7236 napi_complete(napi);
7237 /* Reenable interrupts. */
7238 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7240 /* This test is synchronized by napi_schedule()
7241 * and napi_complete() to close the race condition.
7243 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7244 tw32(HOSTCC_MODE, tp->coalesce_mode |
7245 HOSTCC_MODE_ENABLE |
7246 tnapi->coal_now);
7248 mmiowb();
7249 break;
7253 return work_done;
7255 tx_recovery:
7256 /* work_done is guaranteed to be less than budget. */
7257 napi_complete(napi);
7258 tg3_reset_task_schedule(tp);
7259 return work_done;
7262 static void tg3_process_error(struct tg3 *tp)
7264 u32 val;
7265 bool real_error = false;
7267 if (tg3_flag(tp, ERROR_PROCESSED))
7268 return;
7270 /* Check Flow Attention register */
7271 val = tr32(HOSTCC_FLOW_ATTN);
7272 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7273 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7274 real_error = true;
7277 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7278 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7279 real_error = true;
7282 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7283 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7284 real_error = true;
7287 if (!real_error)
7288 return;
7290 tg3_dump_state(tp);
7292 tg3_flag_set(tp, ERROR_PROCESSED);
7293 tg3_reset_task_schedule(tp);
7296 static int tg3_poll(struct napi_struct *napi, int budget)
7298 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7299 struct tg3 *tp = tnapi->tp;
7300 int work_done = 0;
7301 struct tg3_hw_status *sblk = tnapi->hw_status;
7303 while (1) {
7304 if (sblk->status & SD_STATUS_ERROR)
7305 tg3_process_error(tp);
7307 tg3_poll_link(tp);
7309 work_done = tg3_poll_work(tnapi, work_done, budget);
7311 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7312 goto tx_recovery;
7314 if (unlikely(work_done >= budget))
7315 break;
7317 if (tg3_flag(tp, TAGGED_STATUS)) {
7318 /* tp->last_tag is used in tg3_int_reenable() below
7319 * to tell the hw how much work has been processed,
7320 * so we must read it before checking for more work.
7322 tnapi->last_tag = sblk->status_tag;
7323 tnapi->last_irq_tag = tnapi->last_tag;
7324 rmb();
7325 } else
7326 sblk->status &= ~SD_STATUS_UPDATED;
7328 if (likely(!tg3_has_work(tnapi))) {
7329 napi_complete(napi);
7330 tg3_int_reenable(tnapi);
7331 break;
7335 return work_done;
7337 tx_recovery:
7338 /* work_done is guaranteed to be less than budget. */
7339 napi_complete(napi);
7340 tg3_reset_task_schedule(tp);
7341 return work_done;
7344 static void tg3_napi_disable(struct tg3 *tp)
7346 int i;
7348 for (i = tp->irq_cnt - 1; i >= 0; i--)
7349 napi_disable(&tp->napi[i].napi);
7352 static void tg3_napi_enable(struct tg3 *tp)
7354 int i;
7356 for (i = 0; i < tp->irq_cnt; i++)
7357 napi_enable(&tp->napi[i].napi);
7360 static void tg3_napi_init(struct tg3 *tp)
7362 int i;
7364 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7365 for (i = 1; i < tp->irq_cnt; i++)
7366 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7369 static void tg3_napi_fini(struct tg3 *tp)
7371 int i;
7373 for (i = 0; i < tp->irq_cnt; i++)
7374 netif_napi_del(&tp->napi[i].napi);
7377 static inline void tg3_netif_stop(struct tg3 *tp)
7379 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7380 tg3_napi_disable(tp);
7381 netif_carrier_off(tp->dev);
7382 netif_tx_disable(tp->dev);
7385 /* tp->lock must be held */
7386 static inline void tg3_netif_start(struct tg3 *tp)
7388 tg3_ptp_resume(tp);
7390 /* NOTE: unconditional netif_tx_wake_all_queues is only
7391 * appropriate so long as all callers are assured to
7392 * have free tx slots (such as after tg3_init_hw)
7394 netif_tx_wake_all_queues(tp->dev);
7396 if (tp->link_up)
7397 netif_carrier_on(tp->dev);
7399 tg3_napi_enable(tp);
7400 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7401 tg3_enable_ints(tp);
7404 static void tg3_irq_quiesce(struct tg3 *tp)
7406 int i;
7408 BUG_ON(tp->irq_sync);
7410 tp->irq_sync = 1;
7411 smp_mb();
7413 for (i = 0; i < tp->irq_cnt; i++)
7414 synchronize_irq(tp->napi[i].irq_vec);
7417 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7418 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7419 * with as well. Most of the time, this is not necessary except when
7420 * shutting down the device.
7422 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7424 spin_lock_bh(&tp->lock);
7425 if (irq_sync)
7426 tg3_irq_quiesce(tp);
7429 static inline void tg3_full_unlock(struct tg3 *tp)
7431 spin_unlock_bh(&tp->lock);
7434 /* One-shot MSI handler - the chip automatically disables the interrupt
7435 * after sending the MSI, so the driver doesn't have to do it.
7437 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7439 struct tg3_napi *tnapi = dev_id;
7440 struct tg3 *tp = tnapi->tp;
7442 prefetch(tnapi->hw_status);
7443 if (tnapi->rx_rcb)
7444 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7446 if (likely(!tg3_irq_sync(tp)))
7447 napi_schedule(&tnapi->napi);
7449 return IRQ_HANDLED;
7452 /* MSI ISR - No need to check for interrupt sharing and no need to
7453 * flush status block and interrupt mailbox. PCI ordering rules
7454 * guarantee that MSI will arrive after the status block.
7456 static irqreturn_t tg3_msi(int irq, void *dev_id)
7458 struct tg3_napi *tnapi = dev_id;
7459 struct tg3 *tp = tnapi->tp;
7461 prefetch(tnapi->hw_status);
7462 if (tnapi->rx_rcb)
7463 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7465 * Writing any value to intr-mbox-0 clears PCI INTA# and
7466 * chip-internal interrupt pending events.
7467 * Writing non-zero to intr-mbox-0 additionally tells the
7468 * NIC to stop sending us irqs, engaging "in-intr-handler"
7469 * event coalescing.
7471 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7472 if (likely(!tg3_irq_sync(tp)))
7473 napi_schedule(&tnapi->napi);
7475 return IRQ_RETVAL(1);
7478 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7480 struct tg3_napi *tnapi = dev_id;
7481 struct tg3 *tp = tnapi->tp;
7482 struct tg3_hw_status *sblk = tnapi->hw_status;
7483 unsigned int handled = 1;
7485 /* In INTx mode, it is possible for the interrupt to arrive at
7486 * the CPU before the status block that was posted prior to it.
7487 * Reading the PCI State register will confirm whether the
7488 * interrupt is ours and will flush the status block.
7490 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7491 if (tg3_flag(tp, CHIP_RESETTING) ||
7492 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7493 handled = 0;
7494 goto out;
7499 * Writing any value to intr-mbox-0 clears PCI INTA# and
7500 * chip-internal interrupt pending events.
7501 * Writing non-zero to intr-mbox-0 additionally tells the
7502 * NIC to stop sending us irqs, engaging "in-intr-handler"
7503 * event coalescing.
7505 * Flush the mailbox to de-assert the IRQ immediately to prevent
7506 * spurious interrupts. The flush impacts performance but
7507 * excessive spurious interrupts can be worse in some cases.
7509 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7510 if (tg3_irq_sync(tp))
7511 goto out;
7512 sblk->status &= ~SD_STATUS_UPDATED;
7513 if (likely(tg3_has_work(tnapi))) {
7514 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7515 napi_schedule(&tnapi->napi);
7516 } else {
7517 /* No work, shared interrupt perhaps? re-enable
7518 * interrupts, and flush that PCI write
7520 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7521 0x00000000);
7523 out:
7524 return IRQ_RETVAL(handled);
7527 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7529 struct tg3_napi *tnapi = dev_id;
7530 struct tg3 *tp = tnapi->tp;
7531 struct tg3_hw_status *sblk = tnapi->hw_status;
7532 unsigned int handled = 1;
7534 /* In INTx mode, it is possible for the interrupt to arrive at
7536 * the CPU before the status block that was posted prior to it.
7536 * Reading the PCI State register will confirm whether the
7537 * interrupt is ours and will flush the status block.
7539 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7540 if (tg3_flag(tp, CHIP_RESETTING) ||
7541 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7542 handled = 0;
7543 goto out;
7548 * writing any value to intr-mbox-0 clears PCI INTA# and
7549 * chip-internal interrupt pending events.
7550 * writing non-zero to intr-mbox-0 additionally tells the
7551 * NIC to stop sending us irqs, engaging "in-intr-handler"
7552 * event coalescing.
7554 * Flush the mailbox to de-assert the IRQ immediately to prevent
7555 * spurious interrupts. The flush impacts performance but
7556 * excessive spurious interrupts can be worse in some cases.
7558 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7561 * In a shared interrupt configuration, sometimes other devices'
7562 * interrupts will scream. We record the current status tag here
7563 * so that the above check can report that the screaming interrupts
7564 * are unhandled. Eventually they will be silenced.
7566 tnapi->last_irq_tag = sblk->status_tag;
7568 if (tg3_irq_sync(tp))
7569 goto out;
7571 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7573 napi_schedule(&tnapi->napi);
7575 out:
7576 return IRQ_RETVAL(handled);
7579 /* ISR for interrupt test */
7580 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7582 struct tg3_napi *tnapi = dev_id;
7583 struct tg3 *tp = tnapi->tp;
7584 struct tg3_hw_status *sblk = tnapi->hw_status;
7586 if ((sblk->status & SD_STATUS_UPDATED) ||
7587 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7588 tg3_disable_ints(tp);
7589 return IRQ_RETVAL(1);
7591 return IRQ_RETVAL(0);
7594 #ifdef CONFIG_NET_POLL_CONTROLLER
7595 static void tg3_poll_controller(struct net_device *dev)
7597 int i;
7598 struct tg3 *tp = netdev_priv(dev);
7600 if (tg3_irq_sync(tp))
7601 return;
7603 for (i = 0; i < tp->irq_cnt; i++)
7604 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7606 #endif
7608 static void tg3_tx_timeout(struct net_device *dev)
7610 struct tg3 *tp = netdev_priv(dev);
7612 if (netif_msg_tx_err(tp)) {
7613 netdev_err(dev, "transmit timed out, resetting\n");
7614 tg3_dump_state(tp);
7617 tg3_reset_task_schedule(tp);
7620 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7621 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7623 u32 base = (u32) mapping & 0xffffffff;
7625 return (base > 0xffffdcc0) && (base + len + 8 < base);
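/* Worked example (standalone sketch) of the wrap test above: the sum is
 * evaluated in 32 bits, so a buffer that straddles a 4GB boundary makes
 * base + len + 8 wrap below base. The guard against base values under
 * 0xffffdcc0 merely skips buffers that cannot reach the boundary.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static int demo_4g_overflow(uint32_t base, uint32_t len)
{
	return base > 0xffffdcc0u && (uint32_t)(base + len + 8) < base;
}

int main(void)
{
	assert(demo_4g_overflow(0xffffff00u, 0x200) == 1); /* crosses 4G */
	assert(demo_4g_overflow(0x00001000u, 0x200) == 0); /* far below  */
	return 0;
}
#endif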
7628 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7629 * of any 4GB boundaries: 4G, 8G, etc
7631 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7632 u32 len, u32 mss)
7634 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7635 u32 base = (u32) mapping & 0xffffffff;
7637 return ((base + len + (mss & 0x3fff)) < base);
7639 return 0;
7642 /* Test for DMA addresses > 40-bit */
7643 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7644 int len)
7646 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7647 if (tg3_flag(tp, 40BIT_DMA_BUG))
7648 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7649 return 0;
7650 #else
7651 return 0;
7652 #endif
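/* Worked example (sketch) for the 40-bit check: for n = 40,
 * DMA_BIT_MASK(n) is (1ULL << 40) - 1, so any mapping whose end address
 * exceeds it needs the bounce workaround on affected chips.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define DEMO_DMA_BIT_MASK(n) ((1ULL << (n)) - 1)

int main(void)
{
	/* end address past 2^40 -> workaround required */
	assert((0xffff000000ULL + 0x2000000ULL) > DEMO_DMA_BIT_MASK(40));
	/* low mapping -> untouched */
	assert(!((0x1000ULL + 0x600ULL) > DEMO_DMA_BIT_MASK(40)));
	return 0;
}
#endif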
7655 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7656 dma_addr_t mapping, u32 len, u32 flags,
7657 u32 mss, u32 vlan)
7659 txbd->addr_hi = ((u64) mapping >> 32);
7660 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7661 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7662 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7665 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7666 dma_addr_t map, u32 len, u32 flags,
7667 u32 mss, u32 vlan)
7669 struct tg3 *tp = tnapi->tp;
7670 bool hwbug = false;
7672 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7673 hwbug = true;
7675 if (tg3_4g_overflow_test(map, len))
7676 hwbug = true;
7678 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7679 hwbug = true;
7681 if (tg3_40bit_overflow_test(tp, map, len))
7682 hwbug = true;
7684 if (tp->dma_limit) {
7685 u32 prvidx = *entry;
7686 u32 tmp_flag = flags & ~TXD_FLAG_END;
7687 while (len > tp->dma_limit && *budget) {
7688 u32 frag_len = tp->dma_limit;
7689 len -= tp->dma_limit;
7691 /* Avoid the 8-byte DMA problem */
7692 if (len <= 8) {
7693 len += tp->dma_limit / 2;
7694 frag_len = tp->dma_limit / 2;
7697 tnapi->tx_buffers[*entry].fragmented = true;
7699 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7700 frag_len, tmp_flag, mss, vlan);
7701 *budget -= 1;
7702 prvidx = *entry;
7703 *entry = NEXT_TX(*entry);
7705 map += frag_len;
7708 if (len) {
7709 if (*budget) {
7710 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7711 len, flags, mss, vlan);
7712 *budget -= 1;
7713 *entry = NEXT_TX(*entry);
7714 } else {
7715 hwbug = true;
7716 tnapi->tx_buffers[prvidx].fragmented = false;
7719 } else {
7720 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7721 len, flags, mss, vlan);
7722 *entry = NEXT_TX(*entry);
7725 return hwbug;
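/* Worked example (sketch) of the short-tail rebalancing above: if
 * carving dma_limit-sized chunks would leave a residue of 8 bytes or
 * less, the current chunk is halved so the residue grows past the
 * hardware's troublesome short-DMA window.
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned int dma_limit = 4096, len = 4100; /* residue would be 4 */
	unsigned int frag_len = dma_limit;

	if (len - dma_limit <= 8)
		frag_len = dma_limit / 2;	   /* emit 2048 instead */

	assert(frag_len == 2048 && len - frag_len == 2052);
	return 0;
}
#endif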
7728 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7730 int i;
7731 struct sk_buff *skb;
7732 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7734 skb = txb->skb;
7735 txb->skb = NULL;
7737 pci_unmap_single(tnapi->tp->pdev,
7738 dma_unmap_addr(txb, mapping),
7739 skb_headlen(skb),
7740 PCI_DMA_TODEVICE);
7742 while (txb->fragmented) {
7743 txb->fragmented = false;
7744 entry = NEXT_TX(entry);
7745 txb = &tnapi->tx_buffers[entry];
7748 for (i = 0; i <= last; i++) {
7749 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7751 entry = NEXT_TX(entry);
7752 txb = &tnapi->tx_buffers[entry];
7754 pci_unmap_page(tnapi->tp->pdev,
7755 dma_unmap_addr(txb, mapping),
7756 skb_frag_size(frag), PCI_DMA_TODEVICE);
7758 while (txb->fragmented) {
7759 txb->fragmented = false;
7760 entry = NEXT_TX(entry);
7761 txb = &tnapi->tx_buffers[entry];
7766 /* Work around 4GB and 40-bit hardware DMA bugs. */
7767 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7768 struct sk_buff **pskb,
7769 u32 *entry, u32 *budget,
7770 u32 base_flags, u32 mss, u32 vlan)
7772 struct tg3 *tp = tnapi->tp;
7773 struct sk_buff *new_skb, *skb = *pskb;
7774 dma_addr_t new_addr = 0;
7775 int ret = 0;
7777 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7778 new_skb = skb_copy(skb, GFP_ATOMIC);
7779 else {
7780 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7782 new_skb = skb_copy_expand(skb,
7783 skb_headroom(skb) + more_headroom,
7784 skb_tailroom(skb), GFP_ATOMIC);
7787 if (!new_skb) {
7788 ret = -1;
7789 } else {
7790 /* New SKB is guaranteed to be linear. */
7791 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7792 PCI_DMA_TODEVICE);
7793 /* Make sure the mapping succeeded */
7794 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7795 dev_kfree_skb(new_skb);
7796 ret = -1;
7797 } else {
7798 u32 save_entry = *entry;
7800 base_flags |= TXD_FLAG_END;
7802 tnapi->tx_buffers[*entry].skb = new_skb;
7803 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7804 mapping, new_addr);
7806 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7807 new_skb->len, base_flags,
7808 mss, vlan)) {
7809 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7810 dev_kfree_skb(new_skb);
7811 ret = -1;
7816 dev_kfree_skb(skb);
7817 *pskb = new_skb;
7818 return ret;
7821 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7823 /* Use GSO to work around a rare TSO bug that may be triggered when the
7824 * TSO header is greater than 80 bytes.
7826 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7828 struct sk_buff *segs, *nskb;
7829 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7831 /* Estimate the number of fragments in the worst case */
7832 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7833 netif_stop_queue(tp->dev);
7835 /* netif_tx_stop_queue() must be done before checking
7836 * the tx index in tg3_tx_avail() below, because in
7837 * tg3_tx(), we update tx index before checking for
7838 * netif_tx_queue_stopped().
7840 smp_mb();
7841 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7842 return NETDEV_TX_BUSY;
7844 netif_wake_queue(tp->dev);
7847 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7848 if (IS_ERR(segs))
7849 goto tg3_tso_bug_end;
7851 do {
7852 nskb = segs;
7853 segs = segs->next;
7854 nskb->next = NULL;
7855 tg3_start_xmit(nskb, tp->dev);
7856 } while (segs);
7858 tg3_tso_bug_end:
7859 dev_kfree_skb(skb);
7861 return NETDEV_TX_OK;
7864 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7865 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7867 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7869 struct tg3 *tp = netdev_priv(dev);
7870 u32 len, entry, base_flags, mss, vlan = 0;
7871 u32 budget;
7872 int i = -1, would_hit_hwbug;
7873 dma_addr_t mapping;
7874 struct tg3_napi *tnapi;
7875 struct netdev_queue *txq;
7876 unsigned int last;
7878 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7879 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7880 if (tg3_flag(tp, ENABLE_TSS))
7881 tnapi++;
7883 budget = tg3_tx_avail(tnapi);
7885 /* We are running in a BH-disabled context with netif_tx_lock
7886 * and TX reclaim runs via tp->napi.poll inside of a software
7887 * interrupt. Furthermore, IRQ processing runs lockless so we have
7888 * no IRQ context deadlocks to worry about either. Rejoice!
7890 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7891 if (!netif_tx_queue_stopped(txq)) {
7892 netif_tx_stop_queue(txq);
7894 /* This is a hard error, log it. */
7895 netdev_err(dev,
7896 "BUG! Tx Ring full when queue awake!\n");
7898 return NETDEV_TX_BUSY;
7901 entry = tnapi->tx_prod;
7902 base_flags = 0;
7903 if (skb->ip_summed == CHECKSUM_PARTIAL)
7904 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7906 mss = skb_shinfo(skb)->gso_size;
7907 if (mss) {
7908 struct iphdr *iph;
7909 u32 tcp_opt_len, hdr_len;
7911 if (skb_header_cloned(skb) &&
7912 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7913 goto drop;
7915 iph = ip_hdr(skb);
7916 tcp_opt_len = tcp_optlen(skb);
7918 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7920 if (!skb_is_gso_v6(skb)) {
7921 iph->check = 0;
7922 iph->tot_len = htons(mss + hdr_len);
7925 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7926 tg3_flag(tp, TSO_BUG))
7927 return tg3_tso_bug(tp, skb);
7929 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7930 TXD_FLAG_CPU_POST_DMA);
7932 if (tg3_flag(tp, HW_TSO_1) ||
7933 tg3_flag(tp, HW_TSO_2) ||
7934 tg3_flag(tp, HW_TSO_3)) {
7935 tcp_hdr(skb)->check = 0;
7936 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7937 } else
7938 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7939 iph->daddr, 0,
7940 IPPROTO_TCP,
7943 if (tg3_flag(tp, HW_TSO_3)) {
7944 mss |= (hdr_len & 0xc) << 12;
7945 if (hdr_len & 0x10)
7946 base_flags |= 0x00000010;
7947 base_flags |= (hdr_len & 0x3e0) << 5;
7948 } else if (tg3_flag(tp, HW_TSO_2))
7949 mss |= hdr_len << 9;
7950 else if (tg3_flag(tp, HW_TSO_1) ||
7951 tg3_asic_rev(tp) == ASIC_REV_5705) {
7952 if (tcp_opt_len || iph->ihl > 5) {
7953 int tsflags;
7955 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7956 mss |= (tsflags << 11);
7958 } else {
7959 if (tcp_opt_len || iph->ihl > 5) {
7960 int tsflags;
7962 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7963 base_flags |= tsflags << 12;
7968 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7969 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7970 base_flags |= TXD_FLAG_JMB_PKT;
7972 if (vlan_tx_tag_present(skb)) {
7973 base_flags |= TXD_FLAG_VLAN;
7974 vlan = vlan_tx_tag_get(skb);
7977 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7978 tg3_flag(tp, TX_TSTAMP_EN)) {
7979 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7980 base_flags |= TXD_FLAG_HWTSTAMP;
7983 len = skb_headlen(skb);
7985 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7986 if (pci_dma_mapping_error(tp->pdev, mapping))
7987 goto drop;
7990 tnapi->tx_buffers[entry].skb = skb;
7991 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7993 would_hit_hwbug = 0;
7995 if (tg3_flag(tp, 5701_DMA_BUG))
7996 would_hit_hwbug = 1;
7998 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7999 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8000 mss, vlan)) {
8001 would_hit_hwbug = 1;
8002 } else if (skb_shinfo(skb)->nr_frags > 0) {
8003 u32 tmp_mss = mss;
8005 if (!tg3_flag(tp, HW_TSO_1) &&
8006 !tg3_flag(tp, HW_TSO_2) &&
8007 !tg3_flag(tp, HW_TSO_3))
8008 tmp_mss = 0;
8010 /* Now loop through additional data
8011 * fragments, and queue them.
8013 last = skb_shinfo(skb)->nr_frags - 1;
8014 for (i = 0; i <= last; i++) {
8015 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8017 len = skb_frag_size(frag);
8018 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8019 len, DMA_TO_DEVICE);
8021 tnapi->tx_buffers[entry].skb = NULL;
8022 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8023 mapping);
8024 if (dma_mapping_error(&tp->pdev->dev, mapping))
8025 goto dma_error;
8027 if (!budget ||
8028 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8029 len, base_flags |
8030 ((i == last) ? TXD_FLAG_END : 0),
8031 tmp_mss, vlan)) {
8032 would_hit_hwbug = 1;
8033 break;
8038 if (would_hit_hwbug) {
8039 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8041 /* If the workaround fails due to memory/mapping
8042 * failure, silently drop this packet.
8044 entry = tnapi->tx_prod;
8045 budget = tg3_tx_avail(tnapi);
8046 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8047 base_flags, mss, vlan))
8048 goto drop_nofree;
8051 skb_tx_timestamp(skb);
8052 netdev_tx_sent_queue(txq, skb->len);
8054 /* Sync BD data before updating mailbox */
8055 wmb();
8057 /* Packets are ready, update Tx producer idx local and on card. */
8058 tw32_tx_mbox(tnapi->prodmbox, entry);
8060 tnapi->tx_prod = entry;
8061 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8062 netif_tx_stop_queue(txq);
8064 /* netif_tx_stop_queue() must be done before checking
8065 * the tx index in tg3_tx_avail() below, because in
8066 * tg3_tx(), we update tx index before checking for
8067 * netif_tx_queue_stopped().
8069 smp_mb();
8070 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8071 netif_tx_wake_queue(txq);
8074 mmiowb();
8075 return NETDEV_TX_OK;
8077 dma_error:
8078 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8079 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8080 drop:
8081 dev_kfree_skb(skb);
8082 drop_nofree:
8083 tp->tx_dropped++;
8084 return NETDEV_TX_OK;
8087 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8089 if (enable) {
8090 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8091 MAC_MODE_PORT_MODE_MASK);
8093 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8095 if (!tg3_flag(tp, 5705_PLUS))
8096 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8098 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8099 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8100 else
8101 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8102 } else {
8103 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8105 if (tg3_flag(tp, 5705_PLUS) ||
8106 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8107 tg3_asic_rev(tp) == ASIC_REV_5700)
8108 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8111 tw32(MAC_MODE, tp->mac_mode);
8112 udelay(40);
8115 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8117 u32 val, bmcr, mac_mode, ptest = 0;
8119 tg3_phy_toggle_apd(tp, false);
8120 tg3_phy_toggle_automdix(tp, false);
8122 if (extlpbk && tg3_phy_set_extloopbk(tp))
8123 return -EIO;
8125 bmcr = BMCR_FULLDPLX;
8126 switch (speed) {
8127 case SPEED_10:
8128 break;
8129 case SPEED_100:
8130 bmcr |= BMCR_SPEED100;
8131 break;
8132 case SPEED_1000:
8133 default:
8134 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8135 speed = SPEED_100;
8136 bmcr |= BMCR_SPEED100;
8137 } else {
8138 speed = SPEED_1000;
8139 bmcr |= BMCR_SPEED1000;
8143 if (extlpbk) {
8144 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8145 tg3_readphy(tp, MII_CTRL1000, &val);
8146 val |= CTL1000_AS_MASTER |
8147 CTL1000_ENABLE_MASTER;
8148 tg3_writephy(tp, MII_CTRL1000, val);
8149 } else {
8150 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8151 MII_TG3_FET_PTEST_TRIM_2;
8152 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8154 } else
8155 bmcr |= BMCR_LOOPBACK;
8157 tg3_writephy(tp, MII_BMCR, bmcr);
8159 /* The write needs to be flushed for the FETs */
8160 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8161 tg3_readphy(tp, MII_BMCR, &bmcr);
8163 udelay(40);
8165 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8166 tg3_asic_rev(tp) == ASIC_REV_5785) {
8167 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8168 MII_TG3_FET_PTEST_FRC_TX_LINK |
8169 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8171 /* The write needs to be flushed for the AC131 */
8172 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8175 /* Reset to prevent intermittently losing the 1st rx packet */
8176 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8177 tg3_flag(tp, 5780_CLASS)) {
8178 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8179 udelay(10);
8180 tw32_f(MAC_RX_MODE, tp->rx_mode);
8183 mac_mode = tp->mac_mode &
8184 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8185 if (speed == SPEED_1000)
8186 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8187 else
8188 mac_mode |= MAC_MODE_PORT_MODE_MII;
8190 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8191 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8193 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8194 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8195 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8196 mac_mode |= MAC_MODE_LINK_POLARITY;
8198 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8199 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8202 tw32(MAC_MODE, mac_mode);
8203 udelay(40);
8205 return 0;
8208 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8210 struct tg3 *tp = netdev_priv(dev);
8212 if (features & NETIF_F_LOOPBACK) {
8213 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8214 return;
8216 spin_lock_bh(&tp->lock);
8217 tg3_mac_loopback(tp, true);
8218 netif_carrier_on(tp->dev);
8219 spin_unlock_bh(&tp->lock);
8220 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8221 } else {
8222 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8223 return;
8225 spin_lock_bh(&tp->lock);
8226 tg3_mac_loopback(tp, false);
8227 /* Force link status check */
8228 tg3_setup_phy(tp, true);
8229 spin_unlock_bh(&tp->lock);
8230 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8234 static netdev_features_t tg3_fix_features(struct net_device *dev,
8235 netdev_features_t features)
8237 struct tg3 *tp = netdev_priv(dev);
8239 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8240 features &= ~NETIF_F_ALL_TSO;
8242 return features;
8245 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8247 netdev_features_t changed = dev->features ^ features;
8249 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8250 tg3_set_loopback(dev, features);
8252 return 0;
8255 static void tg3_rx_prodring_free(struct tg3 *tp,
8256 struct tg3_rx_prodring_set *tpr)
8258 int i;
8260 if (tpr != &tp->napi[0].prodring) {
8261 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8262 i = (i + 1) & tp->rx_std_ring_mask)
8263 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8264 tp->rx_pkt_map_sz);
8266 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8267 for (i = tpr->rx_jmb_cons_idx;
8268 i != tpr->rx_jmb_prod_idx;
8269 i = (i + 1) & tp->rx_jmb_ring_mask) {
8270 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8271 TG3_RX_JMB_MAP_SZ);
8275 return;
8278 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8279 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8280 tp->rx_pkt_map_sz);
8282 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8283 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8284 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8285 TG3_RX_JMB_MAP_SZ);
8289 /* Initialize rx rings for packet processing.
8291 * The chip has been shut down and the driver detached from
8292 * the network stack, so no interrupts or new tx packets will
8293 * end up in the driver. tp->{tx,}lock are held and thus
8294 * we may not sleep.
8296 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8297 struct tg3_rx_prodring_set *tpr)
8299 u32 i, rx_pkt_dma_sz;
8301 tpr->rx_std_cons_idx = 0;
8302 tpr->rx_std_prod_idx = 0;
8303 tpr->rx_jmb_cons_idx = 0;
8304 tpr->rx_jmb_prod_idx = 0;
8306 if (tpr != &tp->napi[0].prodring) {
8307 memset(&tpr->rx_std_buffers[0], 0,
8308 TG3_RX_STD_BUFF_RING_SIZE(tp));
8309 if (tpr->rx_jmb_buffers)
8310 memset(&tpr->rx_jmb_buffers[0], 0,
8311 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8312 goto done;
8315 /* Zero out all descriptors. */
8316 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8318 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8319 if (tg3_flag(tp, 5780_CLASS) &&
8320 tp->dev->mtu > ETH_DATA_LEN)
8321 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8322 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8324 /* Initialize invariants of the rings; we only set this
8325 * stuff once. This works because the card does not
8326 * write into the rx buffer posting rings.
8328 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8329 struct tg3_rx_buffer_desc *rxd;
8331 rxd = &tpr->rx_std[i];
8332 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8333 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8334 rxd->opaque = (RXD_OPAQUE_RING_STD |
8335 (i << RXD_OPAQUE_INDEX_SHIFT));
8338 /* Now allocate fresh SKBs for each rx ring. */
8339 for (i = 0; i < tp->rx_pending; i++) {
8340 unsigned int frag_size;
8342 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8343 &frag_size) < 0) {
8344 netdev_warn(tp->dev,
8345 "Using a smaller RX standard ring. Only "
8346 "%d out of %d buffers were allocated "
8347 "successfully\n", i, tp->rx_pending);
8348 if (i == 0)
8349 goto initfail;
8350 tp->rx_pending = i;
8351 break;
8355 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8356 goto done;
8358 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8360 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8361 goto done;
8363 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8364 struct tg3_rx_buffer_desc *rxd;
8366 rxd = &tpr->rx_jmb[i].std;
8367 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8368 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8369 RXD_FLAG_JUMBO;
8370 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8371 (i << RXD_OPAQUE_INDEX_SHIFT));
8374 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8375 unsigned int frag_size;
8377 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8378 &frag_size) < 0) {
8379 netdev_warn(tp->dev,
8380 "Using a smaller RX jumbo ring. Only %d "
8381 "out of %d buffers were allocated "
8382 "successfully\n", i, tp->rx_jumbo_pending);
8383 if (i == 0)
8384 goto initfail;
8385 tp->rx_jumbo_pending = i;
8386 break;
8390 done:
8391 return 0;
8393 initfail:
8394 tg3_rx_prodring_free(tp, tpr);
8395 return -ENOMEM;
8398 static void tg3_rx_prodring_fini(struct tg3 *tp,
8399 struct tg3_rx_prodring_set *tpr)
8401 kfree(tpr->rx_std_buffers);
8402 tpr->rx_std_buffers = NULL;
8403 kfree(tpr->rx_jmb_buffers);
8404 tpr->rx_jmb_buffers = NULL;
8405 if (tpr->rx_std) {
8406 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8407 tpr->rx_std, tpr->rx_std_mapping);
8408 tpr->rx_std = NULL;
8410 if (tpr->rx_jmb) {
8411 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8412 tpr->rx_jmb, tpr->rx_jmb_mapping);
8413 tpr->rx_jmb = NULL;
8417 static int tg3_rx_prodring_init(struct tg3 *tp,
8418 struct tg3_rx_prodring_set *tpr)
8420 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8421 GFP_KERNEL);
8422 if (!tpr->rx_std_buffers)
8423 return -ENOMEM;
8425 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8426 TG3_RX_STD_RING_BYTES(tp),
8427 &tpr->rx_std_mapping,
8428 GFP_KERNEL);
8429 if (!tpr->rx_std)
8430 goto err_out;
8432 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8433 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8434 GFP_KERNEL);
8435 if (!tpr->rx_jmb_buffers)
8436 goto err_out;
8438 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8439 TG3_RX_JMB_RING_BYTES(tp),
8440 &tpr->rx_jmb_mapping,
8441 GFP_KERNEL);
8442 if (!tpr->rx_jmb)
8443 goto err_out;
8446 return 0;
8448 err_out:
8449 tg3_rx_prodring_fini(tp, tpr);
8450 return -ENOMEM;
8453 /* Free up pending packets in all rx/tx rings.
8455 * The chip has been shut down and the driver detached from
8456 * the networking stack, so no interrupts or new tx packets will
8457 * end up in the driver. tp->{tx,}lock is not held and we are not
8458 * in an interrupt context and thus may sleep.
8460 static void tg3_free_rings(struct tg3 *tp)
8462 int i, j;
8464 for (j = 0; j < tp->irq_cnt; j++) {
8465 struct tg3_napi *tnapi = &tp->napi[j];
8467 tg3_rx_prodring_free(tp, &tnapi->prodring);
8469 if (!tnapi->tx_buffers)
8470 continue;
8472 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8473 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8475 if (!skb)
8476 continue;
8478 tg3_tx_skb_unmap(tnapi, i,
8479 skb_shinfo(skb)->nr_frags - 1);
8481 dev_kfree_skb_any(skb);
8483 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8487 /* Initialize tx/rx rings for packet processing.
8489 * The chip has been shut down and the driver detached from
8490 * the networking stack, so no interrupts or new tx packets will
8491 * end up in the driver. tp->{tx,}lock are held and thus
8492 * we may not sleep.
8494 static int tg3_init_rings(struct tg3 *tp)
8496 int i;
8498 /* Free up all the SKBs. */
8499 tg3_free_rings(tp);
8501 for (i = 0; i < tp->irq_cnt; i++) {
8502 struct tg3_napi *tnapi = &tp->napi[i];
8504 tnapi->last_tag = 0;
8505 tnapi->last_irq_tag = 0;
8506 tnapi->hw_status->status = 0;
8507 tnapi->hw_status->status_tag = 0;
8508 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8510 tnapi->tx_prod = 0;
8511 tnapi->tx_cons = 0;
8512 if (tnapi->tx_ring)
8513 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8515 tnapi->rx_rcb_ptr = 0;
8516 if (tnapi->rx_rcb)
8517 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8519 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8520 tg3_free_rings(tp);
8521 return -ENOMEM;
8525 return 0;
8528 static void tg3_mem_tx_release(struct tg3 *tp)
8530 int i;
8532 for (i = 0; i < tp->irq_max; i++) {
8533 struct tg3_napi *tnapi = &tp->napi[i];
8535 if (tnapi->tx_ring) {
8536 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8537 tnapi->tx_ring, tnapi->tx_desc_mapping);
8538 tnapi->tx_ring = NULL;
8541 kfree(tnapi->tx_buffers);
8542 tnapi->tx_buffers = NULL;
8546 static int tg3_mem_tx_acquire(struct tg3 *tp)
8548 int i;
8549 struct tg3_napi *tnapi = &tp->napi[0];
8551 /* If multivector TSS is enabled, vector 0 does not handle
8552 * tx interrupts. Don't allocate any resources for it.
8554 if (tg3_flag(tp, ENABLE_TSS))
8555 tnapi++;
8557 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8558 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8559 TG3_TX_RING_SIZE, GFP_KERNEL);
8560 if (!tnapi->tx_buffers)
8561 goto err_out;
8563 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8564 TG3_TX_RING_BYTES,
8565 &tnapi->tx_desc_mapping,
8566 GFP_KERNEL);
8567 if (!tnapi->tx_ring)
8568 goto err_out;
8571 return 0;
8573 err_out:
8574 tg3_mem_tx_release(tp);
8575 return -ENOMEM;
8578 static void tg3_mem_rx_release(struct tg3 *tp)
8580 int i;
8582 for (i = 0; i < tp->irq_max; i++) {
8583 struct tg3_napi *tnapi = &tp->napi[i];
8585 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8587 if (!tnapi->rx_rcb)
8588 continue;
8590 dma_free_coherent(&tp->pdev->dev,
8591 TG3_RX_RCB_RING_BYTES(tp),
8592 tnapi->rx_rcb,
8593 tnapi->rx_rcb_mapping);
8594 tnapi->rx_rcb = NULL;
8598 static int tg3_mem_rx_acquire(struct tg3 *tp)
8600 unsigned int i, limit;
8602 limit = tp->rxq_cnt;
8604 /* If RSS is enabled, we need a (dummy) producer ring
8605 * set on vector zero. This is the true hw prodring.
8607 if (tg3_flag(tp, ENABLE_RSS))
8608 limit++;
8610 for (i = 0; i < limit; i++) {
8611 struct tg3_napi *tnapi = &tp->napi[i];
8613 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8614 goto err_out;
8616 /* If multivector RSS is enabled, vector 0
8617 * does not handle rx or tx interrupts.
8618 * Don't allocate any resources for it.
8620 if (!i && tg3_flag(tp, ENABLE_RSS))
8621 continue;
8623 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8624 TG3_RX_RCB_RING_BYTES(tp),
8625 &tnapi->rx_rcb_mapping,
8626 GFP_KERNEL);
8627 if (!tnapi->rx_rcb)
8628 goto err_out;
8631 return 0;
8633 err_out:
8634 tg3_mem_rx_release(tp);
8635 return -ENOMEM;
8639 * Must not be invoked with interrupt sources disabled and
8640 * the hardware shut down.
8642 static void tg3_free_consistent(struct tg3 *tp)
8644 int i;
8646 for (i = 0; i < tp->irq_cnt; i++) {
8647 struct tg3_napi *tnapi = &tp->napi[i];
8649 if (tnapi->hw_status) {
8650 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8651 tnapi->hw_status,
8652 tnapi->status_mapping);
8653 tnapi->hw_status = NULL;
8657 tg3_mem_rx_release(tp);
8658 tg3_mem_tx_release(tp);
8660 if (tp->hw_stats) {
8661 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8662 tp->hw_stats, tp->stats_mapping);
8663 tp->hw_stats = NULL;
8668 * Must not be invoked with interrupt sources disabled and
8669 * the hardware shut down. Can sleep.
8671 static int tg3_alloc_consistent(struct tg3 *tp)
8673 int i;
8675 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8676 sizeof(struct tg3_hw_stats),
8677 &tp->stats_mapping, GFP_KERNEL);
8678 if (!tp->hw_stats)
8679 goto err_out;
8681 for (i = 0; i < tp->irq_cnt; i++) {
8682 struct tg3_napi *tnapi = &tp->napi[i];
8683 struct tg3_hw_status *sblk;
8685 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8686 TG3_HW_STATUS_SIZE,
8687 &tnapi->status_mapping,
8688 GFP_KERNEL);
8689 if (!tnapi->hw_status)
8690 goto err_out;
8692 sblk = tnapi->hw_status;
8694 if (tg3_flag(tp, ENABLE_RSS)) {
8695 u16 *prodptr = NULL;
8698 * When RSS is enabled, the status block format changes
8699 * slightly. The "rx_jumbo_consumer", "reserved",
8700 * and "rx_mini_consumer" members get mapped to the
8701 * other three rx return ring producer indexes.
8703 switch (i) {
8704 case 1:
8705 prodptr = &sblk->idx[0].rx_producer;
8706 break;
8707 case 2:
8708 prodptr = &sblk->rx_jumbo_consumer;
8709 break;
8710 case 3:
8711 prodptr = &sblk->reserved;
8712 break;
8713 case 4:
8714 prodptr = &sblk->rx_mini_consumer;
8715 break;
8717 tnapi->rx_rcb_prod_idx = prodptr;
8718 } else {
8719 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8723 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8724 goto err_out;
8726 return 0;
8728 err_out:
8729 tg3_free_consistent(tp);
8730 return -ENOMEM;
8733 #define MAX_WAIT_CNT 1000
8735 /* To stop a block, clear the enable bit and poll till it
8736 * clears. tp->lock is held.
8738 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8740 unsigned int i;
8741 u32 val;
8743 if (tg3_flag(tp, 5705_PLUS)) {
8744 switch (ofs) {
8745 case RCVLSC_MODE:
8746 case DMAC_MODE:
8747 case MBFREE_MODE:
8748 case BUFMGR_MODE:
8749 case MEMARB_MODE:
8750 /* We can't enable/disable these bits of the
8751 * 5705/5750; just say success.
8753 return 0;
8755 default:
8756 break;
8760 val = tr32(ofs);
8761 val &= ~enable_bit;
8762 tw32_f(ofs, val);
8764 for (i = 0; i < MAX_WAIT_CNT; i++) {
8765 if (pci_channel_offline(tp->pdev)) {
8766 dev_err(&tp->pdev->dev,
8767 "tg3_stop_block device offline, "
8768 "ofs=%lx enable_bit=%x\n",
8769 ofs, enable_bit);
8770 return -ENODEV;
8773 udelay(100);
8774 val = tr32(ofs);
8775 if ((val & enable_bit) == 0)
8776 break;
8779 if (i == MAX_WAIT_CNT && !silent) {
8780 dev_err(&tp->pdev->dev,
8781 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8782 ofs, enable_bit);
8783 return -ENODEV;
8786 return 0;
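/* Aside: with MAX_WAIT_CNT == 1000 and a 100 us poll interval,
 * tg3_stop_block() gives a block up to ~100 ms to quiesce before
 * reporting -ENODEV.
 */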
8789 /* tp->lock is held. */
8790 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8792 int i, err;
8794 tg3_disable_ints(tp);
8796 if (pci_channel_offline(tp->pdev)) {
8797 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8798 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8799 err = -ENODEV;
8800 goto err_no_dev;
8803 tp->rx_mode &= ~RX_MODE_ENABLE;
8804 tw32_f(MAC_RX_MODE, tp->rx_mode);
8805 udelay(10);
8807 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8808 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8809 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8810 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8811 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8812 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8814 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8815 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8816 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8817 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8818 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8819 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8820 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8822 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8823 tw32_f(MAC_MODE, tp->mac_mode);
8824 udelay(40);
8826 tp->tx_mode &= ~TX_MODE_ENABLE;
8827 tw32_f(MAC_TX_MODE, tp->tx_mode);
8829 for (i = 0; i < MAX_WAIT_CNT; i++) {
8830 udelay(100);
8831 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8832 break;
8834 if (i >= MAX_WAIT_CNT) {
8835 dev_err(&tp->pdev->dev,
8836 "%s timed out, TX_MODE_ENABLE will not clear "
8837 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8838 err |= -ENODEV;
8841 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8842 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8843 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8845 tw32(FTQ_RESET, 0xffffffff);
8846 tw32(FTQ_RESET, 0x00000000);
8848 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8849 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8851 err_no_dev:
8852 for (i = 0; i < tp->irq_cnt; i++) {
8853 struct tg3_napi *tnapi = &tp->napi[i];
8854 if (tnapi->hw_status)
8855 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8858 return err;
8861 /* Save PCI command register before chip reset */
8862 static void tg3_save_pci_state(struct tg3 *tp)
8864 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8867 /* Restore PCI state after chip reset */
8868 static void tg3_restore_pci_state(struct tg3 *tp)
8870 u32 val;
8872 /* Re-enable indirect register accesses. */
8873 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8874 tp->misc_host_ctrl);
8876 /* Set MAX PCI retry to zero. */
8877 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8878 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8879 tg3_flag(tp, PCIX_MODE))
8880 val |= PCISTATE_RETRY_SAME_DMA;
8881 /* Allow reads and writes to the APE register and memory space. */
8882 if (tg3_flag(tp, ENABLE_APE))
8883 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8884 PCISTATE_ALLOW_APE_SHMEM_WR |
8885 PCISTATE_ALLOW_APE_PSPACE_WR;
8886 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8888 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8890 if (!tg3_flag(tp, PCI_EXPRESS)) {
8891 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8892 tp->pci_cacheline_sz);
8893 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8894 tp->pci_lat_timer);
8897 /* Make sure PCI-X relaxed ordering bit is clear. */
8898 if (tg3_flag(tp, PCIX_MODE)) {
8899 u16 pcix_cmd;
8901 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8902 &pcix_cmd);
8903 pcix_cmd &= ~PCI_X_CMD_ERO;
8904 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8905 pcix_cmd);
8908 if (tg3_flag(tp, 5780_CLASS)) {
8910 /* Chip reset on 5780 will reset the MSI enable bit,
8911 * so we need to restore it.
8913 if (tg3_flag(tp, USING_MSI)) {
8914 u16 ctrl;
8916 pci_read_config_word(tp->pdev,
8917 tp->msi_cap + PCI_MSI_FLAGS,
8918 &ctrl);
8919 pci_write_config_word(tp->pdev,
8920 tp->msi_cap + PCI_MSI_FLAGS,
8921 ctrl | PCI_MSI_FLAGS_ENABLE);
8922 val = tr32(MSGINT_MODE);
8923 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8928 /* tp->lock is held. */
8929 static int tg3_chip_reset(struct tg3 *tp)
8931 u32 val;
8932 void (*write_op)(struct tg3 *, u32, u32);
8933 int i, err;
8935 tg3_nvram_lock(tp);
8937 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8939 /* No matching tg3_nvram_unlock() after this because
8940 * chip reset below will undo the nvram lock.
8942 tp->nvram_lock_cnt = 0;
8944 /* GRC_MISC_CFG core clock reset will clear the memory
8945 * enable bit in PCI register 4 and the MSI enable bit
8946 * on some chips, so we save relevant registers here.
8948 tg3_save_pci_state(tp);
8950 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8951 tg3_flag(tp, 5755_PLUS))
8952 tw32(GRC_FASTBOOT_PC, 0);
8955 * We must avoid the readl() that normally takes place.
8956 * It locks machines, causes machine checks, and other
8957 * fun things. So we temporarily disable the 5701
8958 * hardware workaround while we do the reset.
8960 write_op = tp->write32;
8961 if (write_op == tg3_write_flush_reg32)
8962 tp->write32 = tg3_write32;
8964 /* Prevent the irq handler from reading or writing PCI registers
8965 * during chip reset when the memory enable bit in the PCI command
8966 * register may be cleared. The chip does not generate interrupts
8967 * at this time, but the irq handler may still be called due to irq
8968 * sharing or irqpoll.
8970 tg3_flag_set(tp, CHIP_RESETTING);
8971 for (i = 0; i < tp->irq_cnt; i++) {
8972 struct tg3_napi *tnapi = &tp->napi[i];
8973 if (tnapi->hw_status) {
8974 tnapi->hw_status->status = 0;
8975 tnapi->hw_status->status_tag = 0;
8977 tnapi->last_tag = 0;
8978 tnapi->last_irq_tag = 0;
8980 smp_mb();
8982 for (i = 0; i < tp->irq_cnt; i++)
8983 synchronize_irq(tp->napi[i].irq_vec);
8985 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8986 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8987 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8990 /* do the reset */
8991 val = GRC_MISC_CFG_CORECLK_RESET;
8993 if (tg3_flag(tp, PCI_EXPRESS)) {
8994 /* Force PCIe 1.0a mode */
8995 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8996 !tg3_flag(tp, 57765_PLUS) &&
8997 tr32(TG3_PCIE_PHY_TSTCTL) ==
8998 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8999 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9001 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9002 tw32(GRC_MISC_CFG, (1 << 29));
9003 val |= (1 << 29);
9007 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9008 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9009 tw32(GRC_VCPU_EXT_CTRL,
9010 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9013 /* Manage gphy power for all CPMU-absent PCIe devices. */
9014 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9015 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9017 tw32(GRC_MISC_CFG, val);
9019 /* restore 5701 hardware bug workaround write method */
9020 tp->write32 = write_op;
9022 /* Unfortunately, we have to delay before the PCI read back.
9023 * Some 575X chips will not even respond to a PCI cfg access
9024 * when the reset command is given to the chip.
9026 * How do these hardware designers expect things to work
9027 * properly if the PCI write is posted for a long period
9028 * of time? It is always necessary to have some method by
9029 * which a register read-back can occur to push out the
9030 * posted write that performs the reset.
9032 * For most tg3 variants the trick below has worked.
9033 * Ho hum...
9035 udelay(120);
9037 /* Flush PCI posted writes. The normal MMIO registers
9038 * are inaccessible at this time so this is the only
9039 * way to do this reliably (actually, this is no longer
9040 * the case, see above). I tried to use indirect
9041 * register read/write but this upset some 5701 variants.
9043 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9045 udelay(120);
9047 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9048 u16 val16;
9050 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9051 int j;
9052 u32 cfg_val;
9054 /* Wait for link training to complete. */
9055 for (j = 0; j < 5000; j++)
9056 udelay(100);
9058 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9059 pci_write_config_dword(tp->pdev, 0xc4,
9060 cfg_val | (1 << 15));
9063 /* Clear the "no snoop" and "relaxed ordering" bits. */
9064 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9066 * Older PCIe devices only support the 128-byte
9067 * MPS (max payload size) setting. Enforce the restriction.
9069 if (!tg3_flag(tp, CPMU_PRESENT))
9070 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9071 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9073 /* Clear error status */
9074 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9075 PCI_EXP_DEVSTA_CED |
9076 PCI_EXP_DEVSTA_NFED |
9077 PCI_EXP_DEVSTA_FED |
9078 PCI_EXP_DEVSTA_URD);
9081 tg3_restore_pci_state(tp);
9083 tg3_flag_clear(tp, CHIP_RESETTING);
9084 tg3_flag_clear(tp, ERROR_PROCESSED);
9086 val = 0;
9087 if (tg3_flag(tp, 5780_CLASS))
9088 val = tr32(MEMARB_MODE);
9089 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9091 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9092 tg3_stop_fw(tp);
9093 tw32(0x5000, 0x400);
9096 if (tg3_flag(tp, IS_SSB_CORE)) {
9098 * BCM4785: In order to avoid repercussions from using
9099 * potentially defective internal ROM, stop the Rx RISC CPU,
9100 * which is not required for normal operation.
9102 tg3_stop_fw(tp);
9103 tg3_halt_cpu(tp, RX_CPU_BASE);
9106 err = tg3_poll_fw(tp);
9107 if (err)
9108 return err;
9110 tw32(GRC_MODE, tp->grc_mode);
9112 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9113 val = tr32(0xc4);
9115 tw32(0xc4, val | (1 << 15));
9118 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9119 tg3_asic_rev(tp) == ASIC_REV_5705) {
9120 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9121 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9122 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9123 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9126 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9127 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9128 val = tp->mac_mode;
9129 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9130 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9131 val = tp->mac_mode;
9132 } else
9133 val = 0;
9135 tw32_f(MAC_MODE, val);
9136 udelay(40);
9138 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9140 tg3_mdio_start(tp);
9142 if (tg3_flag(tp, PCI_EXPRESS) &&
9143 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9144 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9145 !tg3_flag(tp, 57765_PLUS)) {
9146 val = tr32(0x7c00);
9148 tw32(0x7c00, val | (1 << 25));
9151 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9152 val = tr32(TG3_CPMU_CLCK_ORIDE);
9153 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9156 /* Reprobe ASF enable state. */
9157 tg3_flag_clear(tp, ENABLE_ASF);
9158 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9159 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9161 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9162 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9163 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9164 u32 nic_cfg;
9166 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9167 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9168 tg3_flag_set(tp, ENABLE_ASF);
9169 tp->last_event_jiffies = jiffies;
9170 if (tg3_flag(tp, 5750_PLUS))
9171 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9173 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9174 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9175 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9176 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9177 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9181 return 0;
9184 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9185 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9187 /* tp->lock is held. */
9188 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9190 int err;
9192 tg3_stop_fw(tp);
9194 tg3_write_sig_pre_reset(tp, kind);
9196 tg3_abort_hw(tp, silent);
9197 err = tg3_chip_reset(tp);
9199 __tg3_set_mac_addr(tp, false);
9201 tg3_write_sig_legacy(tp, kind);
9202 tg3_write_sig_post_reset(tp, kind);
9204 if (tp->hw_stats) {
9205 /* Save the stats across chip resets... */
9206 tg3_get_nstats(tp, &tp->net_stats_prev);
9207 tg3_get_estats(tp, &tp->estats_prev);
9209 /* And make sure the next sample is new data */
9210 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9213 return err;
9216 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9218 struct tg3 *tp = netdev_priv(dev);
9219 struct sockaddr *addr = p;
9220 int err = 0;
9221 bool skip_mac_1 = false;
9223 if (!is_valid_ether_addr(addr->sa_data))
9224 return -EADDRNOTAVAIL;
9226 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9228 if (!netif_running(dev))
9229 return 0;
9231 if (tg3_flag(tp, ENABLE_ASF)) {
9232 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9234 addr0_high = tr32(MAC_ADDR_0_HIGH);
9235 addr0_low = tr32(MAC_ADDR_0_LOW);
9236 addr1_high = tr32(MAC_ADDR_1_HIGH);
9237 addr1_low = tr32(MAC_ADDR_1_LOW);
9239 /* Skip MAC addr 1 if ASF is using it. */
9240 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9241 !(addr1_high == 0 && addr1_low == 0))
9242 skip_mac_1 = true;
9244 spin_lock_bh(&tp->lock);
9245 __tg3_set_mac_addr(tp, skip_mac_1);
9246 spin_unlock_bh(&tp->lock);
9248 return err;
9251 /* tp->lock is held. */
9252 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9253 dma_addr_t mapping, u32 maxlen_flags,
9254 u32 nic_addr)
9256 tg3_write_mem(tp,
9257 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9258 ((u64) mapping >> 32));
9259 tg3_write_mem(tp,
9260 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9261 ((u64) mapping & 0xffffffff));
9262 tg3_write_mem(tp,
9263 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9264 maxlen_flags);
9266 if (!tg3_flag(tp, 5705_PLUS))
9267 tg3_write_mem(tp,
9268 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9269 nic_addr);
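/* Aside: a TG3_BDINFO block in NIC SRAM is four 32-bit words: the high
 * and low halves of the ring's host DMA address, the maxlen/flags word,
 * and, only on chips that predate the 5705-plus family, the ring's NIC
 * SRAM address.
 */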
9273 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9275 int i = 0;
9277 if (!tg3_flag(tp, ENABLE_TSS)) {
9278 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9279 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9280 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9281 } else {
9282 tw32(HOSTCC_TXCOL_TICKS, 0);
9283 tw32(HOSTCC_TXMAX_FRAMES, 0);
9284 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9286 for (; i < tp->txq_cnt; i++) {
9287 u32 reg;
9289 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9290 tw32(reg, ec->tx_coalesce_usecs);
9291 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9292 tw32(reg, ec->tx_max_coalesced_frames);
9293 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9294 tw32(reg, ec->tx_max_coalesced_frames_irq);
9298 for (; i < tp->irq_max - 1; i++) {
9299 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9300 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9301 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
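/* Aside: the per-vector coalescing registers are laid out in banks with
 * a 0x18-byte stride, so vector i's TX bank is addressed as:
 *
 *     HOSTCC_TXCOL_TICKS_VEC1     + i * 0x18
 *     HOSTCC_TXMAX_FRAMES_VEC1    + i * 0x18
 *     HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18
 *
 * tg3_coal_rx_init() below uses the same stride for the RX banks, and
 * the trailing loops zero the banks of any unused vectors so stale
 * values cannot affect interrupt coalescing.
 */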
9305 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9307 int i = 0;
9308 u32 limit = tp->rxq_cnt;
9310 if (!tg3_flag(tp, ENABLE_RSS)) {
9311 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9312 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9313 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9314 limit--;
9315 } else {
9316 tw32(HOSTCC_RXCOL_TICKS, 0);
9317 tw32(HOSTCC_RXMAX_FRAMES, 0);
9318 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9321 for (; i < limit; i++) {
9322 u32 reg;
9324 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9325 tw32(reg, ec->rx_coalesce_usecs);
9326 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9327 tw32(reg, ec->rx_max_coalesced_frames);
9328 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9329 tw32(reg, ec->rx_max_coalesced_frames_irq);
9332 for (; i < tp->irq_max - 1; i++) {
9333 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9334 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9335 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9339 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9341 tg3_coal_tx_init(tp, ec);
9342 tg3_coal_rx_init(tp, ec);
9344 if (!tg3_flag(tp, 5705_PLUS)) {
9345 u32 val = ec->stats_block_coalesce_usecs;
9347 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9348 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9350 if (!tp->link_up)
9351 val = 0;
9353 tw32(HOSTCC_STAT_COAL_TICKS, val);
9357 /* tp->lock is held. */
9358 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9360 u32 txrcb, limit;
9362 /* Disable all transmit rings but the first. */
9363 if (!tg3_flag(tp, 5705_PLUS))
9364 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9365 else if (tg3_flag(tp, 5717_PLUS))
9366 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9367 else if (tg3_flag(tp, 57765_CLASS) ||
9368 tg3_asic_rev(tp) == ASIC_REV_5762)
9369 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9370 else
9371 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9373 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9374 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9375 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9376 BDINFO_FLAGS_DISABLED);
9379 /* tp->lock is held. */
9380 static void tg3_tx_rcbs_init(struct tg3 *tp)
9382 int i = 0;
9383 u32 txrcb = NIC_SRAM_SEND_RCB;
9385 if (tg3_flag(tp, ENABLE_TSS))
9386 i++;
9388 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9389 struct tg3_napi *tnapi = &tp->napi[i];
9391 if (!tnapi->tx_ring)
9392 continue;
9394 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9395 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9396 NIC_SRAM_TX_BUFFER_DESC);
9400 /* tp->lock is held. */
9401 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9403 u32 rxrcb, limit;
9405 /* Disable all receive return rings but the first. */
9406 if (tg3_flag(tp, 5717_PLUS))
9407 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9408 else if (!tg3_flag(tp, 5705_PLUS))
9409 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9410 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9411 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9412 tg3_flag(tp, 57765_CLASS))
9413 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9414 else
9415 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9417 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9418 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9419 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9420 BDINFO_FLAGS_DISABLED);
9423 /* tp->lock is held. */
9424 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9426 int i = 0;
9427 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9429 if (tg3_flag(tp, ENABLE_RSS))
9430 i++;
9432 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9433 struct tg3_napi *tnapi = &tp->napi[i];
9435 if (!tnapi->rx_rcb)
9436 continue;
9438 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9439 (tp->rx_ret_ring_mask + 1) <<
9440 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9444 /* tp->lock is held. */
9445 static void tg3_rings_reset(struct tg3 *tp)
9447 int i;
9448 u32 stblk;
9449 struct tg3_napi *tnapi = &tp->napi[0];
9451 tg3_tx_rcbs_disable(tp);
9453 tg3_rx_ret_rcbs_disable(tp);
9455 /* Disable interrupts */
9456 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9457 tp->napi[0].chk_msi_cnt = 0;
9458 tp->napi[0].last_rx_cons = 0;
9459 tp->napi[0].last_tx_cons = 0;
9461 /* Zero mailbox registers. */
9462 if (tg3_flag(tp, SUPPORT_MSIX)) {
9463 for (i = 1; i < tp->irq_max; i++) {
9464 tp->napi[i].tx_prod = 0;
9465 tp->napi[i].tx_cons = 0;
9466 if (tg3_flag(tp, ENABLE_TSS))
9467 tw32_mailbox(tp->napi[i].prodmbox, 0);
9468 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9469 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9470 tp->napi[i].chk_msi_cnt = 0;
9471 tp->napi[i].last_rx_cons = 0;
9472 tp->napi[i].last_tx_cons = 0;
9474 if (!tg3_flag(tp, ENABLE_TSS))
9475 tw32_mailbox(tp->napi[0].prodmbox, 0);
9476 } else {
9477 tp->napi[0].tx_prod = 0;
9478 tp->napi[0].tx_cons = 0;
9479 tw32_mailbox(tp->napi[0].prodmbox, 0);
9480 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9483 /* Make sure the NIC-based send BD rings are disabled. */
9484 if (!tg3_flag(tp, 5705_PLUS)) {
9485 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9486 for (i = 0; i < 16; i++)
9487 tw32_tx_mbox(mbox + i * 8, 0);
9490 /* Clear status block in ram. */
9491 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9493 /* Set status block DMA address */
9494 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9495 ((u64) tnapi->status_mapping >> 32));
9496 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9497 ((u64) tnapi->status_mapping & 0xffffffff));
9499 stblk = HOSTCC_STATBLCK_RING1;
9501 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9502 u64 mapping = (u64)tnapi->status_mapping;
9503 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9504 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9505 stblk += 8;
9507 /* Clear status block in ram. */
9508 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9511 tg3_tx_rcbs_init(tp);
9512 tg3_rx_ret_rcbs_init(tp);
9515 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9517 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9519 if (!tg3_flag(tp, 5750_PLUS) ||
9520 tg3_flag(tp, 5780_CLASS) ||
9521 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9522 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9523 tg3_flag(tp, 57765_PLUS))
9524 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9525 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9526 tg3_asic_rev(tp) == ASIC_REV_5787)
9527 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9528 else
9529 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9531 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9532 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9534 val = min(nic_rep_thresh, host_rep_thresh);
9535 tw32(RCVBDI_STD_THRESH, val);
9537 if (tg3_flag(tp, 57765_PLUS))
9538 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9540 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9541 return;
9543 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9545 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9547 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9548 tw32(RCVBDI_JUMBO_THRESH, val);
9550 if (tg3_flag(tp, 57765_PLUS))
9551 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
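/* Illustration (hypothetical numbers): the replenish threshold is the
 * more conservative of a NIC-side and a host-side limit. Assuming, say,
 * bdcache_maxcnt == 8 and rx_pending == 200:
 *
 *     nic_rep_thresh  = min(8 / 2, tp->rx_std_max_post);      // at most 4
 *     host_rep_thresh = max(200 / 8, 1);                      // 25
 *     val             = min(nic_rep_thresh, host_rep_thresh); // at most 4
 *
 * i.e. the chip starts fetching fresh RX buffer descriptors after at
 * most that many cached descriptors have been consumed.
 */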
9554 static inline u32 calc_crc(unsigned char *buf, int len)
9556 u32 reg;
9557 u32 tmp;
9558 int j, k;
9560 reg = 0xffffffff;
9562 for (j = 0; j < len; j++) {
9563 reg ^= buf[j];
9565 for (k = 0; k < 8; k++) {
9566 tmp = reg & 0x01;
9568 reg >>= 1;
9570 if (tmp)
9571 reg ^= 0xedb88320;
9575 return ~reg;
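/* Aside: calc_crc() is a plain bitwise implementation of the
 * bit-reflected CRC-32 used by Ethernet (polynomial 0xedb88320, initial
 * value 0xffffffff, final inversion); it should be equivalent to
 * ~crc32_le(~0, buf, len) from <linux/crc32.h>. __tg3_set_rx_mode()
 * below feeds each multicast address through it to pick a bin in the
 * 128-bit MAC hash filter.
 */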
9578 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9580 /* accept or reject all multicast frames */
9581 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9582 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9583 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9584 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9587 static void __tg3_set_rx_mode(struct net_device *dev)
9589 struct tg3 *tp = netdev_priv(dev);
9590 u32 rx_mode;
9592 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9593 RX_MODE_KEEP_VLAN_TAG);
9595 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9596 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9597 * flag clear.
9599 if (!tg3_flag(tp, ENABLE_ASF))
9600 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9601 #endif
9603 if (dev->flags & IFF_PROMISC) {
9604 /* Promiscuous mode. */
9605 rx_mode |= RX_MODE_PROMISC;
9606 } else if (dev->flags & IFF_ALLMULTI) {
9607 /* Accept all multicast. */
9608 tg3_set_multi(tp, 1);
9609 } else if (netdev_mc_empty(dev)) {
9610 /* Reject all multicast. */
9611 tg3_set_multi(tp, 0);
9612 } else {
9613 /* Accept one or more multicast(s). */
9614 struct netdev_hw_addr *ha;
9615 u32 mc_filter[4] = { 0, };
9616 u32 regidx;
9617 u32 bit;
9618 u32 crc;
9620 netdev_for_each_mc_addr(ha, dev) {
9621 crc = calc_crc(ha->addr, ETH_ALEN);
9622 bit = ~crc & 0x7f;
9623 regidx = (bit & 0x60) >> 5;
9624 bit &= 0x1f;
9625 mc_filter[regidx] |= (1 << bit);
9628 tw32(MAC_HASH_REG_0, mc_filter[0]);
9629 tw32(MAC_HASH_REG_1, mc_filter[1]);
9630 tw32(MAC_HASH_REG_2, mc_filter[2]);
9631 tw32(MAC_HASH_REG_3, mc_filter[3]);
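/* Illustration (made-up CRC value): how a multicast address maps to a
 * filter bit. Suppose calc_crc() returned 0x12345678 for some address:
 *
 *     bit    = ~0x12345678 & 0x7f;  // invert, keep low 7 bits -> 0x07
 *     regidx = (0x07 & 0x60) >> 5;  // bits 6:5 pick one of 4 regs -> 0
 *     bit   &= 0x1f;                // bits 4:0 pick one of 32 bits -> 7
 *     mc_filter[0] |= 1 << 7;
 *
 * so MAC_HASH_REG_0..3 together form a 128-bit hash filter.
 */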
9634 if (rx_mode != tp->rx_mode) {
9635 tp->rx_mode = rx_mode;
9636 tw32_f(MAC_RX_MODE, rx_mode);
9637 udelay(10);
9641 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9643 int i;
9645 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9646 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
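/* Aside: ethtool_rxfh_indir_default(i, qcnt) is the generic round-robin
 * default from <linux/ethtool.h> (effectively i % qcnt), so the table
 * spreads flows evenly across the qcnt RX queues.
 */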
9649 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9651 int i;
9653 if (!tg3_flag(tp, SUPPORT_MSIX))
9654 return;
9656 if (tp->rxq_cnt == 1) {
9657 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9658 return;
9661 /* Validate table against current IRQ count */
9662 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9663 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9664 break;
9667 if (i != TG3_RSS_INDIR_TBL_SIZE)
9668 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9671 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9673 int i = 0;
9674 u32 reg = MAC_RSS_INDIR_TBL_0;
9676 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9677 u32 val = tp->rss_ind_tbl[i];
9678 i++;
9679 for (; i % 8; i++) {
9680 val <<= 4;
9681 val |= tp->rss_ind_tbl[i];
9683 tw32(reg, val);
9684 reg += 4;
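/* Illustration: each 32-bit MAC_RSS_INDIR_TBL_* register packs eight
 * 4-bit queue indices, with the first entry in the most significant
 * nibble. E.g. the entries {1, 2, 3, 0, 1, 2, 3, 0} would be written as:
 *
 *     tw32(MAC_RSS_INDIR_TBL_0, 0x12301230);
 *
 * so TG3_RSS_INDIR_TBL_SIZE / 8 registers cover the whole table.
 */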
9688 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9690 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9691 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9692 else
9693 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9696 /* tp->lock is held. */
9697 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9699 u32 val, rdmac_mode;
9700 int i, err, limit;
9701 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9703 tg3_disable_ints(tp);
9705 tg3_stop_fw(tp);
9707 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9709 if (tg3_flag(tp, INIT_COMPLETE))
9710 tg3_abort_hw(tp, true);
9712 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9713 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9714 tg3_phy_pull_config(tp);
9715 tg3_eee_pull_config(tp, NULL);
9716 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9719 /* Enable MAC control of LPI */
9720 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9721 tg3_setup_eee(tp);
9723 if (reset_phy)
9724 tg3_phy_reset(tp);
9726 err = tg3_chip_reset(tp);
9727 if (err)
9728 return err;
9730 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9732 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9733 val = tr32(TG3_CPMU_CTRL);
9734 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9735 tw32(TG3_CPMU_CTRL, val);
9737 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9738 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9739 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9740 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9742 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9743 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9744 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9745 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9747 val = tr32(TG3_CPMU_HST_ACC);
9748 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9749 val |= CPMU_HST_ACC_MACCLK_6_25;
9750 tw32(TG3_CPMU_HST_ACC, val);
9753 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9754 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9755 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9756 PCIE_PWR_MGMT_L1_THRESH_4MS;
9757 tw32(PCIE_PWR_MGMT_THRESH, val);
9759 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9760 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9762 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9764 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9765 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9768 if (tg3_flag(tp, L1PLLPD_EN)) {
9769 u32 grc_mode = tr32(GRC_MODE);
9771 /* Access the lower 1K of PL PCIE block registers. */
9772 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9773 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9775 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9776 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9777 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9779 tw32(GRC_MODE, grc_mode);
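/* Aside: the sequence above is a recurring pattern for reaching the
 * PL/DL PCIe block registers: select the lower-1K window through the
 * GRC_MODE_PCIE_PORT_MASK field, read-modify-write the target register
 * via TG3_PCIE_TLDLPL_PORT, then restore the original GRC_MODE. The
 * 57765-class fixups below use the same pattern.
 */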
9782 if (tg3_flag(tp, 57765_CLASS)) {
9783 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9784 u32 grc_mode = tr32(GRC_MODE);
9786 /* Access the lower 1K of PL PCIE block registers. */
9787 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9788 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9790 val = tr32(TG3_PCIE_TLDLPL_PORT +
9791 TG3_PCIE_PL_LO_PHYCTL5);
9792 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9793 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9795 tw32(GRC_MODE, grc_mode);
9798 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9799 u32 grc_mode;
9801 /* Fix transmit hangs */
9802 val = tr32(TG3_CPMU_PADRNG_CTL);
9803 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9804 tw32(TG3_CPMU_PADRNG_CTL, val);
9806 grc_mode = tr32(GRC_MODE);
9808 /* Access the lower 1K of DL PCIE block registers. */
9809 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9810 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9812 val = tr32(TG3_PCIE_TLDLPL_PORT +
9813 TG3_PCIE_DL_LO_FTSMAX);
9814 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9815 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9816 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9818 tw32(GRC_MODE, grc_mode);
9821 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9822 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9823 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9824 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9827 /* This works around an issue with Athlon chipsets on
9828 * B3 tigon3 silicon. This bit has no effect on any
9829 * other revision. But do not set this on PCI Express
9830 * chips and don't even touch the clocks if the CPMU is present.
9832 if (!tg3_flag(tp, CPMU_PRESENT)) {
9833 if (!tg3_flag(tp, PCI_EXPRESS))
9834 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9835 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9838 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9839 tg3_flag(tp, PCIX_MODE)) {
9840 val = tr32(TG3PCI_PCISTATE);
9841 val |= PCISTATE_RETRY_SAME_DMA;
9842 tw32(TG3PCI_PCISTATE, val);
9845 if (tg3_flag(tp, ENABLE_APE)) {
9846 /* Allow reads and writes to the
9847 * APE register and memory space.
9849 val = tr32(TG3PCI_PCISTATE);
9850 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9851 PCISTATE_ALLOW_APE_SHMEM_WR |
9852 PCISTATE_ALLOW_APE_PSPACE_WR;
9853 tw32(TG3PCI_PCISTATE, val);
9856 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9857 /* Enable some hw fixes. */
9858 val = tr32(TG3PCI_MSI_DATA);
9859 val |= (1 << 26) | (1 << 28) | (1 << 29);
9860 tw32(TG3PCI_MSI_DATA, val);
9863 /* Descriptor ring init may make accesses to the
9864 * NIC SRAM area to set up the TX descriptors, so we
9865 * can only do this after the hardware has been
9866 * successfully reset.
9868 err = tg3_init_rings(tp);
9869 if (err)
9870 return err;
9872 if (tg3_flag(tp, 57765_PLUS)) {
9873 val = tr32(TG3PCI_DMA_RW_CTRL) &
9874 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9875 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9876 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9877 if (!tg3_flag(tp, 57765_CLASS) &&
9878 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9879 tg3_asic_rev(tp) != ASIC_REV_5762)
9880 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9881 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9882 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9883 tg3_asic_rev(tp) != ASIC_REV_5761) {
9884 /* This value is determined during the probe-time DMA
9885 * engine test, tg3_test_dma.
9887 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9890 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9891 GRC_MODE_4X_NIC_SEND_RINGS |
9892 GRC_MODE_NO_TX_PHDR_CSUM |
9893 GRC_MODE_NO_RX_PHDR_CSUM);
9894 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9896 /* Pseudo-header checksum is done by hardware logic and not
9897 * the offload processors, so make the chip do the pseudo-
9898 * header checksums on receive. For transmit it is more
9899 * convenient to do the pseudo-header checksum in software
9900 * as Linux does that on transmit for us in all cases.
9902 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9904 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9905 if (tp->rxptpctl)
9906 tw32(TG3_RX_PTP_CTL,
9907 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9909 if (tg3_flag(tp, PTP_CAPABLE))
9910 val |= GRC_MODE_TIME_SYNC_ENABLE;
9912 tw32(GRC_MODE, tp->grc_mode | val);
9914 /* Set up the timer prescaler register. The clock is always 66 MHz; prescaling by 65 + 1 yields a 1 MHz (1 us) timer tick. */
9915 val = tr32(GRC_MISC_CFG);
9916 val &= ~0xff;
9917 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9918 tw32(GRC_MISC_CFG, val);
9920 /* Initialize MBUF/DESC pool. */
9921 if (tg3_flag(tp, 5750_PLUS)) {
9922 /* Do nothing. */
9923 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9924 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9925 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9926 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9927 else
9928 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9929 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9930 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9931 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9932 int fw_len;
9934 fw_len = tp->fw_len;
9935 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
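/* Aside: the expression above rounds fw_len up to the next 128-byte
 * (0x80) boundary; the mbuf pool is then placed immediately after the
 * TSO firmware image in NIC SRAM.
 */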
9936 tw32(BUFMGR_MB_POOL_ADDR,
9937 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9938 tw32(BUFMGR_MB_POOL_SIZE,
9939 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9942 if (tp->dev->mtu <= ETH_DATA_LEN) {
9943 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9944 tp->bufmgr_config.mbuf_read_dma_low_water);
9945 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9946 tp->bufmgr_config.mbuf_mac_rx_low_water);
9947 tw32(BUFMGR_MB_HIGH_WATER,
9948 tp->bufmgr_config.mbuf_high_water);
9949 } else {
9950 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9951 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9952 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9953 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9954 tw32(BUFMGR_MB_HIGH_WATER,
9955 tp->bufmgr_config.mbuf_high_water_jumbo);
9957 tw32(BUFMGR_DMA_LOW_WATER,
9958 tp->bufmgr_config.dma_low_water);
9959 tw32(BUFMGR_DMA_HIGH_WATER,
9960 tp->bufmgr_config.dma_high_water);
9962 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9963 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9964 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9965 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9966 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9967 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9968 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9969 tw32(BUFMGR_MODE, val);
9970 for (i = 0; i < 2000; i++) {
9971 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9972 break;
9973 udelay(10);
9975 if (i >= 2000) {
9976 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9977 return -ENODEV;
9980 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9981 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9983 tg3_setup_rxbd_thresholds(tp);
9985 /* Initialize the TG3_BDINFOs at:
9986 * RCVDBDI_STD_BD: standard eth size rx ring
9987 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9988 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9990 * like so:
9991 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9992 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9993 * ring attribute flags
9994 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9996 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9997 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9999 * The size of each ring is fixed in the firmware, but the location is
10000 * configurable.
10002 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10003 ((u64) tpr->rx_std_mapping >> 32));
10004 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10005 ((u64) tpr->rx_std_mapping & 0xffffffff));
10006 if (!tg3_flag(tp, 5717_PLUS))
10007 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10008 NIC_SRAM_RX_BUFFER_DESC);
10010 /* Disable the mini ring */
10011 if (!tg3_flag(tp, 5705_PLUS))
10012 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10013 BDINFO_FLAGS_DISABLED);
10015 /* Program the jumbo buffer descriptor ring control
10016 * blocks on those devices that have them.
10018 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10019 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10021 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10022 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10023 ((u64) tpr->rx_jmb_mapping >> 32));
10024 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10025 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10026 val = TG3_RX_JMB_RING_SIZE(tp) <<
10027 BDINFO_FLAGS_MAXLEN_SHIFT;
10028 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10029 val | BDINFO_FLAGS_USE_EXT_RECV);
10030 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10031 tg3_flag(tp, 57765_CLASS) ||
10032 tg3_asic_rev(tp) == ASIC_REV_5762)
10033 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10034 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10035 } else {
10036 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10037 BDINFO_FLAGS_DISABLED);
10040 if (tg3_flag(tp, 57765_PLUS)) {
10041 val = TG3_RX_STD_RING_SIZE(tp);
10042 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10043 val |= (TG3_RX_STD_DMA_SZ << 2);
10044 } else
10045 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10046 } else
10047 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10049 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10051 tpr->rx_std_prod_idx = tp->rx_pending;
10052 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10054 tpr->rx_jmb_prod_idx =
10055 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10056 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
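/* Aside: writing rx_pending to the standard producer index mailbox
 * publishes the buffers just allocated by tg3_rx_prodring_alloc() to the
 * chip; a nonzero jumbo index is published only when the jumbo ring is
 * enabled.
 */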
10058 tg3_rings_reset(tp);
10060 /* Initialize MAC address and backoff seed. */
10061 __tg3_set_mac_addr(tp, false);
10063 /* MTU + ethernet header + FCS + optional VLAN tag */
10064 tw32(MAC_RX_MTU_SIZE,
10065 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
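/* E.g. with the default 1500-byte MTU this programs
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522 bytes.
 */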
10067 /* The slot time is changed by tg3_setup_phy if we
10068 * run at gigabit with half duplex.
10070 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10071 (6 << TX_LENGTHS_IPG_SHIFT) |
10072 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10074 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10075 tg3_asic_rev(tp) == ASIC_REV_5762)
10076 val |= tr32(MAC_TX_LENGTHS) &
10077 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10078 TX_LENGTHS_CNT_DWN_VAL_MSK);
10080 tw32(MAC_TX_LENGTHS, val);
10082 /* Receive rules. */
10083 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10084 tw32(RCVLPC_CONFIG, 0x0181);
10086 /* Calculate the RDMAC_MODE setting early; we need it to determine
10087 * the RCVLPC_STATE_ENABLE mask.
10089 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10090 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10091 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10092 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10093 RDMAC_MODE_LNGREAD_ENAB);
10095 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10096 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10098 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10099 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10100 tg3_asic_rev(tp) == ASIC_REV_57780)
10101 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10102 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10103 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10105 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10106 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10107 if (tg3_flag(tp, TSO_CAPABLE) &&
10108 tg3_asic_rev(tp) == ASIC_REV_5705) {
10109 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10110 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10111 !tg3_flag(tp, IS_5788)) {
10112 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10116 if (tg3_flag(tp, PCI_EXPRESS))
10117 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10119 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10120 tp->dma_limit = 0;
10121 if (tp->dev->mtu <= ETH_DATA_LEN) {
10122 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10123 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10127 if (tg3_flag(tp, HW_TSO_1) ||
10128 tg3_flag(tp, HW_TSO_2) ||
10129 tg3_flag(tp, HW_TSO_3))
10130 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10132 if (tg3_flag(tp, 57765_PLUS) ||
10133 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10134 tg3_asic_rev(tp) == ASIC_REV_57780)
10135 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10137 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10138 tg3_asic_rev(tp) == ASIC_REV_5762)
10139 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10141 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10142 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10143 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10144 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10145 tg3_flag(tp, 57765_PLUS)) {
10146 u32 tgtreg;
10148 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10149 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10150 else
10151 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10153 val = tr32(tgtreg);
10154 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10155 tg3_asic_rev(tp) == ASIC_REV_5762) {
10156 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10157 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10158 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10159 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10160 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10161 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10163 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10166 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10167 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10168 tg3_asic_rev(tp) == ASIC_REV_5762) {
10169 u32 tgtreg;
10171 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10172 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10173 else
10174 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10176 val = tr32(tgtreg);
10177 tw32(tgtreg, val |
10178 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10179 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10182 /* Receive/send statistics. */
10183 if (tg3_flag(tp, 5750_PLUS)) {
10184 val = tr32(RCVLPC_STATS_ENABLE);
10185 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10186 tw32(RCVLPC_STATS_ENABLE, val);
10187 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10188 tg3_flag(tp, TSO_CAPABLE)) {
10189 val = tr32(RCVLPC_STATS_ENABLE);
10190 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10191 tw32(RCVLPC_STATS_ENABLE, val);
10192 } else {
10193 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10195 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10196 tw32(SNDDATAI_STATSENAB, 0xffffff);
10197 tw32(SNDDATAI_STATSCTRL,
10198 (SNDDATAI_SCTRL_ENABLE |
10199 SNDDATAI_SCTRL_FASTUPD));
10201 /* Set up the host coalescing engine. */
10202 tw32(HOSTCC_MODE, 0);
10203 for (i = 0; i < 2000; i++) {
10204 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10205 break;
10206 udelay(10);
10209 __tg3_set_coalesce(tp, &tp->coal);
10211 if (!tg3_flag(tp, 5705_PLUS)) {
10212 /* Status/statistics block address. See tg3_timer,
10213 * the tg3_periodic_fetch_stats call there, and
10214 * tg3_get_stats to see how this works for 5705/5750 chips.
10216 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10217 ((u64) tp->stats_mapping >> 32));
10218 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10219 ((u64) tp->stats_mapping & 0xffffffff));
10220 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10222 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10224 /* Clear statistics and status block memory areas */
10225 for (i = NIC_SRAM_STATS_BLK;
10226 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10227 i += sizeof(u32)) {
10228 tg3_write_mem(tp, i, 0);
10229 udelay(40);
10233 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10235 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10236 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10237 if (!tg3_flag(tp, 5705_PLUS))
10238 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10240 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10241 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10242 /* reset to prevent losing 1st rx packet intermittently */
10243 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10244 udelay(10);
10247 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10248 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10249 MAC_MODE_FHDE_ENABLE;
10250 if (tg3_flag(tp, ENABLE_APE))
10251 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10252 if (!tg3_flag(tp, 5705_PLUS) &&
10253 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10254 tg3_asic_rev(tp) != ASIC_REV_5700)
10255 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10256 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10257 udelay(40);
10259 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10260 * If TG3_FLAG_IS_NIC is zero, we should read the
10261 * register to preserve the GPIO settings for LOMs. The GPIOs,
10262 * whether used as inputs or outputs, are set by boot code after
10263 * reset.
10265 if (!tg3_flag(tp, IS_NIC)) {
10266 u32 gpio_mask;
10268 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10269 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10270 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10272 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10273 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10274 GRC_LCLCTRL_GPIO_OUTPUT3;
10276 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10277 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10279 tp->grc_local_ctrl &= ~gpio_mask;
10280 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10282 /* GPIO1 must be driven high for eeprom write protect */
10283 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10284 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10285 GRC_LCLCTRL_GPIO_OUTPUT1);
10287 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10288 udelay(100);
10290 if (tg3_flag(tp, USING_MSIX)) {
10291 val = tr32(MSGINT_MODE);
10292 val |= MSGINT_MODE_ENABLE;
10293 if (tp->irq_cnt > 1)
10294 val |= MSGINT_MODE_MULTIVEC_EN;
10295 if (!tg3_flag(tp, 1SHOT_MSI))
10296 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10297 tw32(MSGINT_MODE, val);
10300 if (!tg3_flag(tp, 5705_PLUS)) {
10301 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10302 udelay(40);
10305 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10306 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10307 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10308 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10309 WDMAC_MODE_LNGREAD_ENAB);
10311 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10312 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10313 if (tg3_flag(tp, TSO_CAPABLE) &&
10314 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10315 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10316 /* nothing */
10317 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10318 !tg3_flag(tp, IS_5788)) {
10319 val |= WDMAC_MODE_RX_ACCEL;
10323 /* Enable host coalescing bug fix */
10324 if (tg3_flag(tp, 5755_PLUS))
10325 val |= WDMAC_MODE_STATUS_TAG_FIX;
10327 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10328 val |= WDMAC_MODE_BURST_ALL_DATA;
10330 tw32_f(WDMAC_MODE, val);
10331 udelay(40);
10333 if (tg3_flag(tp, PCIX_MODE)) {
10334 u16 pcix_cmd;
10336 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10337 &pcix_cmd);
10338 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10339 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10340 pcix_cmd |= PCI_X_CMD_READ_2K;
10341 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10342 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10343 pcix_cmd |= PCI_X_CMD_READ_2K;
10345 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10346 pcix_cmd);
10349 tw32_f(RDMAC_MODE, rdmac_mode);
10350 udelay(40);
10352 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10353 tg3_asic_rev(tp) == ASIC_REV_5720) {
10354 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10355 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10356 break;
10358 if (i < TG3_NUM_RDMA_CHANNELS) {
10359 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10360 val |= tg3_lso_rd_dma_workaround_bit(tp);
10361 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10362 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10366 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10367 if (!tg3_flag(tp, 5705_PLUS))
10368 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10370 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10371 tw32(SNDDATAC_MODE,
10372 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10373 else
10374 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10376 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10377 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10378 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10379 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10380 val |= RCVDBDI_MODE_LRG_RING_SZ;
10381 tw32(RCVDBDI_MODE, val);
10382 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10383 if (tg3_flag(tp, HW_TSO_1) ||
10384 tg3_flag(tp, HW_TSO_2) ||
10385 tg3_flag(tp, HW_TSO_3))
10386 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10387 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10388 if (tg3_flag(tp, ENABLE_TSS))
10389 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10390 tw32(SNDBDI_MODE, val);
10391 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10393 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10394 err = tg3_load_5701_a0_firmware_fix(tp);
10395 if (err)
10396 return err;
10399 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10400 /* Ignore any errors from the firmware download. If the download
10401 * fails, the device will operate with EEE disabled.
10403 tg3_load_57766_firmware(tp);
10406 if (tg3_flag(tp, TSO_CAPABLE)) {
10407 err = tg3_load_tso_firmware(tp);
10408 if (err)
10409 return err;
10412 tp->tx_mode = TX_MODE_ENABLE;
10414 if (tg3_flag(tp, 5755_PLUS) ||
10415 tg3_asic_rev(tp) == ASIC_REV_5906)
10416 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10418 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10419 tg3_asic_rev(tp) == ASIC_REV_5762) {
10420 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10421 tp->tx_mode &= ~val;
10422 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10425 tw32_f(MAC_TX_MODE, tp->tx_mode);
10426 udelay(100);
10428 if (tg3_flag(tp, ENABLE_RSS)) {
10429 tg3_rss_write_indir_tbl(tp);
10431 /* Setup the "secret" hash key. */
10432 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10433 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10434 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10435 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10436 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10437 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10438 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10439 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10440 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10441 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
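/* The ten MAC_RSS_HASH_KEY_* words above supply the 40-byte key for the
 * chip's RSS packet hash (presumably a Toeplitz-style hash over the
 * IPv4/IPv6 and TCP header fields selected by the RX_MODE_RSS_*_HASH_EN
 * bits configured below).
 */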
10444 tp->rx_mode = RX_MODE_ENABLE;
10445 if (tg3_flag(tp, 5755_PLUS))
10446 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10448 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10449 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10451 if (tg3_flag(tp, ENABLE_RSS))
10452 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10453 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10454 RX_MODE_RSS_IPV6_HASH_EN |
10455 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10456 RX_MODE_RSS_IPV4_HASH_EN |
10457 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10459 tw32_f(MAC_RX_MODE, tp->rx_mode);
10460 udelay(10);
10462 tw32(MAC_LED_CTRL, tp->led_ctrl);
10464 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10465 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10466 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10467 udelay(10);
10469 tw32_f(MAC_RX_MODE, tp->rx_mode);
10470 udelay(10);
10472 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10473 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10474 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10475 /* Set drive transmission level to 1.2V */
10476 /* only if the signal pre-emphasis bit is not set */
10477 val = tr32(MAC_SERDES_CFG);
10478 val &= 0xfffff000;
10479 val |= 0x880;
10480 tw32(MAC_SERDES_CFG, val);
10482 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10483 tw32(MAC_SERDES_CFG, 0x616000);
10486 /* Prevent chip from dropping frames when flow control
10487 * is enabled.
10489 if (tg3_flag(tp, 57765_CLASS))
10490 val = 1;
10491 else
10492 val = 2;
10493 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10495 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10496 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10497 /* Use hardware link auto-negotiation */
10498 tg3_flag_set(tp, HW_AUTONEG);
10501 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10502 tg3_asic_rev(tp) == ASIC_REV_5714) {
10503 u32 tmp;
10505 tmp = tr32(SERDES_RX_CTRL);
10506 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10507 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10508 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10509 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10512 if (!tg3_flag(tp, USE_PHYLIB)) {
10513 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10514 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10516 err = tg3_setup_phy(tp, false);
10517 if (err)
10518 return err;
10520 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10521 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10522 u32 tmp;
10524 /* Clear CRC stats. */
10525 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10526 tg3_writephy(tp, MII_TG3_TEST1,
10527 tmp | MII_TG3_TEST1_CRC_EN);
10528 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10533 __tg3_set_rx_mode(tp->dev);
10535 /* Initialize receive rules. */
10536 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10537 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10538 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10539 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10541 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10542 limit = 8;
10543 else
10544 limit = 16;
10545 if (tg3_flag(tp, ENABLE_ASF))
10546 limit -= 4;
10547 switch (limit) {
10548 case 16:
10549 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10550 case 15:
10551 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10552 case 14:
10553 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10554 case 13:
10555 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10556 case 12:
10557 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10558 case 11:
10559 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10560 case 10:
10561 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10562 case 9:
10563 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10564 case 8:
10565 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10566 case 7:
10567 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10568 case 6:
10569 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10570 case 5:
10571 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10572 case 4:
10573 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10574 case 3:
10575 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10576 case 2:
10577 case 1:
10579 default:
10580 break;
10583 if (tg3_flag(tp, ENABLE_APE))
10584 /* Write our heartbeat update interval to APE. */
10585 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10586 APE_HOST_HEARTBEAT_INT_DISABLE);
10588 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10590 return 0;
10593 /* Called at device open time to get the chip ready for
10594 * packet processing. Invoked with tp->lock held.
10596 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10598 /* Chip may have been just powered on. If so, the boot code may still
10599 * be running initialization. Wait for it to finish to avoid races in
10600 * accessing the hardware.
10602 tg3_enable_register_access(tp);
10603 tg3_poll_fw(tp);
10605 tg3_switch_clocks(tp);
10607 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10609 return tg3_reset_hw(tp, reset_phy);
10612 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10614 int i;
10616 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10617 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10619 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10620 off += len;
10622 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10623 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10624 memset(ocir, 0, TG3_OCIR_LEN);
10628 /* sysfs attributes for hwmon */
10629 static ssize_t tg3_show_temp(struct device *dev,
10630 struct device_attribute *devattr, char *buf)
10632 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10633 struct tg3 *tp = dev_get_drvdata(dev);
10634 u32 temperature;
10636 spin_lock_bh(&tp->lock);
10637 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10638 sizeof(temperature));
10639 spin_unlock_bh(&tp->lock);
10640 	return sprintf(buf, "%u\n", temperature * 1000); /* hwmon reports millidegrees C */
10644 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10645 TG3_TEMP_SENSOR_OFFSET);
10646 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10647 TG3_TEMP_CAUTION_OFFSET);
10648 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10649 TG3_TEMP_MAX_OFFSET);
10651 static struct attribute *tg3_attrs[] = {
10652 &sensor_dev_attr_temp1_input.dev_attr.attr,
10653 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10654 &sensor_dev_attr_temp1_max.dev_attr.attr,
10655 NULL
10657 ATTRIBUTE_GROUPS(tg3);
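/* A sketch of what ATTRIBUTE_GROUPS(tg3) is assumed to expand to (per
 * the helper in <linux/sysfs.h>), producing the NULL-terminated
 * tg3_groups array handed to hwmon_device_register_with_groups():
 *
 *	static const struct attribute_group tg3_group = {
 *		.attrs = tg3_attrs,
 *	};
 *	static const struct attribute_group *tg3_groups[] = {
 *		&tg3_group,
 *		NULL,
 *	};
 */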
10659 static void tg3_hwmon_close(struct tg3 *tp)
10661 if (tp->hwmon_dev) {
10662 hwmon_device_unregister(tp->hwmon_dev);
10663 tp->hwmon_dev = NULL;
10667 static void tg3_hwmon_open(struct tg3 *tp)
10669 int i;
10670 u32 size = 0;
10671 struct pci_dev *pdev = tp->pdev;
10672 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10674 tg3_sd_scan_scratchpad(tp, ocirs);
10676 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10677 if (!ocirs[i].src_data_length)
10678 continue;
10680 size += ocirs[i].src_hdr_length;
10681 size += ocirs[i].src_data_length;
10684 if (!size)
10685 return;
10687 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10688 tp, tg3_groups);
10689 if (IS_ERR(tp->hwmon_dev)) {
10690 tp->hwmon_dev = NULL;
10691 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
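/* On success the sensors appear in the standard hwmon sysfs layout,
 * e.g. (the hwmon index N varies from system to system):
 *
 *	/sys/class/hwmon/hwmonN/name        -> "tg3"
 *	/sys/class/hwmon/hwmonN/temp1_input -> current ASIC temperature
 *	/sys/class/hwmon/hwmonN/temp1_crit
 *	/sys/class/hwmon/hwmonN/temp1_max
 */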
10696 #define TG3_STAT_ADD32(PSTAT, REG) \
10697 do { u32 __val = tr32(REG); \
10698 (PSTAT)->low += __val; \
10699 if ((PSTAT)->low < __val) \
10700 (PSTAT)->high += 1; \
10701 } while (0)
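/* Worked example of the carry detection above: with (PSTAT)->low at
 * 0xfffffff0, a register read of __val = 0x20 wraps low around to 0x10.
 * Since 0x10 < 0x20, the overflow is detected and high is incremented,
 * extending the hardware's 32-bit counter to a monotonic 64-bit value.
 */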
10703 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10705 struct tg3_hw_stats *sp = tp->hw_stats;
10707 if (!tp->link_up)
10708 return;
10710 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10711 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10712 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10713 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10714 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10715 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10716 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10717 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10718 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10719 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10720 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10721 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10722 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10723 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10724 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10725 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10726 u32 val;
10728 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10729 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10730 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10731 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10734 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10735 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10736 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10737 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10738 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10739 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10740 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10741 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10742 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10743 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10744 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10745 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10746 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10747 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10749 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10750 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10751 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10752 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10753 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10754 } else {
10755 u32 val = tr32(HOSTCC_FLOW_ATTN);
10756 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10757 if (val) {
10758 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10759 sp->rx_discards.low += val;
10760 if (sp->rx_discards.low < val)
10761 sp->rx_discards.high += 1;
10763 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10765 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10768 static void tg3_chk_missed_msi(struct tg3 *tp)
10770 u32 i;
10772 for (i = 0; i < tp->irq_cnt; i++) {
10773 struct tg3_napi *tnapi = &tp->napi[i];
10775 if (tg3_has_work(tnapi)) {
10776 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10777 tnapi->last_tx_cons == tnapi->tx_cons) {
10778 if (tnapi->chk_msi_cnt < 1) {
10779 tnapi->chk_msi_cnt++;
10780 return;
10782 tg3_msi(0, tnapi);
10785 tnapi->chk_msi_cnt = 0;
10786 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10787 tnapi->last_tx_cons = tnapi->tx_cons;
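/* In short: if a vector still has work pending but its consumer indices
 * have not moved since the previous timer run, the MSI is presumed lost;
 * one grace period (chk_msi_cnt < 1) is allowed before tg3_msi() is
 * called by hand to restart processing.
 */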
10791 static void tg3_timer(unsigned long __opaque)
10793 struct tg3 *tp = (struct tg3 *) __opaque;
10795 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10796 goto restart_timer;
10798 spin_lock(&tp->lock);
10800 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10801 tg3_flag(tp, 57765_CLASS))
10802 tg3_chk_missed_msi(tp);
10804 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10805 /* BCM4785: Flush posted writes from GbE to host memory. */
10806 tr32(HOSTCC_MODE);
10809 if (!tg3_flag(tp, TAGGED_STATUS)) {
10810 		/* All of this garbage is because, when using non-tagged
10811 		 * IRQ status, the mailbox/status_block protocol the chip
10812 		 * uses with the cpu is race prone.
10814 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10815 tw32(GRC_LOCAL_CTRL,
10816 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10817 } else {
10818 tw32(HOSTCC_MODE, tp->coalesce_mode |
10819 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10822 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10823 spin_unlock(&tp->lock);
10824 tg3_reset_task_schedule(tp);
10825 goto restart_timer;
10829 /* This part only runs once per second. */
10830 if (!--tp->timer_counter) {
10831 if (tg3_flag(tp, 5705_PLUS))
10832 tg3_periodic_fetch_stats(tp);
10834 if (tp->setlpicnt && !--tp->setlpicnt)
10835 tg3_phy_eee_enable(tp);
10837 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10838 u32 mac_stat;
10839 int phy_event;
10841 mac_stat = tr32(MAC_STATUS);
10843 phy_event = 0;
10844 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10845 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10846 phy_event = 1;
10847 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10848 phy_event = 1;
10850 if (phy_event)
10851 tg3_setup_phy(tp, false);
10852 } else if (tg3_flag(tp, POLL_SERDES)) {
10853 u32 mac_stat = tr32(MAC_STATUS);
10854 int need_setup = 0;
10856 if (tp->link_up &&
10857 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10858 need_setup = 1;
10860 if (!tp->link_up &&
10861 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10862 MAC_STATUS_SIGNAL_DET))) {
10863 need_setup = 1;
10865 if (need_setup) {
10866 if (!tp->serdes_counter) {
10867 tw32_f(MAC_MODE,
10868 (tp->mac_mode &
10869 ~MAC_MODE_PORT_MODE_MASK));
10870 udelay(40);
10871 tw32_f(MAC_MODE, tp->mac_mode);
10872 udelay(40);
10874 tg3_setup_phy(tp, false);
10876 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10877 tg3_flag(tp, 5780_CLASS)) {
10878 tg3_serdes_parallel_detect(tp);
10881 tp->timer_counter = tp->timer_multiplier;
10884 /* Heartbeat is only sent once every 2 seconds.
10886 * The heartbeat is to tell the ASF firmware that the host
10887 * driver is still alive. In the event that the OS crashes,
10888 * ASF needs to reset the hardware to free up the FIFO space
10889 * that may be filled with rx packets destined for the host.
10890 * If the FIFO is full, ASF will no longer function properly.
10892 	 * Unintended resets have been reported on real-time kernels
10893 	 * where the timer doesn't run on time. Netpoll will also have
10894 	 * the same problem.
10896 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10897 * to check the ring condition when the heartbeat is expiring
10898 * before doing the reset. This will prevent most unintended
10899 * resets.
10901 if (!--tp->asf_counter) {
10902 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10903 tg3_wait_for_event_ack(tp);
10905 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10906 FWCMD_NICDRV_ALIVE3);
10907 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10908 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10909 TG3_FW_UPDATE_TIMEOUT_SEC);
10911 tg3_generate_fw_event(tp);
10913 tp->asf_counter = tp->asf_multiplier;
10916 spin_unlock(&tp->lock);
10918 restart_timer:
10919 tp->timer.expires = jiffies + tp->timer_offset;
10920 add_timer(&tp->timer);
10923 static void tg3_timer_init(struct tg3 *tp)
10925 if (tg3_flag(tp, TAGGED_STATUS) &&
10926 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10927 !tg3_flag(tp, 57765_CLASS))
10928 tp->timer_offset = HZ;
10929 else
10930 tp->timer_offset = HZ / 10;
10932 BUG_ON(tp->timer_offset > HZ);
10934 tp->timer_multiplier = (HZ / tp->timer_offset);
10935 tp->asf_multiplier = (HZ / tp->timer_offset) *
10936 TG3_FW_UPDATE_FREQ_SEC;
10938 init_timer(&tp->timer);
10939 tp->timer.data = (unsigned long) tp;
10940 tp->timer.function = tg3_timer;
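/* Worked example of the arithmetic above, assuming HZ = 1000 and a chip
 * that needs the 100 ms poll (timer_offset = HZ / 10):
 *
 *	timer_multiplier = 1000 / 100 = 10, so the "once per second"
 *	    block in tg3_timer() fires on every 10th run;
 *	asf_multiplier = 10 * TG3_FW_UPDATE_FREQ_SEC, spacing the ASF
 *	    heartbeats TG3_FW_UPDATE_FREQ_SEC seconds apart.
 */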
10943 static void tg3_timer_start(struct tg3 *tp)
10945 tp->asf_counter = tp->asf_multiplier;
10946 tp->timer_counter = tp->timer_multiplier;
10948 tp->timer.expires = jiffies + tp->timer_offset;
10949 add_timer(&tp->timer);
10952 static void tg3_timer_stop(struct tg3 *tp)
10954 del_timer_sync(&tp->timer);
10957 /* Restart hardware after configuration changes, self-test, etc.
10958 * Invoked with tp->lock held.
10960 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10961 __releases(tp->lock)
10962 __acquires(tp->lock)
10964 int err;
10966 err = tg3_init_hw(tp, reset_phy);
10967 if (err) {
10968 netdev_err(tp->dev,
10969 "Failed to re-initialize device, aborting\n");
10970 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10971 tg3_full_unlock(tp);
10972 tg3_timer_stop(tp);
10973 tp->irq_sync = 0;
10974 tg3_napi_enable(tp);
10975 dev_close(tp->dev);
10976 tg3_full_lock(tp, 0);
10978 return err;
10981 static void tg3_reset_task(struct work_struct *work)
10983 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10984 int err;
10986 tg3_full_lock(tp, 0);
10988 if (!netif_running(tp->dev)) {
10989 tg3_flag_clear(tp, RESET_TASK_PENDING);
10990 tg3_full_unlock(tp);
10991 return;
10994 tg3_full_unlock(tp);
10996 tg3_phy_stop(tp);
10998 tg3_netif_stop(tp);
11000 tg3_full_lock(tp, 1);
11002 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11003 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11004 tp->write32_rx_mbox = tg3_write_flush_reg32;
11005 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11006 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11009 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11010 err = tg3_init_hw(tp, true);
11011 if (err)
11012 goto out;
11014 tg3_netif_start(tp);
11016 out:
11017 tg3_full_unlock(tp);
11019 if (!err)
11020 tg3_phy_start(tp);
11022 tg3_flag_clear(tp, RESET_TASK_PENDING);
11025 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11027 irq_handler_t fn;
11028 unsigned long flags;
11029 char *name;
11030 struct tg3_napi *tnapi = &tp->napi[irq_num];
11032 if (tp->irq_cnt == 1)
11033 name = tp->dev->name;
11034 else {
11035 name = &tnapi->irq_lbl[0];
11036 if (tnapi->tx_buffers && tnapi->rx_rcb)
11037 snprintf(name, IFNAMSIZ,
11038 "%s-txrx-%d", tp->dev->name, irq_num);
11039 else if (tnapi->tx_buffers)
11040 snprintf(name, IFNAMSIZ,
11041 "%s-tx-%d", tp->dev->name, irq_num);
11042 else if (tnapi->rx_rcb)
11043 snprintf(name, IFNAMSIZ,
11044 "%s-rx-%d", tp->dev->name, irq_num);
11045 else
11046 snprintf(name, IFNAMSIZ,
11047 "%s-%d", tp->dev->name, irq_num);
11048 name[IFNAMSIZ-1] = 0;
11051 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11052 fn = tg3_msi;
11053 if (tg3_flag(tp, 1SHOT_MSI))
11054 fn = tg3_msi_1shot;
11055 flags = 0;
11056 } else {
11057 fn = tg3_interrupt;
11058 if (tg3_flag(tp, TAGGED_STATUS))
11059 fn = tg3_interrupt_tagged;
11060 flags = IRQF_SHARED;
11063 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
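/* Example of the interrupt names built above for a device "eth0" in
 * multi-vector mode: a vector serving both ring types is registered as
 * "eth0-txrx-1", TX-only as "eth0-tx-1", RX-only as "eth0-rx-1", and a
 * vector with neither ring (such as the link-only vector) as "eth0-1".
 * These are the labels that show up in /proc/interrupts.
 */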
11066 static int tg3_test_interrupt(struct tg3 *tp)
11068 struct tg3_napi *tnapi = &tp->napi[0];
11069 struct net_device *dev = tp->dev;
11070 int err, i, intr_ok = 0;
11071 u32 val;
11073 if (!netif_running(dev))
11074 return -ENODEV;
11076 tg3_disable_ints(tp);
11078 free_irq(tnapi->irq_vec, tnapi);
11081 * Turn off MSI one shot mode. Otherwise this test has no
11082 * observable way to know whether the interrupt was delivered.
11084 if (tg3_flag(tp, 57765_PLUS)) {
11085 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11086 tw32(MSGINT_MODE, val);
11089 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11090 IRQF_SHARED, dev->name, tnapi);
11091 if (err)
11092 return err;
11094 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11095 tg3_enable_ints(tp);
11097 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11098 tnapi->coal_now);
11100 for (i = 0; i < 5; i++) {
11101 u32 int_mbox, misc_host_ctrl;
11103 int_mbox = tr32_mailbox(tnapi->int_mbox);
11104 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11106 if ((int_mbox != 0) ||
11107 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11108 intr_ok = 1;
11109 break;
11112 if (tg3_flag(tp, 57765_PLUS) &&
11113 tnapi->hw_status->status_tag != tnapi->last_tag)
11114 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11116 msleep(10);
11119 tg3_disable_ints(tp);
11121 free_irq(tnapi->irq_vec, tnapi);
11123 err = tg3_request_irq(tp, 0);
11125 if (err)
11126 return err;
11128 if (intr_ok) {
11129 /* Reenable MSI one shot mode. */
11130 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11131 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11132 tw32(MSGINT_MODE, val);
11134 return 0;
11137 return -EIO;
11140 /* Returns 0 if the MSI test succeeds, or if the MSI test fails
11141  * and INTx mode is successfully restored
11143 static int tg3_test_msi(struct tg3 *tp)
11145 int err;
11146 u16 pci_cmd;
11148 if (!tg3_flag(tp, USING_MSI))
11149 return 0;
11151 /* Turn off SERR reporting in case MSI terminates with Master
11152 * Abort.
11154 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11155 pci_write_config_word(tp->pdev, PCI_COMMAND,
11156 pci_cmd & ~PCI_COMMAND_SERR);
11158 err = tg3_test_interrupt(tp);
11160 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11162 if (!err)
11163 return 0;
11165 /* other failures */
11166 if (err != -EIO)
11167 return err;
11169 /* MSI test failed, go back to INTx mode */
11170 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11171 "to INTx mode. Please report this failure to the PCI "
11172 "maintainer and include system chipset information\n");
11174 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11176 pci_disable_msi(tp->pdev);
11178 tg3_flag_clear(tp, USING_MSI);
11179 tp->napi[0].irq_vec = tp->pdev->irq;
11181 err = tg3_request_irq(tp, 0);
11182 if (err)
11183 return err;
11185 /* Need to reset the chip because the MSI cycle may have terminated
11186 * with Master Abort.
11188 tg3_full_lock(tp, 1);
11190 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11191 err = tg3_init_hw(tp, true);
11193 tg3_full_unlock(tp);
11195 if (err)
11196 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11198 return err;
11201 static int tg3_request_firmware(struct tg3 *tp)
11203 const struct tg3_firmware_hdr *fw_hdr;
11205 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11206 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11207 tp->fw_needed);
11208 return -ENOENT;
11211 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11213 /* Firmware blob starts with version numbers, followed by
11214 * start address and _full_ length including BSS sections
11215 	 * (which must be longer than the actual data, of course).
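/* A sketch of the assumed blob layout, following this driver's
 * struct tg3_firmware_hdr of three big-endian 32-bit words ahead of
 * the image data:
 *
 *	struct tg3_firmware_hdr {
 *		__be32 version;
 *		__be32 base_addr;	// load address in NIC memory
 *		__be32 len;		// full length, including BSS
 *	};
 *
 * Hence fw->size is TG3_FW_HDR_LEN bytes of header plus the image,
 * while fw_hdr->len may legitimately exceed the image bytes present.
 */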
11218 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11219 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11220 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11221 tp->fw_len, tp->fw_needed);
11222 release_firmware(tp->fw);
11223 tp->fw = NULL;
11224 return -EINVAL;
11227 /* We no longer need firmware; we have it. */
11228 tp->fw_needed = NULL;
11229 return 0;
11232 static u32 tg3_irq_count(struct tg3 *tp)
11234 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11236 if (irq_cnt > 1) {
11237 /* We want as many rx rings enabled as there are cpus.
11238 * In multiqueue MSI-X mode, the first MSI-X vector
11239 * only deals with link interrupts, etc, so we add
11240 * one to the number of vectors we are requesting.
11242 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11245 return irq_cnt;
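/* Worked example: with four default RSS queues and one TX queue,
 * irq_cnt starts as max(4, 1) = 4; being greater than 1, one extra
 * vector is added for the link-only vector 0, so min(5, tp->irq_max)
 * MSI-X vectors are requested in total.
 */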
11248 static bool tg3_enable_msix(struct tg3 *tp)
11250 int i, rc;
11251 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11253 tp->txq_cnt = tp->txq_req;
11254 tp->rxq_cnt = tp->rxq_req;
11255 if (!tp->rxq_cnt)
11256 tp->rxq_cnt = netif_get_num_default_rss_queues();
11257 if (tp->rxq_cnt > tp->rxq_max)
11258 tp->rxq_cnt = tp->rxq_max;
11260 /* Disable multiple TX rings by default. Simple round-robin hardware
11261 * scheduling of the TX rings can cause starvation of rings with
11262 * small packets when other rings have TSO or jumbo packets.
11264 if (!tp->txq_req)
11265 tp->txq_cnt = 1;
11267 tp->irq_cnt = tg3_irq_count(tp);
11269 for (i = 0; i < tp->irq_max; i++) {
11270 msix_ent[i].entry = i;
11271 msix_ent[i].vector = 0;
11274 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11275 if (rc < 0) {
11276 return false;
11277 } else if (rc != 0) {
11278 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11279 return false;
11280 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11281 tp->irq_cnt, rc);
11282 tp->irq_cnt = rc;
11283 tp->rxq_cnt = max(rc - 1, 1);
11284 if (tp->txq_cnt)
11285 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11288 for (i = 0; i < tp->irq_max; i++)
11289 tp->napi[i].irq_vec = msix_ent[i].vector;
11291 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11292 pci_disable_msix(tp->pdev);
11293 return false;
11296 if (tp->irq_cnt == 1)
11297 return true;
11299 tg3_flag_set(tp, ENABLE_RSS);
11301 if (tp->txq_cnt > 1)
11302 tg3_flag_set(tp, ENABLE_TSS);
11304 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11306 return true;
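/* Note on the retry above: pci_enable_msix() returns 0 on success, a
 * negative errno on failure, and a positive count when fewer vectors
 * are available than requested.  In the positive case the request is
 * simply repeated with that count:
 *
 *	rc = pci_enable_msix(pdev, msix_ent, 5);	// want 5 vectors
 *	if (rc > 0)					// only rc available
 *		rc = pci_enable_msix(pdev, msix_ent, rc);
 */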
11309 static void tg3_ints_init(struct tg3 *tp)
11311 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11312 !tg3_flag(tp, TAGGED_STATUS)) {
11313 		/* All MSI supporting chips should support tagged
11314 		 * status. Warn and fall back to INTx if this is not the case.
11316 netdev_warn(tp->dev,
11317 "MSI without TAGGED_STATUS? Not using MSI\n");
11318 goto defcfg;
11321 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11322 tg3_flag_set(tp, USING_MSIX);
11323 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11324 tg3_flag_set(tp, USING_MSI);
11326 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11327 u32 msi_mode = tr32(MSGINT_MODE);
11328 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11329 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11330 if (!tg3_flag(tp, 1SHOT_MSI))
11331 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11332 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11334 defcfg:
11335 if (!tg3_flag(tp, USING_MSIX)) {
11336 tp->irq_cnt = 1;
11337 tp->napi[0].irq_vec = tp->pdev->irq;
11340 if (tp->irq_cnt == 1) {
11341 tp->txq_cnt = 1;
11342 tp->rxq_cnt = 1;
11343 netif_set_real_num_tx_queues(tp->dev, 1);
11344 netif_set_real_num_rx_queues(tp->dev, 1);
11348 static void tg3_ints_fini(struct tg3 *tp)
11350 if (tg3_flag(tp, USING_MSIX))
11351 pci_disable_msix(tp->pdev);
11352 else if (tg3_flag(tp, USING_MSI))
11353 pci_disable_msi(tp->pdev);
11354 tg3_flag_clear(tp, USING_MSI);
11355 tg3_flag_clear(tp, USING_MSIX);
11356 tg3_flag_clear(tp, ENABLE_RSS);
11357 tg3_flag_clear(tp, ENABLE_TSS);
11360 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11361 bool init)
11363 struct net_device *dev = tp->dev;
11364 int i, err;
11367 * Setup interrupts first so we know how
11368 * many NAPI resources to allocate
11370 tg3_ints_init(tp);
11372 tg3_rss_check_indir_tbl(tp);
11374 /* The placement of this call is tied
11375 * to the setup and use of Host TX descriptors.
11377 err = tg3_alloc_consistent(tp);
11378 if (err)
11379 goto out_ints_fini;
11381 tg3_napi_init(tp);
11383 tg3_napi_enable(tp);
11385 for (i = 0; i < tp->irq_cnt; i++) {
11386 struct tg3_napi *tnapi = &tp->napi[i];
11387 err = tg3_request_irq(tp, i);
11388 if (err) {
11389 for (i--; i >= 0; i--) {
11390 tnapi = &tp->napi[i];
11391 free_irq(tnapi->irq_vec, tnapi);
11393 goto out_napi_fini;
11397 tg3_full_lock(tp, 0);
11399 if (init)
11400 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11402 err = tg3_init_hw(tp, reset_phy);
11403 if (err) {
11404 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11405 tg3_free_rings(tp);
11408 tg3_full_unlock(tp);
11410 if (err)
11411 goto out_free_irq;
11413 if (test_irq && tg3_flag(tp, USING_MSI)) {
11414 err = tg3_test_msi(tp);
11416 if (err) {
11417 tg3_full_lock(tp, 0);
11418 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11419 tg3_free_rings(tp);
11420 tg3_full_unlock(tp);
11422 goto out_napi_fini;
11425 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11426 u32 val = tr32(PCIE_TRANSACTION_CFG);
11428 tw32(PCIE_TRANSACTION_CFG,
11429 val | PCIE_TRANS_CFG_1SHOT_MSI);
11433 tg3_phy_start(tp);
11435 tg3_hwmon_open(tp);
11437 tg3_full_lock(tp, 0);
11439 tg3_timer_start(tp);
11440 tg3_flag_set(tp, INIT_COMPLETE);
11441 tg3_enable_ints(tp);
11443 if (init)
11444 tg3_ptp_init(tp);
11445 else
11446 tg3_ptp_resume(tp);
11449 tg3_full_unlock(tp);
11451 netif_tx_start_all_queues(dev);
11454 	 * Reset the loopback feature if it was turned on while the device was
11455 	 * down; make sure that it's installed properly now.
11457 if (dev->features & NETIF_F_LOOPBACK)
11458 tg3_set_loopback(dev, dev->features);
11460 return 0;
11462 out_free_irq:
11463 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11464 struct tg3_napi *tnapi = &tp->napi[i];
11465 free_irq(tnapi->irq_vec, tnapi);
11468 out_napi_fini:
11469 tg3_napi_disable(tp);
11470 tg3_napi_fini(tp);
11471 tg3_free_consistent(tp);
11473 out_ints_fini:
11474 tg3_ints_fini(tp);
11476 return err;
11479 static void tg3_stop(struct tg3 *tp)
11481 int i;
11483 tg3_reset_task_cancel(tp);
11484 tg3_netif_stop(tp);
11486 tg3_timer_stop(tp);
11488 tg3_hwmon_close(tp);
11490 tg3_phy_stop(tp);
11492 tg3_full_lock(tp, 1);
11494 tg3_disable_ints(tp);
11496 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11497 tg3_free_rings(tp);
11498 tg3_flag_clear(tp, INIT_COMPLETE);
11500 tg3_full_unlock(tp);
11502 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11503 struct tg3_napi *tnapi = &tp->napi[i];
11504 free_irq(tnapi->irq_vec, tnapi);
11507 tg3_ints_fini(tp);
11509 tg3_napi_fini(tp);
11511 tg3_free_consistent(tp);
11514 static int tg3_open(struct net_device *dev)
11516 struct tg3 *tp = netdev_priv(dev);
11517 int err;
11519 if (tp->fw_needed) {
11520 err = tg3_request_firmware(tp);
11521 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11522 if (err) {
11523 netdev_warn(tp->dev, "EEE capability disabled\n");
11524 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11525 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11526 netdev_warn(tp->dev, "EEE capability restored\n");
11527 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11529 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11530 if (err)
11531 return err;
11532 } else if (err) {
11533 netdev_warn(tp->dev, "TSO capability disabled\n");
11534 tg3_flag_clear(tp, TSO_CAPABLE);
11535 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11536 netdev_notice(tp->dev, "TSO capability restored\n");
11537 tg3_flag_set(tp, TSO_CAPABLE);
11541 tg3_carrier_off(tp);
11543 err = tg3_power_up(tp);
11544 if (err)
11545 return err;
11547 tg3_full_lock(tp, 0);
11549 tg3_disable_ints(tp);
11550 tg3_flag_clear(tp, INIT_COMPLETE);
11552 tg3_full_unlock(tp);
11554 err = tg3_start(tp,
11555 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11556 true, true);
11557 if (err) {
11558 tg3_frob_aux_power(tp, false);
11559 pci_set_power_state(tp->pdev, PCI_D3hot);
11562 if (tg3_flag(tp, PTP_CAPABLE)) {
11563 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11564 &tp->pdev->dev);
11565 if (IS_ERR(tp->ptp_clock))
11566 tp->ptp_clock = NULL;
11569 return err;
11572 static int tg3_close(struct net_device *dev)
11574 struct tg3 *tp = netdev_priv(dev);
11576 tg3_ptp_fini(tp);
11578 tg3_stop(tp);
11580 /* Clear stats across close / open calls */
11581 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11582 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11584 tg3_power_down_prepare(tp);
11586 tg3_carrier_off(tp);
11588 return 0;
11591 static inline u64 get_stat64(tg3_stat64_t *val)
11593 return ((u64)val->high << 32) | ((u64)val->low);
11596 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11598 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11600 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11601 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11602 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11603 u32 val;
11605 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11606 tg3_writephy(tp, MII_TG3_TEST1,
11607 val | MII_TG3_TEST1_CRC_EN);
11608 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11609 } else
11610 val = 0;
11612 tp->phy_crc_errors += val;
11614 return tp->phy_crc_errors;
11617 return get_stat64(&hw_stats->rx_fcs_errors);
11620 #define ESTAT_ADD(member) \
11621 estats->member = old_estats->member + \
11622 get_stat64(&hw_stats->member)
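/* Example: estats->rx_octets = old_estats->rx_octets +
 * get_stat64(&hw_stats->rx_octets).  Because a snapshot is folded into
 * tp->estats_prev before each chip reset clears the hardware counter
 * block, the totals reported to ethtool stay monotonic across resets.
 */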
11624 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11626 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11627 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11629 ESTAT_ADD(rx_octets);
11630 ESTAT_ADD(rx_fragments);
11631 ESTAT_ADD(rx_ucast_packets);
11632 ESTAT_ADD(rx_mcast_packets);
11633 ESTAT_ADD(rx_bcast_packets);
11634 ESTAT_ADD(rx_fcs_errors);
11635 ESTAT_ADD(rx_align_errors);
11636 ESTAT_ADD(rx_xon_pause_rcvd);
11637 ESTAT_ADD(rx_xoff_pause_rcvd);
11638 ESTAT_ADD(rx_mac_ctrl_rcvd);
11639 ESTAT_ADD(rx_xoff_entered);
11640 ESTAT_ADD(rx_frame_too_long_errors);
11641 ESTAT_ADD(rx_jabbers);
11642 ESTAT_ADD(rx_undersize_packets);
11643 ESTAT_ADD(rx_in_length_errors);
11644 ESTAT_ADD(rx_out_length_errors);
11645 ESTAT_ADD(rx_64_or_less_octet_packets);
11646 ESTAT_ADD(rx_65_to_127_octet_packets);
11647 ESTAT_ADD(rx_128_to_255_octet_packets);
11648 ESTAT_ADD(rx_256_to_511_octet_packets);
11649 ESTAT_ADD(rx_512_to_1023_octet_packets);
11650 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11651 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11652 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11653 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11654 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11656 ESTAT_ADD(tx_octets);
11657 ESTAT_ADD(tx_collisions);
11658 ESTAT_ADD(tx_xon_sent);
11659 ESTAT_ADD(tx_xoff_sent);
11660 ESTAT_ADD(tx_flow_control);
11661 ESTAT_ADD(tx_mac_errors);
11662 ESTAT_ADD(tx_single_collisions);
11663 ESTAT_ADD(tx_mult_collisions);
11664 ESTAT_ADD(tx_deferred);
11665 ESTAT_ADD(tx_excessive_collisions);
11666 ESTAT_ADD(tx_late_collisions);
11667 ESTAT_ADD(tx_collide_2times);
11668 ESTAT_ADD(tx_collide_3times);
11669 ESTAT_ADD(tx_collide_4times);
11670 ESTAT_ADD(tx_collide_5times);
11671 ESTAT_ADD(tx_collide_6times);
11672 ESTAT_ADD(tx_collide_7times);
11673 ESTAT_ADD(tx_collide_8times);
11674 ESTAT_ADD(tx_collide_9times);
11675 ESTAT_ADD(tx_collide_10times);
11676 ESTAT_ADD(tx_collide_11times);
11677 ESTAT_ADD(tx_collide_12times);
11678 ESTAT_ADD(tx_collide_13times);
11679 ESTAT_ADD(tx_collide_14times);
11680 ESTAT_ADD(tx_collide_15times);
11681 ESTAT_ADD(tx_ucast_packets);
11682 ESTAT_ADD(tx_mcast_packets);
11683 ESTAT_ADD(tx_bcast_packets);
11684 ESTAT_ADD(tx_carrier_sense_errors);
11685 ESTAT_ADD(tx_discards);
11686 ESTAT_ADD(tx_errors);
11688 ESTAT_ADD(dma_writeq_full);
11689 ESTAT_ADD(dma_write_prioq_full);
11690 ESTAT_ADD(rxbds_empty);
11691 ESTAT_ADD(rx_discards);
11692 ESTAT_ADD(rx_errors);
11693 ESTAT_ADD(rx_threshold_hit);
11695 ESTAT_ADD(dma_readq_full);
11696 ESTAT_ADD(dma_read_prioq_full);
11697 ESTAT_ADD(tx_comp_queue_full);
11699 ESTAT_ADD(ring_set_send_prod_index);
11700 ESTAT_ADD(ring_status_update);
11701 ESTAT_ADD(nic_irqs);
11702 ESTAT_ADD(nic_avoided_irqs);
11703 ESTAT_ADD(nic_tx_threshold_hit);
11705 ESTAT_ADD(mbuf_lwm_thresh_hit);
11708 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11710 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11711 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11713 stats->rx_packets = old_stats->rx_packets +
11714 get_stat64(&hw_stats->rx_ucast_packets) +
11715 get_stat64(&hw_stats->rx_mcast_packets) +
11716 get_stat64(&hw_stats->rx_bcast_packets);
11718 stats->tx_packets = old_stats->tx_packets +
11719 get_stat64(&hw_stats->tx_ucast_packets) +
11720 get_stat64(&hw_stats->tx_mcast_packets) +
11721 get_stat64(&hw_stats->tx_bcast_packets);
11723 stats->rx_bytes = old_stats->rx_bytes +
11724 get_stat64(&hw_stats->rx_octets);
11725 stats->tx_bytes = old_stats->tx_bytes +
11726 get_stat64(&hw_stats->tx_octets);
11728 stats->rx_errors = old_stats->rx_errors +
11729 get_stat64(&hw_stats->rx_errors);
11730 stats->tx_errors = old_stats->tx_errors +
11731 get_stat64(&hw_stats->tx_errors) +
11732 get_stat64(&hw_stats->tx_mac_errors) +
11733 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11734 get_stat64(&hw_stats->tx_discards);
11736 stats->multicast = old_stats->multicast +
11737 get_stat64(&hw_stats->rx_mcast_packets);
11738 stats->collisions = old_stats->collisions +
11739 get_stat64(&hw_stats->tx_collisions);
11741 stats->rx_length_errors = old_stats->rx_length_errors +
11742 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11743 get_stat64(&hw_stats->rx_undersize_packets);
11745 stats->rx_over_errors = old_stats->rx_over_errors +
11746 get_stat64(&hw_stats->rxbds_empty);
11747 stats->rx_frame_errors = old_stats->rx_frame_errors +
11748 get_stat64(&hw_stats->rx_align_errors);
11749 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11750 get_stat64(&hw_stats->tx_discards);
11751 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11752 get_stat64(&hw_stats->tx_carrier_sense_errors);
11754 stats->rx_crc_errors = old_stats->rx_crc_errors +
11755 tg3_calc_crc_errors(tp);
11757 stats->rx_missed_errors = old_stats->rx_missed_errors +
11758 get_stat64(&hw_stats->rx_discards);
11760 stats->rx_dropped = tp->rx_dropped;
11761 stats->tx_dropped = tp->tx_dropped;
11764 static int tg3_get_regs_len(struct net_device *dev)
11766 return TG3_REG_BLK_SIZE;
11769 static void tg3_get_regs(struct net_device *dev,
11770 struct ethtool_regs *regs, void *_p)
11772 struct tg3 *tp = netdev_priv(dev);
11774 regs->version = 0;
11776 memset(_p, 0, TG3_REG_BLK_SIZE);
11778 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11779 return;
11781 tg3_full_lock(tp, 0);
11783 tg3_dump_legacy_regs(tp, (u32 *)_p);
11785 tg3_full_unlock(tp);
11788 static int tg3_get_eeprom_len(struct net_device *dev)
11790 struct tg3 *tp = netdev_priv(dev);
11792 return tp->nvram_size;
11795 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11797 struct tg3 *tp = netdev_priv(dev);
11798 int ret;
11799 u8 *pd;
11800 u32 i, offset, len, b_offset, b_count;
11801 __be32 val;
11803 if (tg3_flag(tp, NO_NVRAM))
11804 return -EINVAL;
11806 offset = eeprom->offset;
11807 len = eeprom->len;
11808 eeprom->len = 0;
11810 eeprom->magic = TG3_EEPROM_MAGIC;
11812 if (offset & 3) {
11813 /* adjustments to start on required 4 byte boundary */
11814 b_offset = offset & 3;
11815 b_count = 4 - b_offset;
11816 if (b_count > len) {
11817 /* i.e. offset=1 len=2 */
11818 b_count = len;
11820 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11821 if (ret)
11822 return ret;
11823 memcpy(data, ((char *)&val) + b_offset, b_count);
11824 len -= b_count;
11825 offset += b_count;
11826 eeprom->len += b_count;
11829 /* read bytes up to the last 4 byte boundary */
11830 pd = &data[eeprom->len];
11831 for (i = 0; i < (len - (len & 3)); i += 4) {
11832 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11833 if (ret) {
11834 eeprom->len += i;
11835 return ret;
11837 memcpy(pd + i, &val, 4);
11839 eeprom->len += i;
11841 if (len & 3) {
11842 /* read last bytes not ending on 4 byte boundary */
11843 pd = &data[eeprom->len];
11844 b_count = len & 3;
11845 b_offset = offset + len - b_count;
11846 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11847 if (ret)
11848 return ret;
11849 memcpy(pd, &val, b_count);
11850 eeprom->len += b_count;
11852 return 0;
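/* Worked example of the alignment handling above: a request with
 * offset = 6 and len = 9 becomes three aligned NVRAM word reads:
 *
 *	head:   word at 4,  copy bytes 2-3   (covers offsets 6-7)
 *	middle: word at 8,  copy all 4 bytes (offsets 8-11)
 *	tail:   word at 12, copy bytes 0-2   (offsets 12-14)
 */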
11855 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11857 struct tg3 *tp = netdev_priv(dev);
11858 int ret;
11859 u32 offset, len, b_offset, odd_len;
11860 u8 *buf;
11861 __be32 start, end;
11863 if (tg3_flag(tp, NO_NVRAM) ||
11864 eeprom->magic != TG3_EEPROM_MAGIC)
11865 return -EINVAL;
11867 offset = eeprom->offset;
11868 len = eeprom->len;
11870 if ((b_offset = (offset & 3))) {
11871 /* adjustments to start on required 4 byte boundary */
11872 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11873 if (ret)
11874 return ret;
11875 len += b_offset;
11876 offset &= ~3;
11877 if (len < 4)
11878 len = 4;
11881 odd_len = 0;
11882 if (len & 3) {
11883 /* adjustments to end on required 4 byte boundary */
11884 odd_len = 1;
11885 len = (len + 3) & ~3;
11886 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11887 if (ret)
11888 return ret;
11891 buf = data;
11892 if (b_offset || odd_len) {
11893 buf = kmalloc(len, GFP_KERNEL);
11894 if (!buf)
11895 return -ENOMEM;
11896 if (b_offset)
11897 memcpy(buf, &start, 4);
11898 if (odd_len)
11899 memcpy(buf+len-4, &end, 4);
11900 memcpy(buf + b_offset, data, eeprom->len);
11903 ret = tg3_nvram_write_block(tp, offset, len, buf);
11905 if (buf != data)
11906 kfree(buf);
11908 return ret;
11911 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11913 struct tg3 *tp = netdev_priv(dev);
11915 if (tg3_flag(tp, USE_PHYLIB)) {
11916 struct phy_device *phydev;
11917 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11918 return -EAGAIN;
11919 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
11920 return phy_ethtool_gset(phydev, cmd);
11923 cmd->supported = (SUPPORTED_Autoneg);
11925 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11926 cmd->supported |= (SUPPORTED_1000baseT_Half |
11927 SUPPORTED_1000baseT_Full);
11929 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11930 cmd->supported |= (SUPPORTED_100baseT_Half |
11931 SUPPORTED_100baseT_Full |
11932 SUPPORTED_10baseT_Half |
11933 SUPPORTED_10baseT_Full |
11934 SUPPORTED_TP);
11935 cmd->port = PORT_TP;
11936 } else {
11937 cmd->supported |= SUPPORTED_FIBRE;
11938 cmd->port = PORT_FIBRE;
11941 cmd->advertising = tp->link_config.advertising;
11942 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11943 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11944 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11945 cmd->advertising |= ADVERTISED_Pause;
11946 } else {
11947 cmd->advertising |= ADVERTISED_Pause |
11948 ADVERTISED_Asym_Pause;
11950 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11951 cmd->advertising |= ADVERTISED_Asym_Pause;
11954 if (netif_running(dev) && tp->link_up) {
11955 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11956 cmd->duplex = tp->link_config.active_duplex;
11957 cmd->lp_advertising = tp->link_config.rmt_adv;
11958 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11959 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11960 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11961 else
11962 cmd->eth_tp_mdix = ETH_TP_MDI;
11964 } else {
11965 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11966 cmd->duplex = DUPLEX_UNKNOWN;
11967 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11969 cmd->phy_address = tp->phy_addr;
11970 cmd->transceiver = XCVR_INTERNAL;
11971 cmd->autoneg = tp->link_config.autoneg;
11972 cmd->maxtxpkt = 0;
11973 cmd->maxrxpkt = 0;
11974 return 0;
11977 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11979 struct tg3 *tp = netdev_priv(dev);
11980 u32 speed = ethtool_cmd_speed(cmd);
11982 if (tg3_flag(tp, USE_PHYLIB)) {
11983 struct phy_device *phydev;
11984 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11985 return -EAGAIN;
11986 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
11987 return phy_ethtool_sset(phydev, cmd);
11990 if (cmd->autoneg != AUTONEG_ENABLE &&
11991 cmd->autoneg != AUTONEG_DISABLE)
11992 return -EINVAL;
11994 if (cmd->autoneg == AUTONEG_DISABLE &&
11995 cmd->duplex != DUPLEX_FULL &&
11996 cmd->duplex != DUPLEX_HALF)
11997 return -EINVAL;
11999 if (cmd->autoneg == AUTONEG_ENABLE) {
12000 u32 mask = ADVERTISED_Autoneg |
12001 ADVERTISED_Pause |
12002 ADVERTISED_Asym_Pause;
12004 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12005 mask |= ADVERTISED_1000baseT_Half |
12006 ADVERTISED_1000baseT_Full;
12008 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12009 mask |= ADVERTISED_100baseT_Half |
12010 ADVERTISED_100baseT_Full |
12011 ADVERTISED_10baseT_Half |
12012 ADVERTISED_10baseT_Full |
12013 ADVERTISED_TP;
12014 else
12015 mask |= ADVERTISED_FIBRE;
12017 if (cmd->advertising & ~mask)
12018 return -EINVAL;
12020 mask &= (ADVERTISED_1000baseT_Half |
12021 ADVERTISED_1000baseT_Full |
12022 ADVERTISED_100baseT_Half |
12023 ADVERTISED_100baseT_Full |
12024 ADVERTISED_10baseT_Half |
12025 ADVERTISED_10baseT_Full);
12027 cmd->advertising &= mask;
12028 } else {
12029 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12030 if (speed != SPEED_1000)
12031 return -EINVAL;
12033 if (cmd->duplex != DUPLEX_FULL)
12034 return -EINVAL;
12035 } else {
12036 if (speed != SPEED_100 &&
12037 speed != SPEED_10)
12038 return -EINVAL;
12042 tg3_full_lock(tp, 0);
12044 tp->link_config.autoneg = cmd->autoneg;
12045 if (cmd->autoneg == AUTONEG_ENABLE) {
12046 tp->link_config.advertising = (cmd->advertising |
12047 ADVERTISED_Autoneg);
12048 tp->link_config.speed = SPEED_UNKNOWN;
12049 tp->link_config.duplex = DUPLEX_UNKNOWN;
12050 } else {
12051 tp->link_config.advertising = 0;
12052 tp->link_config.speed = speed;
12053 tp->link_config.duplex = cmd->duplex;
12056 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12058 tg3_warn_mgmt_link_flap(tp);
12060 if (netif_running(dev))
12061 tg3_setup_phy(tp, true);
12063 tg3_full_unlock(tp);
12065 return 0;
12068 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12070 struct tg3 *tp = netdev_priv(dev);
12072 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12073 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12074 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12075 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12078 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12080 struct tg3 *tp = netdev_priv(dev);
12082 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12083 wol->supported = WAKE_MAGIC;
12084 else
12085 wol->supported = 0;
12086 wol->wolopts = 0;
12087 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12088 wol->wolopts = WAKE_MAGIC;
12089 memset(&wol->sopass, 0, sizeof(wol->sopass));
12092 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12094 struct tg3 *tp = netdev_priv(dev);
12095 struct device *dp = &tp->pdev->dev;
12097 if (wol->wolopts & ~WAKE_MAGIC)
12098 return -EINVAL;
12099 if ((wol->wolopts & WAKE_MAGIC) &&
12100 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12101 return -EINVAL;
12103 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12105 if (device_may_wakeup(dp))
12106 tg3_flag_set(tp, WOL_ENABLE);
12107 else
12108 tg3_flag_clear(tp, WOL_ENABLE);
12110 return 0;
12113 static u32 tg3_get_msglevel(struct net_device *dev)
12115 struct tg3 *tp = netdev_priv(dev);
12116 return tp->msg_enable;
12119 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12121 struct tg3 *tp = netdev_priv(dev);
12122 tp->msg_enable = value;
12125 static int tg3_nway_reset(struct net_device *dev)
12127 struct tg3 *tp = netdev_priv(dev);
12128 int r;
12130 if (!netif_running(dev))
12131 return -EAGAIN;
12133 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12134 return -EINVAL;
12136 tg3_warn_mgmt_link_flap(tp);
12138 if (tg3_flag(tp, USE_PHYLIB)) {
12139 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12140 return -EAGAIN;
12141 r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
12142 } else {
12143 u32 bmcr;
12145 spin_lock_bh(&tp->lock);
12146 r = -EINVAL;
12147 tg3_readphy(tp, MII_BMCR, &bmcr);
12148 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12149 ((bmcr & BMCR_ANENABLE) ||
12150 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12151 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12152 BMCR_ANENABLE);
12153 r = 0;
12155 spin_unlock_bh(&tp->lock);
12158 return r;
12161 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12163 struct tg3 *tp = netdev_priv(dev);
12165 ering->rx_max_pending = tp->rx_std_ring_mask;
12166 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12167 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12168 else
12169 ering->rx_jumbo_max_pending = 0;
12171 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12173 ering->rx_pending = tp->rx_pending;
12174 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12175 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12176 else
12177 ering->rx_jumbo_pending = 0;
12179 ering->tx_pending = tp->napi[0].tx_pending;
12182 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12184 struct tg3 *tp = netdev_priv(dev);
12185 int i, irq_sync = 0, err = 0;
12187 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12188 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12189 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12190 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12191 (tg3_flag(tp, TSO_BUG) &&
12192 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12193 return -EINVAL;
12195 if (netif_running(dev)) {
12196 tg3_phy_stop(tp);
12197 tg3_netif_stop(tp);
12198 irq_sync = 1;
12201 tg3_full_lock(tp, irq_sync);
12203 tp->rx_pending = ering->rx_pending;
12205 if (tg3_flag(tp, MAX_RXPEND_64) &&
12206 tp->rx_pending > 63)
12207 tp->rx_pending = 63;
12208 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12210 for (i = 0; i < tp->irq_max; i++)
12211 tp->napi[i].tx_pending = ering->tx_pending;
12213 if (netif_running(dev)) {
12214 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12215 err = tg3_restart_hw(tp, false);
12216 if (!err)
12217 tg3_netif_start(tp);
12220 tg3_full_unlock(tp);
12222 if (irq_sync && !err)
12223 tg3_phy_start(tp);
12225 return err;
12228 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12230 struct tg3 *tp = netdev_priv(dev);
12232 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12234 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12235 epause->rx_pause = 1;
12236 else
12237 epause->rx_pause = 0;
12239 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12240 epause->tx_pause = 1;
12241 else
12242 epause->tx_pause = 0;
12245 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12247 struct tg3 *tp = netdev_priv(dev);
12248 int err = 0;
12250 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12251 tg3_warn_mgmt_link_flap(tp);
12253 if (tg3_flag(tp, USE_PHYLIB)) {
12254 u32 newadv;
12255 struct phy_device *phydev;
12257 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12259 if (!(phydev->supported & SUPPORTED_Pause) ||
12260 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12261 (epause->rx_pause != epause->tx_pause)))
12262 return -EINVAL;
12264 tp->link_config.flowctrl = 0;
12265 if (epause->rx_pause) {
12266 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12268 if (epause->tx_pause) {
12269 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12270 newadv = ADVERTISED_Pause;
12271 } else
12272 newadv = ADVERTISED_Pause |
12273 ADVERTISED_Asym_Pause;
12274 } else if (epause->tx_pause) {
12275 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12276 newadv = ADVERTISED_Asym_Pause;
12277 } else
12278 newadv = 0;
12280 if (epause->autoneg)
12281 tg3_flag_set(tp, PAUSE_AUTONEG);
12282 else
12283 tg3_flag_clear(tp, PAUSE_AUTONEG);
12285 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12286 u32 oldadv = phydev->advertising &
12287 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12288 if (oldadv != newadv) {
12289 phydev->advertising &=
12290 ~(ADVERTISED_Pause |
12291 ADVERTISED_Asym_Pause);
12292 phydev->advertising |= newadv;
12293 if (phydev->autoneg) {
12295 * Always renegotiate the link to
12296 * inform our link partner of our
12297 * flow control settings, even if the
12298 * flow control is forced. Let
12299 * tg3_adjust_link() do the final
12300 * flow control setup.
12302 return phy_start_aneg(phydev);
12306 if (!epause->autoneg)
12307 tg3_setup_flow_control(tp, 0, 0);
12308 } else {
12309 tp->link_config.advertising &=
12310 ~(ADVERTISED_Pause |
12311 ADVERTISED_Asym_Pause);
12312 tp->link_config.advertising |= newadv;
12314 } else {
12315 int irq_sync = 0;
12317 if (netif_running(dev)) {
12318 tg3_netif_stop(tp);
12319 irq_sync = 1;
12322 tg3_full_lock(tp, irq_sync);
12324 if (epause->autoneg)
12325 tg3_flag_set(tp, PAUSE_AUTONEG);
12326 else
12327 tg3_flag_clear(tp, PAUSE_AUTONEG);
12328 if (epause->rx_pause)
12329 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12330 else
12331 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12332 if (epause->tx_pause)
12333 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12334 else
12335 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12337 if (netif_running(dev)) {
12338 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12339 err = tg3_restart_hw(tp, false);
12340 if (!err)
12341 tg3_netif_start(tp);
12344 tg3_full_unlock(tp);
12347 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12349 return err;
12352 static int tg3_get_sset_count(struct net_device *dev, int sset)
12354 switch (sset) {
12355 case ETH_SS_TEST:
12356 return TG3_NUM_TEST;
12357 case ETH_SS_STATS:
12358 return TG3_NUM_STATS;
12359 default:
12360 return -EOPNOTSUPP;
12364 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12365 u32 *rules __always_unused)
12367 struct tg3 *tp = netdev_priv(dev);
12369 if (!tg3_flag(tp, SUPPORT_MSIX))
12370 return -EOPNOTSUPP;
12372 switch (info->cmd) {
12373 case ETHTOOL_GRXRINGS:
12374 if (netif_running(tp->dev))
12375 info->data = tp->rxq_cnt;
12376 else {
12377 info->data = num_online_cpus();
12378 if (info->data > TG3_RSS_MAX_NUM_QS)
12379 info->data = TG3_RSS_MAX_NUM_QS;
12382 /* The first interrupt vector only
12383 * handles link interrupts.
12385 info->data -= 1;
12386 return 0;
12388 default:
12389 return -EOPNOTSUPP;
12393 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12395 u32 size = 0;
12396 struct tg3 *tp = netdev_priv(dev);
12398 if (tg3_flag(tp, SUPPORT_MSIX))
12399 size = TG3_RSS_INDIR_TBL_SIZE;
12401 return size;
12404 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12406 struct tg3 *tp = netdev_priv(dev);
12407 int i;
12409 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12410 indir[i] = tp->rss_ind_tbl[i];
12412 return 0;
12415 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12417 struct tg3 *tp = netdev_priv(dev);
12418 size_t i;
12420 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12421 tp->rss_ind_tbl[i] = indir[i];
12423 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12424 return 0;
12426 /* It is legal to write the indirection
12427 * table while the device is running.
12429 tg3_full_lock(tp, 0);
12430 tg3_rss_write_indir_tbl(tp);
12431 tg3_full_unlock(tp);
12433 return 0;
12436 static void tg3_get_channels(struct net_device *dev,
12437 struct ethtool_channels *channel)
12439 struct tg3 *tp = netdev_priv(dev);
12440 u32 deflt_qs = netif_get_num_default_rss_queues();
12442 channel->max_rx = tp->rxq_max;
12443 channel->max_tx = tp->txq_max;
12445 if (netif_running(dev)) {
12446 channel->rx_count = tp->rxq_cnt;
12447 channel->tx_count = tp->txq_cnt;
12448 } else {
12449 if (tp->rxq_req)
12450 channel->rx_count = tp->rxq_req;
12451 else
12452 channel->rx_count = min(deflt_qs, tp->rxq_max);
12454 if (tp->txq_req)
12455 channel->tx_count = tp->txq_req;
12456 else
12457 channel->tx_count = min(deflt_qs, tp->txq_max);
12461 static int tg3_set_channels(struct net_device *dev,
12462 struct ethtool_channels *channel)
12464 struct tg3 *tp = netdev_priv(dev);
12466 if (!tg3_flag(tp, SUPPORT_MSIX))
12467 return -EOPNOTSUPP;
12469 if (channel->rx_count > tp->rxq_max ||
12470 channel->tx_count > tp->txq_max)
12471 return -EINVAL;
12473 tp->rxq_req = channel->rx_count;
12474 tp->txq_req = channel->tx_count;
12476 if (!netif_running(dev))
12477 return 0;
12479 tg3_stop(tp);
12481 tg3_carrier_off(tp);
12483 tg3_start(tp, true, false, false);
12485 return 0;
12488 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12490 switch (stringset) {
12491 case ETH_SS_STATS:
12492 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12493 break;
12494 case ETH_SS_TEST:
12495 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12496 break;
12497 default:
12498 WARN_ON(1); /* we need a WARN() */
12499 break;
12503 static int tg3_set_phys_id(struct net_device *dev,
12504 enum ethtool_phys_id_state state)
12506 struct tg3 *tp = netdev_priv(dev);
12508 if (!netif_running(tp->dev))
12509 return -EAGAIN;
12511 switch (state) {
12512 case ETHTOOL_ID_ACTIVE:
12513 return 1; /* cycle on/off once per second */
12515 case ETHTOOL_ID_ON:
12516 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12517 LED_CTRL_1000MBPS_ON |
12518 LED_CTRL_100MBPS_ON |
12519 LED_CTRL_10MBPS_ON |
12520 LED_CTRL_TRAFFIC_OVERRIDE |
12521 LED_CTRL_TRAFFIC_BLINK |
12522 LED_CTRL_TRAFFIC_LED);
12523 break;
12525 case ETHTOOL_ID_OFF:
12526 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12527 LED_CTRL_TRAFFIC_OVERRIDE);
12528 break;
12530 case ETHTOOL_ID_INACTIVE:
12531 tw32(MAC_LED_CTRL, tp->led_ctrl);
12532 break;
12535 return 0;
12538 static void tg3_get_ethtool_stats(struct net_device *dev,
12539 struct ethtool_stats *estats, u64 *tmp_stats)
12541 struct tg3 *tp = netdev_priv(dev);
12543 if (tp->hw_stats)
12544 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12545 else
12546 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12549 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12551 int i;
12552 __be32 *buf;
12553 u32 offset = 0, len = 0;
12554 u32 magic, val;
12556 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12557 return NULL;
12559 if (magic == TG3_EEPROM_MAGIC) {
12560 for (offset = TG3_NVM_DIR_START;
12561 offset < TG3_NVM_DIR_END;
12562 offset += TG3_NVM_DIRENT_SIZE) {
12563 if (tg3_nvram_read(tp, offset, &val))
12564 return NULL;
12566 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12567 TG3_NVM_DIRTYPE_EXTVPD)
12568 break;
12571 if (offset != TG3_NVM_DIR_END) {
12572 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12573 if (tg3_nvram_read(tp, offset + 4, &offset))
12574 return NULL;
12576 offset = tg3_nvram_logical_addr(tp, offset);
12580 if (!offset || !len) {
12581 offset = TG3_NVM_VPD_OFF;
12582 len = TG3_NVM_VPD_LEN;
12585 buf = kmalloc(len, GFP_KERNEL);
12586 if (buf == NULL)
12587 return NULL;
12589 if (magic == TG3_EEPROM_MAGIC) {
12590 for (i = 0; i < len; i += 4) {
12591 /* The data is in little-endian format in NVRAM.
12592 * Use the big-endian read routines to preserve
12593 * the byte order as it exists in NVRAM.
12594 */
12595 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12596 goto error;
12598 } else {
12599 u8 *ptr;
12600 ssize_t cnt;
12601 unsigned int pos = 0;
12603 ptr = (u8 *)&buf[0];
12604 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12605 cnt = pci_read_vpd(tp->pdev, pos,
12606 len - pos, ptr);
12607 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12608 cnt = 0;
12609 else if (cnt < 0)
12610 goto error;
12612 if (pos != len)
12613 goto error;
12616 *vpdlen = len;
12618 return buf;
12620 error:
12621 kfree(buf);
12622 return NULL;
12625 #define NVRAM_TEST_SIZE 0x100
12626 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12627 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12628 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12629 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12630 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12631 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12632 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12633 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12635 static int tg3_test_nvram(struct tg3 *tp)
12637 u32 csum, magic, len;
12638 __be32 *buf;
12639 int i, j, k, err = 0, size;
12641 if (tg3_flag(tp, NO_NVRAM))
12642 return 0;
12644 if (tg3_nvram_read(tp, 0, &magic) != 0)
12645 return -EIO;
12647 if (magic == TG3_EEPROM_MAGIC)
12648 size = NVRAM_TEST_SIZE;
12649 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12650 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12651 TG3_EEPROM_SB_FORMAT_1) {
12652 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12653 case TG3_EEPROM_SB_REVISION_0:
12654 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12655 break;
12656 case TG3_EEPROM_SB_REVISION_2:
12657 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12658 break;
12659 case TG3_EEPROM_SB_REVISION_3:
12660 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12661 break;
12662 case TG3_EEPROM_SB_REVISION_4:
12663 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12664 break;
12665 case TG3_EEPROM_SB_REVISION_5:
12666 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12667 break;
12668 case TG3_EEPROM_SB_REVISION_6:
12669 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12670 break;
12671 default:
12672 return -EIO;
12674 } else
12675 return 0;
12676 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12677 size = NVRAM_SELFBOOT_HW_SIZE;
12678 else
12679 return -EIO;
12681 buf = kmalloc(size, GFP_KERNEL);
12682 if (buf == NULL)
12683 return -ENOMEM;
12685 err = -EIO;
12686 for (i = 0, j = 0; i < size; i += 4, j++) {
12687 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12688 if (err)
12689 break;
12691 if (i < size)
12692 goto out;
12694 /* Selfboot format */
12695 magic = be32_to_cpu(buf[0]);
12696 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12697 TG3_EEPROM_MAGIC_FW) {
12698 u8 *buf8 = (u8 *) buf, csum8 = 0;
12700 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12701 TG3_EEPROM_SB_REVISION_2) {
12702 /* For rev 2, the csum doesn't include the MBA. */
12703 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12704 csum8 += buf8[i];
12705 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12706 csum8 += buf8[i];
12707 } else {
12708 for (i = 0; i < size; i++)
12709 csum8 += buf8[i];
12712 if (csum8 == 0) {
12713 err = 0;
12714 goto out;
12717 err = -EIO;
12718 goto out;
12721 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12722 TG3_EEPROM_MAGIC_HW) {
12723 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12724 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12725 u8 *buf8 = (u8 *) buf;
12727 /* Separate the parity bits and the data bytes. */
12728 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12729 if ((i == 0) || (i == 8)) {
12730 int l;
12731 u8 msk;
12733 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12734 parity[k++] = buf8[i] & msk;
12735 i++;
12736 } else if (i == 16) {
12737 int l;
12738 u8 msk;
12740 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12741 parity[k++] = buf8[i] & msk;
12742 i++;
12744 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12745 parity[k++] = buf8[i] & msk;
12746 i++;
12748 data[j++] = buf8[i];
12751 err = -EIO;
12752 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12753 u8 hw8 = hweight8(data[i]);
12755 if ((hw8 & 0x1) && parity[i])
12756 goto out;
12757 else if (!(hw8 & 0x1) && !parity[i])
12758 goto out;
12760 err = 0;
12761 goto out;
12764 err = -EIO;
12766 /* Bootstrap checksum at offset 0x10 */
12767 csum = calc_crc((unsigned char *) buf, 0x10);
12768 if (csum != le32_to_cpu(buf[0x10/4]))
12769 goto out;
12771 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12772 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12773 if (csum != le32_to_cpu(buf[0xfc/4]))
12774 goto out;
12776 kfree(buf);
12778 buf = tg3_vpd_readblock(tp, &len);
12779 if (!buf)
12780 return -ENOMEM;
12782 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12783 if (i > 0) {
12784 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12785 if (j < 0)
12786 goto out;
12788 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12789 goto out;
12791 i += PCI_VPD_LRDT_TAG_SIZE;
12792 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12793 PCI_VPD_RO_KEYWORD_CHKSUM);
12794 if (j > 0) {
12795 u8 csum8 = 0;
12797 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12799 for (i = 0; i <= j; i++)
12800 csum8 += ((u8 *)buf)[i];
12802 if (csum8)
12803 goto out;
12807 err = 0;
12809 out:
12810 kfree(buf);
12811 return err;
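/* Illustration (not part of this driver): the selfboot check above relies on
 * an 8-bit two's-complement checksum -- the stored checksum byte makes the
 * unsigned byte-sum of the whole image wrap to zero. A standalone sketch of
 * both directions:
 */
#include <stdint.h>
#include <stddef.h>

/* Nonzero iff the image checksums correctly, like the csum8 loop above. */
static int selfboot_csum_ok(const uint8_t *img, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += img[i];		/* 8-bit arithmetic wraps mod 256 */
	return sum == 0;
}

/* The checksum byte a build tool would store to make an image pass. */
static uint8_t selfboot_csum_make(const uint8_t *img, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += img[i];
	return (uint8_t)(0u - sum);	/* two's complement of the byte sum */
}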
12814 #define TG3_SERDES_TIMEOUT_SEC 2
12815 #define TG3_COPPER_TIMEOUT_SEC 6
12817 static int tg3_test_link(struct tg3 *tp)
12819 int i, max;
12821 if (!netif_running(tp->dev))
12822 return -ENODEV;
12824 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12825 max = TG3_SERDES_TIMEOUT_SEC;
12826 else
12827 max = TG3_COPPER_TIMEOUT_SEC;
12829 for (i = 0; i < max; i++) {
12830 if (tp->link_up)
12831 return 0;
12833 if (msleep_interruptible(1000))
12834 break;
12837 return -EIO;
12840 /* Only test the commonly used registers */
12841 static int tg3_test_registers(struct tg3 *tp)
12843 int i, is_5705, is_5750;
12844 u32 offset, read_mask, write_mask, val, save_val, read_val;
12845 static struct {
12846 u16 offset;
12847 u16 flags;
12848 #define TG3_FL_5705 0x1
12849 #define TG3_FL_NOT_5705 0x2
12850 #define TG3_FL_NOT_5788 0x4
12851 #define TG3_FL_NOT_5750 0x8
12852 u32 read_mask;
12853 u32 write_mask;
12854 } reg_tbl[] = {
12855 /* MAC Control Registers */
12856 { MAC_MODE, TG3_FL_NOT_5705,
12857 0x00000000, 0x00ef6f8c },
12858 { MAC_MODE, TG3_FL_5705,
12859 0x00000000, 0x01ef6b8c },
12860 { MAC_STATUS, TG3_FL_NOT_5705,
12861 0x03800107, 0x00000000 },
12862 { MAC_STATUS, TG3_FL_5705,
12863 0x03800100, 0x00000000 },
12864 { MAC_ADDR_0_HIGH, 0x0000,
12865 0x00000000, 0x0000ffff },
12866 { MAC_ADDR_0_LOW, 0x0000,
12867 0x00000000, 0xffffffff },
12868 { MAC_RX_MTU_SIZE, 0x0000,
12869 0x00000000, 0x0000ffff },
12870 { MAC_TX_MODE, 0x0000,
12871 0x00000000, 0x00000070 },
12872 { MAC_TX_LENGTHS, 0x0000,
12873 0x00000000, 0x00003fff },
12874 { MAC_RX_MODE, TG3_FL_NOT_5705,
12875 0x00000000, 0x000007fc },
12876 { MAC_RX_MODE, TG3_FL_5705,
12877 0x00000000, 0x000007dc },
12878 { MAC_HASH_REG_0, 0x0000,
12879 0x00000000, 0xffffffff },
12880 { MAC_HASH_REG_1, 0x0000,
12881 0x00000000, 0xffffffff },
12882 { MAC_HASH_REG_2, 0x0000,
12883 0x00000000, 0xffffffff },
12884 { MAC_HASH_REG_3, 0x0000,
12885 0x00000000, 0xffffffff },
12887 /* Receive Data and Receive BD Initiator Control Registers. */
12888 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12889 0x00000000, 0xffffffff },
12890 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12891 0x00000000, 0xffffffff },
12892 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12893 0x00000000, 0x00000003 },
12894 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12895 0x00000000, 0xffffffff },
12896 { RCVDBDI_STD_BD+0, 0x0000,
12897 0x00000000, 0xffffffff },
12898 { RCVDBDI_STD_BD+4, 0x0000,
12899 0x00000000, 0xffffffff },
12900 { RCVDBDI_STD_BD+8, 0x0000,
12901 0x00000000, 0xffff0002 },
12902 { RCVDBDI_STD_BD+0xc, 0x0000,
12903 0x00000000, 0xffffffff },
12905 /* Receive BD Initiator Control Registers. */
12906 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12907 0x00000000, 0xffffffff },
12908 { RCVBDI_STD_THRESH, TG3_FL_5705,
12909 0x00000000, 0x000003ff },
12910 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12911 0x00000000, 0xffffffff },
12913 /* Host Coalescing Control Registers. */
12914 { HOSTCC_MODE, TG3_FL_NOT_5705,
12915 0x00000000, 0x00000004 },
12916 { HOSTCC_MODE, TG3_FL_5705,
12917 0x00000000, 0x000000f6 },
12918 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12919 0x00000000, 0xffffffff },
12920 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12921 0x00000000, 0x000003ff },
12922 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12923 0x00000000, 0xffffffff },
12924 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12925 0x00000000, 0x000003ff },
12926 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12927 0x00000000, 0xffffffff },
12928 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12929 0x00000000, 0x000000ff },
12930 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12931 0x00000000, 0xffffffff },
12932 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12933 0x00000000, 0x000000ff },
12934 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12935 0x00000000, 0xffffffff },
12936 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12937 0x00000000, 0xffffffff },
12938 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12939 0x00000000, 0xffffffff },
12940 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12941 0x00000000, 0x000000ff },
12942 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12943 0x00000000, 0xffffffff },
12944 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12945 0x00000000, 0x000000ff },
12946 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12947 0x00000000, 0xffffffff },
12948 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12949 0x00000000, 0xffffffff },
12950 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12951 0x00000000, 0xffffffff },
12952 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12953 0x00000000, 0xffffffff },
12954 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12955 0x00000000, 0xffffffff },
12956 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12957 0xffffffff, 0x00000000 },
12958 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12959 0xffffffff, 0x00000000 },
12961 /* Buffer Manager Control Registers. */
12962 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12963 0x00000000, 0x007fff80 },
12964 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12965 0x00000000, 0x007fffff },
12966 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12967 0x00000000, 0x0000003f },
12968 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12969 0x00000000, 0x000001ff },
12970 { BUFMGR_MB_HIGH_WATER, 0x0000,
12971 0x00000000, 0x000001ff },
12972 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12973 0xffffffff, 0x00000000 },
12974 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12975 0xffffffff, 0x00000000 },
12977 /* Mailbox Registers */
12978 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12979 0x00000000, 0x000001ff },
12980 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12981 0x00000000, 0x000001ff },
12982 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12983 0x00000000, 0x000007ff },
12984 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12985 0x00000000, 0x000001ff },
12987 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12990 is_5705 = is_5750 = 0;
12991 if (tg3_flag(tp, 5705_PLUS)) {
12992 is_5705 = 1;
12993 if (tg3_flag(tp, 5750_PLUS))
12994 is_5750 = 1;
12997 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12998 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12999 continue;
13001 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13002 continue;
13004 if (tg3_flag(tp, IS_5788) &&
13005 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13006 continue;
13008 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13009 continue;
13011 offset = (u32) reg_tbl[i].offset;
13012 read_mask = reg_tbl[i].read_mask;
13013 write_mask = reg_tbl[i].write_mask;
13015 /* Save the original register content */
13016 save_val = tr32(offset);
13018 /* Determine the read-only value. */
13019 read_val = save_val & read_mask;
13021 /* Write zero to the register, then make sure the read-only bits
13022 * are not changed and the read/write bits are all zeros.
13023 */
13024 tw32(offset, 0);
13026 val = tr32(offset);
13028 /* Test the read-only and read/write bits. */
13029 if (((val & read_mask) != read_val) || (val & write_mask))
13030 goto out;
13032 /* Write ones to all the bits defined by RdMask and WrMask, then
13033 * make sure the read-only bits are not changed and the
13034 * read/write bits are all ones.
13035 */
13036 tw32(offset, read_mask | write_mask);
13038 val = tr32(offset);
13040 /* Test the read-only bits. */
13041 if ((val & read_mask) != read_val)
13042 goto out;
13044 /* Test the read/write bits. */
13045 if ((val & write_mask) != write_mask)
13046 goto out;
13048 tw32(offset, save_val);
13051 return 0;
13053 out:
13054 if (netif_msg_hw(tp))
13055 netdev_err(tp->dev,
13056 "Register test failed at offset %x\n", offset);
13057 tw32(offset, save_val);
13058 return -EIO;
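/* Illustration (not part of this driver): the core of the mask-driven
 * register test above, reduced to a single register. read_mask covers
 * read-only bits whose value must survive any write; write_mask covers
 * read/write bits that must take both all-zeros and all-ones. reg_read and
 * reg_write stand in for tr32/tw32.
 */
#include <stdint.h>

static int test_one_reg(uint32_t (*reg_read)(uint32_t),
			void (*reg_write)(uint32_t, uint32_t),
			uint32_t off, uint32_t read_mask, uint32_t write_mask)
{
	uint32_t save = reg_read(off);
	uint32_t ro = save & read_mask;		/* expected read-only value */
	uint32_t val;
	int ok = 1;

	reg_write(off, 0);
	val = reg_read(off);
	if ((val & read_mask) != ro || (val & write_mask))
		ok = 0;			/* RO bits moved, or RW bits stuck high */

	reg_write(off, read_mask | write_mask);
	val = reg_read(off);
	if ((val & read_mask) != ro || (val & write_mask) != write_mask)
		ok = 0;			/* RO bits moved, or RW bits stuck low */

	reg_write(off, save);		/* always restore the original value */
	return ok;
}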
13061 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13063 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13064 int i;
13065 u32 j;
13067 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13068 for (j = 0; j < len; j += 4) {
13069 u32 val;
13071 tg3_write_mem(tp, offset + j, test_pattern[i]);
13072 tg3_read_mem(tp, offset + j, &val);
13073 if (val != test_pattern[i])
13074 return -EIO;
13077 return 0;
13080 static int tg3_test_memory(struct tg3 *tp)
13082 static struct mem_entry {
13083 u32 offset;
13084 u32 len;
13085 } mem_tbl_570x[] = {
13086 { 0x00000000, 0x00b50},
13087 { 0x00002000, 0x1c000},
13088 { 0xffffffff, 0x00000}
13089 }, mem_tbl_5705[] = {
13090 { 0x00000100, 0x0000c},
13091 { 0x00000200, 0x00008},
13092 { 0x00004000, 0x00800},
13093 { 0x00006000, 0x01000},
13094 { 0x00008000, 0x02000},
13095 { 0x00010000, 0x0e000},
13096 { 0xffffffff, 0x00000}
13097 }, mem_tbl_5755[] = {
13098 { 0x00000200, 0x00008},
13099 { 0x00004000, 0x00800},
13100 { 0x00006000, 0x00800},
13101 { 0x00008000, 0x02000},
13102 { 0x00010000, 0x0c000},
13103 { 0xffffffff, 0x00000}
13104 }, mem_tbl_5906[] = {
13105 { 0x00000200, 0x00008},
13106 { 0x00004000, 0x00400},
13107 { 0x00006000, 0x00400},
13108 { 0x00008000, 0x01000},
13109 { 0x00010000, 0x01000},
13110 { 0xffffffff, 0x00000}
13111 }, mem_tbl_5717[] = {
13112 { 0x00000200, 0x00008},
13113 { 0x00010000, 0x0a000},
13114 { 0x00020000, 0x13c00},
13115 { 0xffffffff, 0x00000}
13116 }, mem_tbl_57765[] = {
13117 { 0x00000200, 0x00008},
13118 { 0x00004000, 0x00800},
13119 { 0x00006000, 0x09800},
13120 { 0x00010000, 0x0a000},
13121 { 0xffffffff, 0x00000}
13123 struct mem_entry *mem_tbl;
13124 int err = 0;
13125 int i;
13127 if (tg3_flag(tp, 5717_PLUS))
13128 mem_tbl = mem_tbl_5717;
13129 else if (tg3_flag(tp, 57765_CLASS) ||
13130 tg3_asic_rev(tp) == ASIC_REV_5762)
13131 mem_tbl = mem_tbl_57765;
13132 else if (tg3_flag(tp, 5755_PLUS))
13133 mem_tbl = mem_tbl_5755;
13134 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13135 mem_tbl = mem_tbl_5906;
13136 else if (tg3_flag(tp, 5705_PLUS))
13137 mem_tbl = mem_tbl_5705;
13138 else
13139 mem_tbl = mem_tbl_570x;
13141 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13142 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13143 if (err)
13144 break;
13147 return err;
13150 #define TG3_TSO_MSS 500
13152 #define TG3_TSO_IP_HDR_LEN 20
13153 #define TG3_TSO_TCP_HDR_LEN 20
13154 #define TG3_TSO_TCP_OPT_LEN 12
13156 static const u8 tg3_tso_header[] = {
13157 0x08, 0x00,
13158 0x45, 0x00, 0x00, 0x00,
13159 0x00, 0x00, 0x40, 0x00,
13160 0x40, 0x06, 0x00, 0x00,
13161 0x0a, 0x00, 0x00, 0x01,
13162 0x0a, 0x00, 0x00, 0x02,
13163 0x0d, 0x00, 0xe0, 0x00,
13164 0x00, 0x00, 0x01, 0x00,
13165 0x00, 0x00, 0x02, 0x00,
13166 0x80, 0x10, 0x10, 0x00,
13167 0x14, 0x09, 0x00, 0x00,
13168 0x01, 0x01, 0x08, 0x0a,
13169 0x11, 0x11, 0x11, 0x11,
13170 0x11, 0x11, 0x11, 0x11,
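/* Note on the template above: the bytes encode an IPv4 frame body -- the
 * 0x0800 ethertype, a 20-byte IP header (DF set, TTL 64, protocol TCP,
 * 10.0.0.1 -> 10.0.0.2, tot_len left zero), then a TCP header whose data
 * offset of 8 words covers the 12 bytes of options (two NOPs plus a
 * timestamp option). tg3_run_loopback() below splices it in after the two
 * MAC addresses and patches tot_len for the chosen mss.
 */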
13173 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13175 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13176 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13177 u32 budget;
13178 struct sk_buff *skb;
13179 u8 *tx_data, *rx_data;
13180 dma_addr_t map;
13181 int num_pkts, tx_len, rx_len, i, err;
13182 struct tg3_rx_buffer_desc *desc;
13183 struct tg3_napi *tnapi, *rnapi;
13184 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13186 tnapi = &tp->napi[0];
13187 rnapi = &tp->napi[0];
13188 if (tp->irq_cnt > 1) {
13189 if (tg3_flag(tp, ENABLE_RSS))
13190 rnapi = &tp->napi[1];
13191 if (tg3_flag(tp, ENABLE_TSS))
13192 tnapi = &tp->napi[1];
13194 coal_now = tnapi->coal_now | rnapi->coal_now;
13196 err = -EIO;
13198 tx_len = pktsz;
13199 skb = netdev_alloc_skb(tp->dev, tx_len);
13200 if (!skb)
13201 return -ENOMEM;
13203 tx_data = skb_put(skb, tx_len);
13204 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13205 memset(tx_data + ETH_ALEN, 0x0, 8);
13207 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13209 if (tso_loopback) {
13210 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13212 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13213 TG3_TSO_TCP_OPT_LEN;
13215 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13216 sizeof(tg3_tso_header));
13217 mss = TG3_TSO_MSS;
13219 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13220 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13222 /* Set the total length field in the IP header */
13223 iph->tot_len = htons((u16)(mss + hdr_len));
13225 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13226 TXD_FLAG_CPU_POST_DMA);
13228 if (tg3_flag(tp, HW_TSO_1) ||
13229 tg3_flag(tp, HW_TSO_2) ||
13230 tg3_flag(tp, HW_TSO_3)) {
13231 struct tcphdr *th;
13232 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13233 th = (struct tcphdr *)&tx_data[val];
13234 th->check = 0;
13235 } else
13236 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13238 if (tg3_flag(tp, HW_TSO_3)) {
13239 mss |= (hdr_len & 0xc) << 12;
13240 if (hdr_len & 0x10)
13241 base_flags |= 0x00000010;
13242 base_flags |= (hdr_len & 0x3e0) << 5;
13243 } else if (tg3_flag(tp, HW_TSO_2))
13244 mss |= hdr_len << 9;
13245 else if (tg3_flag(tp, HW_TSO_1) ||
13246 tg3_asic_rev(tp) == ASIC_REV_5705) {
13247 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13248 } else {
13249 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13252 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13253 } else {
13254 num_pkts = 1;
13255 data_off = ETH_HLEN;
13257 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13258 tx_len > VLAN_ETH_FRAME_LEN)
13259 base_flags |= TXD_FLAG_JMB_PKT;
13262 for (i = data_off; i < tx_len; i++)
13263 tx_data[i] = (u8) (i & 0xff);
13265 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13266 if (pci_dma_mapping_error(tp->pdev, map)) {
13267 dev_kfree_skb(skb);
13268 return -EIO;
13271 val = tnapi->tx_prod;
13272 tnapi->tx_buffers[val].skb = skb;
13273 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13275 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13276 rnapi->coal_now);
13278 udelay(10);
13280 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13282 budget = tg3_tx_avail(tnapi);
13283 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13284 base_flags | TXD_FLAG_END, mss, 0)) {
13285 tnapi->tx_buffers[val].skb = NULL;
13286 dev_kfree_skb(skb);
13287 return -EIO;
13290 tnapi->tx_prod++;
13292 /* Sync BD data before updating mailbox */
13293 wmb();
13295 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13296 tr32_mailbox(tnapi->prodmbox);
13298 udelay(10);
13300 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13301 for (i = 0; i < 35; i++) {
13302 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13303 coal_now);
13305 udelay(10);
13307 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13308 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13309 if ((tx_idx == tnapi->tx_prod) &&
13310 (rx_idx == (rx_start_idx + num_pkts)))
13311 break;
13314 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13315 dev_kfree_skb(skb);
13317 if (tx_idx != tnapi->tx_prod)
13318 goto out;
13320 if (rx_idx != rx_start_idx + num_pkts)
13321 goto out;
13323 val = data_off;
13324 while (rx_idx != rx_start_idx) {
13325 desc = &rnapi->rx_rcb[rx_start_idx++];
13326 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13327 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13329 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13330 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13331 goto out;
13333 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13334 - ETH_FCS_LEN;
13336 if (!tso_loopback) {
13337 if (rx_len != tx_len)
13338 goto out;
13340 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13341 if (opaque_key != RXD_OPAQUE_RING_STD)
13342 goto out;
13343 } else {
13344 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13345 goto out;
13347 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13348 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13349 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13350 goto out;
13353 if (opaque_key == RXD_OPAQUE_RING_STD) {
13354 rx_data = tpr->rx_std_buffers[desc_idx].data;
13355 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13356 mapping);
13357 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13358 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13359 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13360 mapping);
13361 } else
13362 goto out;
13364 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13365 PCI_DMA_FROMDEVICE);
13367 rx_data += TG3_RX_OFFSET(tp);
13368 for (i = data_off; i < rx_len; i++, val++) {
13369 if (*(rx_data + i) != (u8) (val & 0xff))
13370 goto out;
13374 err = 0;
13376 /* tg3_free_rings will unmap and free the rx_data */
13377 out:
13378 return err;
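/* Illustration (not part of this driver): how tg3_run_loopback() above
 * scatters the TSO header length across the descriptor fields on HW_TSO_3
 * class chips -- bits 2..3 of hdr_len ride in the mss field, bit 4 in flag
 * bit 4, and bits 5..9 in the upper flag bits.
 */
#include <stdint.h>

static void pack_tso_hdrlen(uint32_t hdr_len, uint32_t *mss, uint32_t *flags)
{
	*mss |= (hdr_len & 0xc) << 12;		/* hdr_len[3:2] -> mss[15:14] */
	if (hdr_len & 0x10)
		*flags |= 0x00000010;		/* hdr_len[4]   -> flags[4] */
	*flags |= (hdr_len & 0x3e0) << 5;	/* hdr_len[9:5] -> flags[14:10] */
}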
13381 #define TG3_STD_LOOPBACK_FAILED 1
13382 #define TG3_JMB_LOOPBACK_FAILED 2
13383 #define TG3_TSO_LOOPBACK_FAILED 4
13384 #define TG3_LOOPBACK_FAILED \
13385 (TG3_STD_LOOPBACK_FAILED | \
13386 TG3_JMB_LOOPBACK_FAILED | \
13387 TG3_TSO_LOOPBACK_FAILED)
13389 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13391 int err = -EIO;
13392 u32 eee_cap;
13393 u32 jmb_pkt_sz = 9000;
13395 if (tp->dma_limit)
13396 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13398 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13399 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13401 if (!netif_running(tp->dev)) {
13402 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13403 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13404 if (do_extlpbk)
13405 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13406 goto done;
13409 err = tg3_reset_hw(tp, true);
13410 if (err) {
13411 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13412 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13413 if (do_extlpbk)
13414 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13415 goto done;
13418 if (tg3_flag(tp, ENABLE_RSS)) {
13419 int i;
13421 /* Reroute all rx packets to the 1st queue */
13422 for (i = MAC_RSS_INDIR_TBL_0;
13423 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13424 tw32(i, 0x0);
13427 /* HW errata - mac loopback fails in some cases on 5780.
13428 * Normal traffic and PHY loopback are not affected by
13429 * errata. Also, the MAC loopback test is deprecated for
13430 * all newer ASIC revisions.
13431 */
13432 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13433 !tg3_flag(tp, CPMU_PRESENT)) {
13434 tg3_mac_loopback(tp, true);
13436 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13437 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13439 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13440 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13441 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13443 tg3_mac_loopback(tp, false);
13446 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13447 !tg3_flag(tp, USE_PHYLIB)) {
13448 int i;
13450 tg3_phy_lpbk_set(tp, 0, false);
13452 /* Wait for link */
13453 for (i = 0; i < 100; i++) {
13454 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13455 break;
13456 mdelay(1);
13459 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13460 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13461 if (tg3_flag(tp, TSO_CAPABLE) &&
13462 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13463 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13464 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13465 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13466 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13468 if (do_extlpbk) {
13469 tg3_phy_lpbk_set(tp, 0, true);
13471 /* All link indications report up, but the hardware
13472 * isn't really ready for about 20 msec. Double it
13473 * to be sure.
13474 */
13475 mdelay(40);
13477 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13478 data[TG3_EXT_LOOPB_TEST] |=
13479 TG3_STD_LOOPBACK_FAILED;
13480 if (tg3_flag(tp, TSO_CAPABLE) &&
13481 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13482 data[TG3_EXT_LOOPB_TEST] |=
13483 TG3_TSO_LOOPBACK_FAILED;
13484 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13485 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13486 data[TG3_EXT_LOOPB_TEST] |=
13487 TG3_JMB_LOOPBACK_FAILED;
13490 /* Re-enable gphy autopowerdown. */
13491 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13492 tg3_phy_toggle_apd(tp, true);
13495 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13496 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13498 done:
13499 tp->phy_flags |= eee_cap;
13501 return err;
13504 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13505 u64 *data)
13507 struct tg3 *tp = netdev_priv(dev);
13508 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13510 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13511 if (tg3_power_up(tp)) {
13512 etest->flags |= ETH_TEST_FL_FAILED;
13513 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13514 return;
13516 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13519 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13521 if (tg3_test_nvram(tp) != 0) {
13522 etest->flags |= ETH_TEST_FL_FAILED;
13523 data[TG3_NVRAM_TEST] = 1;
13525 if (!doextlpbk && tg3_test_link(tp)) {
13526 etest->flags |= ETH_TEST_FL_FAILED;
13527 data[TG3_LINK_TEST] = 1;
13529 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13530 int err, err2 = 0, irq_sync = 0;
13532 if (netif_running(dev)) {
13533 tg3_phy_stop(tp);
13534 tg3_netif_stop(tp);
13535 irq_sync = 1;
13538 tg3_full_lock(tp, irq_sync);
13539 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13540 err = tg3_nvram_lock(tp);
13541 tg3_halt_cpu(tp, RX_CPU_BASE);
13542 if (!tg3_flag(tp, 5705_PLUS))
13543 tg3_halt_cpu(tp, TX_CPU_BASE);
13544 if (!err)
13545 tg3_nvram_unlock(tp);
13547 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13548 tg3_phy_reset(tp);
13550 if (tg3_test_registers(tp) != 0) {
13551 etest->flags |= ETH_TEST_FL_FAILED;
13552 data[TG3_REGISTER_TEST] = 1;
13555 if (tg3_test_memory(tp) != 0) {
13556 etest->flags |= ETH_TEST_FL_FAILED;
13557 data[TG3_MEMORY_TEST] = 1;
13560 if (doextlpbk)
13561 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13563 if (tg3_test_loopback(tp, data, doextlpbk))
13564 etest->flags |= ETH_TEST_FL_FAILED;
13566 tg3_full_unlock(tp);
13568 if (tg3_test_interrupt(tp) != 0) {
13569 etest->flags |= ETH_TEST_FL_FAILED;
13570 data[TG3_INTERRUPT_TEST] = 1;
13573 tg3_full_lock(tp, 0);
13575 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13576 if (netif_running(dev)) {
13577 tg3_flag_set(tp, INIT_COMPLETE);
13578 err2 = tg3_restart_hw(tp, true);
13579 if (!err2)
13580 tg3_netif_start(tp);
13583 tg3_full_unlock(tp);
13585 if (irq_sync && !err2)
13586 tg3_phy_start(tp);
13588 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13589 tg3_power_down_prepare(tp);
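/* Note on the sequence above: this is what services "ethtool -t ethX
 * offline" (ETH_TEST_FL_OFFLINE). "ethtool -t ethX online" runs only the
 * non-disruptive NVRAM and link checks, and the external_lb variant
 * additionally sets ETH_TEST_FL_EXTERNAL_LB. Failures are reported back
 * through the data[] array, indexed by the TG3_*_TEST constants.
 */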
13593 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13594 struct ifreq *ifr, int cmd)
13596 struct tg3 *tp = netdev_priv(dev);
13597 struct hwtstamp_config stmpconf;
13599 if (!tg3_flag(tp, PTP_CAPABLE))
13600 return -EINVAL;
13602 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13603 return -EFAULT;
13605 if (stmpconf.flags)
13606 return -EINVAL;
13608 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13609 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13610 return -ERANGE;
13612 switch (stmpconf.rx_filter) {
13613 case HWTSTAMP_FILTER_NONE:
13614 tp->rxptpctl = 0;
13615 break;
13616 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13617 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13618 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13619 break;
13620 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13621 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13622 TG3_RX_PTP_CTL_SYNC_EVNT;
13623 break;
13624 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13625 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13626 TG3_RX_PTP_CTL_DELAY_REQ;
13627 break;
13628 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13629 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13630 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13631 break;
13632 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13633 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13634 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13635 break;
13636 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13637 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13638 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13639 break;
13640 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13641 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13642 TG3_RX_PTP_CTL_SYNC_EVNT;
13643 break;
13644 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13645 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13646 TG3_RX_PTP_CTL_SYNC_EVNT;
13647 break;
13648 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13649 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13650 TG3_RX_PTP_CTL_SYNC_EVNT;
13651 break;
13652 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13653 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13654 TG3_RX_PTP_CTL_DELAY_REQ;
13655 break;
13656 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13657 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13658 TG3_RX_PTP_CTL_DELAY_REQ;
13659 break;
13660 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13661 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13662 TG3_RX_PTP_CTL_DELAY_REQ;
13663 break;
13664 default:
13665 return -ERANGE;
13668 if (netif_running(dev) && tp->rxptpctl)
13669 tw32(TG3_RX_PTP_CTL,
13670 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13672 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13673 tg3_flag_set(tp, TX_TSTAMP_EN);
13674 else
13675 tg3_flag_clear(tp, TX_TSTAMP_EN);
13677 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13678 -EFAULT : 0;
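/* Illustration (not part of this driver): the user-space side of the ioctl
 * above, enabling TX timestamps plus timestamping of all PTPv2 event
 * packets via the standard <linux/net_tstamp.h> interface. Error handling
 * is elided.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_ptp_timestamps(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;			/* sets TX_TSTAMP_EN */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;	/* programs rxptpctl */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* cfg is copied back */
}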
13681 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13683 struct mii_ioctl_data *data = if_mii(ifr);
13684 struct tg3 *tp = netdev_priv(dev);
13685 int err;
13687 if (tg3_flag(tp, USE_PHYLIB)) {
13688 struct phy_device *phydev;
13689 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13690 return -EAGAIN;
13691 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
13692 return phy_mii_ioctl(phydev, ifr, cmd);
13695 switch (cmd) {
13696 case SIOCGMIIPHY:
13697 data->phy_id = tp->phy_addr;
13699 /* fallthru */
13700 case SIOCGMIIREG: {
13701 u32 mii_regval;
13703 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13704 break; /* We have no PHY */
13706 if (!netif_running(dev))
13707 return -EAGAIN;
13709 spin_lock_bh(&tp->lock);
13710 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13711 data->reg_num & 0x1f, &mii_regval);
13712 spin_unlock_bh(&tp->lock);
13714 data->val_out = mii_regval;
13716 return err;
13719 case SIOCSMIIREG:
13720 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13721 break; /* We have no PHY */
13723 if (!netif_running(dev))
13724 return -EAGAIN;
13726 spin_lock_bh(&tp->lock);
13727 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13728 data->reg_num & 0x1f, data->val_in);
13729 spin_unlock_bh(&tp->lock);
13731 return err;
13733 case SIOCSHWTSTAMP:
13734 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13736 default:
13737 /* do nothing */
13738 break;
13740 return -EOPNOTSUPP;
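/* Illustration (not part of this driver): reading a PHY register through
 * the SIOCGMIIPHY/SIOCGMIIREG path handled above. MII_BMSR is the standard
 * basic status register; error handling is elided.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int read_bmsr(int sock, const char *ifname, unsigned int *val)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_ifru;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	if (ioctl(sock, SIOCGMIIPHY, &ifr) < 0)	/* fills in mii->phy_id */
		return -1;
	mii->reg_num = MII_BMSR;
	if (ioctl(sock, SIOCGMIIREG, &ifr) < 0)
		return -1;
	*val = mii->val_out;
	return 0;
}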
13743 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13745 struct tg3 *tp = netdev_priv(dev);
13747 memcpy(ec, &tp->coal, sizeof(*ec));
13748 return 0;
13751 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13753 struct tg3 *tp = netdev_priv(dev);
13754 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13755 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13757 if (!tg3_flag(tp, 5705_PLUS)) {
13758 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13759 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13760 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13761 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13764 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13765 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13766 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13767 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13768 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13769 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13770 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13771 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13772 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13773 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13774 return -EINVAL;
13776 /* No rx interrupts will be generated if both are zero */
13777 if ((ec->rx_coalesce_usecs == 0) &&
13778 (ec->rx_max_coalesced_frames == 0))
13779 return -EINVAL;
13781 /* No tx interrupts will be generated if both are zero */
13782 if ((ec->tx_coalesce_usecs == 0) &&
13783 (ec->tx_max_coalesced_frames == 0))
13784 return -EINVAL;
13786 /* Only copy relevant parameters, ignore all others. */
13787 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13788 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13789 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13790 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13791 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13792 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13793 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13794 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13795 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13797 if (netif_running(dev)) {
13798 tg3_full_lock(tp, 0);
13799 __tg3_set_coalesce(tp, &tp->coal);
13800 tg3_full_unlock(tp);
13802 return 0;
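/* Note on the two -EINVAL checks above: the hardware raises an rx (or tx)
 * interrupt only when either the usec timer or the frame-count threshold
 * fires, so zeroing both would silence the queue entirely. From user space
 * this is the usual ETHTOOL_SCOALESCE path, e.g.:
 *
 *	ethtool -C eth0 rx-usecs 50 rx-frames 32
 */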
13805 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13807 struct tg3 *tp = netdev_priv(dev);
13809 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13810 netdev_warn(tp->dev, "Board does not support EEE!\n");
13811 return -EOPNOTSUPP;
13814 if (edata->advertised != tp->eee.advertised) {
13815 netdev_warn(tp->dev,
13816 "Direct manipulation of EEE advertisement is not supported\n");
13817 return -EINVAL;
13820 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13821 netdev_warn(tp->dev,
13822 "Maximal Tx Lpi timer supported is %#x(u)\n",
13823 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13824 return -EINVAL;
13827 tp->eee = *edata;
13829 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13830 tg3_warn_mgmt_link_flap(tp);
13832 if (netif_running(tp->dev)) {
13833 tg3_full_lock(tp, 0);
13834 tg3_setup_eee(tp);
13835 tg3_phy_reset(tp);
13836 tg3_full_unlock(tp);
13839 return 0;
13842 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13844 struct tg3 *tp = netdev_priv(dev);
13846 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13847 netdev_warn(tp->dev,
13848 "Board does not support EEE!\n");
13849 return -EOPNOTSUPP;
13852 *edata = tp->eee;
13853 return 0;
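/* Note on the EEE hooks above: they back "ethtool --show-eee ethX" and
 * "ethtool --set-eee ethX ...". As coded, the advertisement mask cannot be
 * changed through this path, tx_lpi_timer is capped at
 * TG3_CPMU_DBTMR1_LNKIDLE_MAX, and a successful set on a running device
 * re-runs EEE setup and resets the PHY, flapping the link.
 */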
13856 static const struct ethtool_ops tg3_ethtool_ops = {
13857 .get_settings = tg3_get_settings,
13858 .set_settings = tg3_set_settings,
13859 .get_drvinfo = tg3_get_drvinfo,
13860 .get_regs_len = tg3_get_regs_len,
13861 .get_regs = tg3_get_regs,
13862 .get_wol = tg3_get_wol,
13863 .set_wol = tg3_set_wol,
13864 .get_msglevel = tg3_get_msglevel,
13865 .set_msglevel = tg3_set_msglevel,
13866 .nway_reset = tg3_nway_reset,
13867 .get_link = ethtool_op_get_link,
13868 .get_eeprom_len = tg3_get_eeprom_len,
13869 .get_eeprom = tg3_get_eeprom,
13870 .set_eeprom = tg3_set_eeprom,
13871 .get_ringparam = tg3_get_ringparam,
13872 .set_ringparam = tg3_set_ringparam,
13873 .get_pauseparam = tg3_get_pauseparam,
13874 .set_pauseparam = tg3_set_pauseparam,
13875 .self_test = tg3_self_test,
13876 .get_strings = tg3_get_strings,
13877 .set_phys_id = tg3_set_phys_id,
13878 .get_ethtool_stats = tg3_get_ethtool_stats,
13879 .get_coalesce = tg3_get_coalesce,
13880 .set_coalesce = tg3_set_coalesce,
13881 .get_sset_count = tg3_get_sset_count,
13882 .get_rxnfc = tg3_get_rxnfc,
13883 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13884 .get_rxfh_indir = tg3_get_rxfh_indir,
13885 .set_rxfh_indir = tg3_set_rxfh_indir,
13886 .get_channels = tg3_get_channels,
13887 .set_channels = tg3_set_channels,
13888 .get_ts_info = tg3_get_ts_info,
13889 .get_eee = tg3_get_eee,
13890 .set_eee = tg3_set_eee,
13893 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13894 struct rtnl_link_stats64 *stats)
13896 struct tg3 *tp = netdev_priv(dev);
13898 spin_lock_bh(&tp->lock);
13899 if (!tp->hw_stats) {
13900 spin_unlock_bh(&tp->lock);
13901 return &tp->net_stats_prev;
13904 tg3_get_nstats(tp, stats);
13905 spin_unlock_bh(&tp->lock);
13907 return stats;
13910 static void tg3_set_rx_mode(struct net_device *dev)
13912 struct tg3 *tp = netdev_priv(dev);
13914 if (!netif_running(dev))
13915 return;
13917 tg3_full_lock(tp, 0);
13918 __tg3_set_rx_mode(dev);
13919 tg3_full_unlock(tp);
13922 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13923 int new_mtu)
13925 dev->mtu = new_mtu;
13927 if (new_mtu > ETH_DATA_LEN) {
13928 if (tg3_flag(tp, 5780_CLASS)) {
13929 netdev_update_features(dev);
13930 tg3_flag_clear(tp, TSO_CAPABLE);
13931 } else {
13932 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13934 } else {
13935 if (tg3_flag(tp, 5780_CLASS)) {
13936 tg3_flag_set(tp, TSO_CAPABLE);
13937 netdev_update_features(dev);
13939 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13943 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13945 struct tg3 *tp = netdev_priv(dev);
13946 int err;
13947 bool reset_phy = false;
13949 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13950 return -EINVAL;
13952 if (!netif_running(dev)) {
13953 /* We'll just catch it later when the
13954 * device is brought up.
13955 */
13956 tg3_set_mtu(dev, tp, new_mtu);
13957 return 0;
13960 tg3_phy_stop(tp);
13962 tg3_netif_stop(tp);
13964 tg3_full_lock(tp, 1);
13966 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13968 tg3_set_mtu(dev, tp, new_mtu);
13970 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13971 * breaks all requests to 256 bytes.
13972 */
13973 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13974 reset_phy = true;
13976 err = tg3_restart_hw(tp, reset_phy);
13978 if (!err)
13979 tg3_netif_start(tp);
13981 tg3_full_unlock(tp);
13983 if (!err)
13984 tg3_phy_start(tp);
13986 return err;
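/* Note on the MTU path above: on 5780-class chips jumbo frames and TSO
 * cannot be enabled together, so raising the MTU past ETH_DATA_LEN trades
 * TSO_CAPABLE away and lowering it back restores TSO; on everything else
 * only the jumbo ring flag is toggled. A running device goes through a full
 * halt/restart, e.g. after "ip link set eth0 mtu 9000".
 */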
13989 static const struct net_device_ops tg3_netdev_ops = {
13990 .ndo_open = tg3_open,
13991 .ndo_stop = tg3_close,
13992 .ndo_start_xmit = tg3_start_xmit,
13993 .ndo_get_stats64 = tg3_get_stats64,
13994 .ndo_validate_addr = eth_validate_addr,
13995 .ndo_set_rx_mode = tg3_set_rx_mode,
13996 .ndo_set_mac_address = tg3_set_mac_addr,
13997 .ndo_do_ioctl = tg3_ioctl,
13998 .ndo_tx_timeout = tg3_tx_timeout,
13999 .ndo_change_mtu = tg3_change_mtu,
14000 .ndo_fix_features = tg3_fix_features,
14001 .ndo_set_features = tg3_set_features,
14002 #ifdef CONFIG_NET_POLL_CONTROLLER
14003 .ndo_poll_controller = tg3_poll_controller,
14004 #endif
14007 static void tg3_get_eeprom_size(struct tg3 *tp)
14009 u32 cursize, val, magic;
14011 tp->nvram_size = EEPROM_CHIP_SIZE;
14013 if (tg3_nvram_read(tp, 0, &magic) != 0)
14014 return;
14016 if ((magic != TG3_EEPROM_MAGIC) &&
14017 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14018 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14019 return;
14021 /*
14022 * Size the chip by reading offsets at increasing powers of two.
14023 * When we encounter our validation signature, we know the addressing
14024 * has wrapped around, and thus have our chip size.
14025 */
14026 cursize = 0x10;
14028 while (cursize < tp->nvram_size) {
14029 if (tg3_nvram_read(tp, cursize, &val) != 0)
14030 return;
14032 if (val == magic)
14033 break;
14035 cursize <<= 1;
14038 tp->nvram_size = cursize;
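/* Illustration (not part of this driver): the address-wrap trick used
 * above. An EEPROM that decodes only N address bits aliases everything at
 * offset N and beyond, so reading at doubling offsets until the signature
 * word reappears yields the device size. eeprom_read stands in for
 * tg3_nvram_read.
 */
#include <stdint.h>

static uint32_t probe_eeprom_size(int (*eeprom_read)(uint32_t off,
						     uint32_t *val),
				  uint32_t max_size, uint32_t magic)
{
	uint32_t cur = 0x10, val;

	while (cur < max_size) {
		if (eeprom_read(cur, &val) != 0)
			return 0;	/* read error: size unknown */
		if (val == magic)
			break;		/* wrapped back to offset 0 */
		cur <<= 1;
	}
	return cur;
}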
14041 static void tg3_get_nvram_size(struct tg3 *tp)
14043 u32 val;
14045 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14046 return;
14048 /* Selfboot format */
14049 if (val != TG3_EEPROM_MAGIC) {
14050 tg3_get_eeprom_size(tp);
14051 return;
14054 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14055 if (val != 0) {
14056 /* This is confusing. We want to operate on the
14057 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14058 * call will read from NVRAM and byteswap the data
14059 * according to the byteswapping settings for all
14060 * other register accesses. This ensures the data we
14061 * want will always reside in the lower 16-bits.
14062 * However, the data in NVRAM is in LE format, which
14063 * means the data from the NVRAM read will always be
14064 * opposite the endianness of the CPU. The 16-bit
14065 * byteswap then brings the data to CPU endianness.
14066 */
14067 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14068 return;
14071 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
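/* A concrete walk of the byteswap above (assumed values for illustration):
 * if the NVRAM records a 512 KB part, the 16-bit count at 0xf2 is
 * 512 = 0x0200. Per the comment, that count lands in the low half-word of
 * val with its two bytes swapped relative to the CPU, i.e. 0x0002, and
 * swab16(0x0002) * 1024 == 0x0200 * 1024 == 512 KB.
 */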
14074 static void tg3_get_nvram_info(struct tg3 *tp)
14076 u32 nvcfg1;
14078 nvcfg1 = tr32(NVRAM_CFG1);
14079 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14080 tg3_flag_set(tp, FLASH);
14081 } else {
14082 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14083 tw32(NVRAM_CFG1, nvcfg1);
14086 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14087 tg3_flag(tp, 5780_CLASS)) {
14088 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14089 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14090 tp->nvram_jedecnum = JEDEC_ATMEL;
14091 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14092 tg3_flag_set(tp, NVRAM_BUFFERED);
14093 break;
14094 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14095 tp->nvram_jedecnum = JEDEC_ATMEL;
14096 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14097 break;
14098 case FLASH_VENDOR_ATMEL_EEPROM:
14099 tp->nvram_jedecnum = JEDEC_ATMEL;
14100 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14101 tg3_flag_set(tp, NVRAM_BUFFERED);
14102 break;
14103 case FLASH_VENDOR_ST:
14104 tp->nvram_jedecnum = JEDEC_ST;
14105 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14106 tg3_flag_set(tp, NVRAM_BUFFERED);
14107 break;
14108 case FLASH_VENDOR_SAIFUN:
14109 tp->nvram_jedecnum = JEDEC_SAIFUN;
14110 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14111 break;
14112 case FLASH_VENDOR_SST_SMALL:
14113 case FLASH_VENDOR_SST_LARGE:
14114 tp->nvram_jedecnum = JEDEC_SST;
14115 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14116 break;
14118 } else {
14119 tp->nvram_jedecnum = JEDEC_ATMEL;
14120 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14121 tg3_flag_set(tp, NVRAM_BUFFERED);
14125 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14127 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14128 case FLASH_5752PAGE_SIZE_256:
14129 tp->nvram_pagesize = 256;
14130 break;
14131 case FLASH_5752PAGE_SIZE_512:
14132 tp->nvram_pagesize = 512;
14133 break;
14134 case FLASH_5752PAGE_SIZE_1K:
14135 tp->nvram_pagesize = 1024;
14136 break;
14137 case FLASH_5752PAGE_SIZE_2K:
14138 tp->nvram_pagesize = 2048;
14139 break;
14140 case FLASH_5752PAGE_SIZE_4K:
14141 tp->nvram_pagesize = 4096;
14142 break;
14143 case FLASH_5752PAGE_SIZE_264:
14144 tp->nvram_pagesize = 264;
14145 break;
14146 case FLASH_5752PAGE_SIZE_528:
14147 tp->nvram_pagesize = 528;
14148 break;
14152 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14154 u32 nvcfg1;
14156 nvcfg1 = tr32(NVRAM_CFG1);
14158 /* NVRAM protection for TPM */
14159 if (nvcfg1 & (1 << 27))
14160 tg3_flag_set(tp, PROTECTED_NVRAM);
14162 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14163 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14164 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14165 tp->nvram_jedecnum = JEDEC_ATMEL;
14166 tg3_flag_set(tp, NVRAM_BUFFERED);
14167 break;
14168 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14169 tp->nvram_jedecnum = JEDEC_ATMEL;
14170 tg3_flag_set(tp, NVRAM_BUFFERED);
14171 tg3_flag_set(tp, FLASH);
14172 break;
14173 case FLASH_5752VENDOR_ST_M45PE10:
14174 case FLASH_5752VENDOR_ST_M45PE20:
14175 case FLASH_5752VENDOR_ST_M45PE40:
14176 tp->nvram_jedecnum = JEDEC_ST;
14177 tg3_flag_set(tp, NVRAM_BUFFERED);
14178 tg3_flag_set(tp, FLASH);
14179 break;
14182 if (tg3_flag(tp, FLASH)) {
14183 tg3_nvram_get_pagesize(tp, nvcfg1);
14184 } else {
14185 /* For eeprom, set pagesize to maximum eeprom size */
14186 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14188 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14189 tw32(NVRAM_CFG1, nvcfg1);
14193 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14195 u32 nvcfg1, protect = 0;
14197 nvcfg1 = tr32(NVRAM_CFG1);
14199 /* NVRAM protection for TPM */
14200 if (nvcfg1 & (1 << 27)) {
14201 tg3_flag_set(tp, PROTECTED_NVRAM);
14202 protect = 1;
14205 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14206 switch (nvcfg1) {
14207 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14208 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14209 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14210 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14211 tp->nvram_jedecnum = JEDEC_ATMEL;
14212 tg3_flag_set(tp, NVRAM_BUFFERED);
14213 tg3_flag_set(tp, FLASH);
14214 tp->nvram_pagesize = 264;
14215 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14216 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14217 tp->nvram_size = (protect ? 0x3e200 :
14218 TG3_NVRAM_SIZE_512KB);
14219 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14220 tp->nvram_size = (protect ? 0x1f200 :
14221 TG3_NVRAM_SIZE_256KB);
14222 else
14223 tp->nvram_size = (protect ? 0x1f200 :
14224 TG3_NVRAM_SIZE_128KB);
14225 break;
14226 case FLASH_5752VENDOR_ST_M45PE10:
14227 case FLASH_5752VENDOR_ST_M45PE20:
14228 case FLASH_5752VENDOR_ST_M45PE40:
14229 tp->nvram_jedecnum = JEDEC_ST;
14230 tg3_flag_set(tp, NVRAM_BUFFERED);
14231 tg3_flag_set(tp, FLASH);
14232 tp->nvram_pagesize = 256;
14233 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14234 tp->nvram_size = (protect ?
14235 TG3_NVRAM_SIZE_64KB :
14236 TG3_NVRAM_SIZE_128KB);
14237 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14238 tp->nvram_size = (protect ?
14239 TG3_NVRAM_SIZE_64KB :
14240 TG3_NVRAM_SIZE_256KB);
14241 else
14242 tp->nvram_size = (protect ?
14243 TG3_NVRAM_SIZE_128KB :
14244 TG3_NVRAM_SIZE_512KB);
14245 break;
14249 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14251 u32 nvcfg1;
14253 nvcfg1 = tr32(NVRAM_CFG1);
14255 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14256 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14257 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14258 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14259 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14260 tp->nvram_jedecnum = JEDEC_ATMEL;
14261 tg3_flag_set(tp, NVRAM_BUFFERED);
14262 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14264 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14265 tw32(NVRAM_CFG1, nvcfg1);
14266 break;
14267 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14268 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14269 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14270 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14271 tp->nvram_jedecnum = JEDEC_ATMEL;
14272 tg3_flag_set(tp, NVRAM_BUFFERED);
14273 tg3_flag_set(tp, FLASH);
14274 tp->nvram_pagesize = 264;
14275 break;
14276 case FLASH_5752VENDOR_ST_M45PE10:
14277 case FLASH_5752VENDOR_ST_M45PE20:
14278 case FLASH_5752VENDOR_ST_M45PE40:
14279 tp->nvram_jedecnum = JEDEC_ST;
14280 tg3_flag_set(tp, NVRAM_BUFFERED);
14281 tg3_flag_set(tp, FLASH);
14282 tp->nvram_pagesize = 256;
14283 break;
14287 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14289 u32 nvcfg1, protect = 0;
14291 nvcfg1 = tr32(NVRAM_CFG1);
14293 /* NVRAM protection for TPM */
14294 if (nvcfg1 & (1 << 27)) {
14295 tg3_flag_set(tp, PROTECTED_NVRAM);
14296 protect = 1;
14299 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14300 switch (nvcfg1) {
14301 case FLASH_5761VENDOR_ATMEL_ADB021D:
14302 case FLASH_5761VENDOR_ATMEL_ADB041D:
14303 case FLASH_5761VENDOR_ATMEL_ADB081D:
14304 case FLASH_5761VENDOR_ATMEL_ADB161D:
14305 case FLASH_5761VENDOR_ATMEL_MDB021D:
14306 case FLASH_5761VENDOR_ATMEL_MDB041D:
14307 case FLASH_5761VENDOR_ATMEL_MDB081D:
14308 case FLASH_5761VENDOR_ATMEL_MDB161D:
14309 tp->nvram_jedecnum = JEDEC_ATMEL;
14310 tg3_flag_set(tp, NVRAM_BUFFERED);
14311 tg3_flag_set(tp, FLASH);
14312 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14313 tp->nvram_pagesize = 256;
14314 break;
14315 case FLASH_5761VENDOR_ST_A_M45PE20:
14316 case FLASH_5761VENDOR_ST_A_M45PE40:
14317 case FLASH_5761VENDOR_ST_A_M45PE80:
14318 case FLASH_5761VENDOR_ST_A_M45PE16:
14319 case FLASH_5761VENDOR_ST_M_M45PE20:
14320 case FLASH_5761VENDOR_ST_M_M45PE40:
14321 case FLASH_5761VENDOR_ST_M_M45PE80:
14322 case FLASH_5761VENDOR_ST_M_M45PE16:
14323 tp->nvram_jedecnum = JEDEC_ST;
14324 tg3_flag_set(tp, NVRAM_BUFFERED);
14325 tg3_flag_set(tp, FLASH);
14326 tp->nvram_pagesize = 256;
14327 break;
14330 if (protect) {
14331 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14332 } else {
14333 switch (nvcfg1) {
14334 case FLASH_5761VENDOR_ATMEL_ADB161D:
14335 case FLASH_5761VENDOR_ATMEL_MDB161D:
14336 case FLASH_5761VENDOR_ST_A_M45PE16:
14337 case FLASH_5761VENDOR_ST_M_M45PE16:
14338 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14339 break;
14340 case FLASH_5761VENDOR_ATMEL_ADB081D:
14341 case FLASH_5761VENDOR_ATMEL_MDB081D:
14342 case FLASH_5761VENDOR_ST_A_M45PE80:
14343 case FLASH_5761VENDOR_ST_M_M45PE80:
14344 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14345 break;
14346 case FLASH_5761VENDOR_ATMEL_ADB041D:
14347 case FLASH_5761VENDOR_ATMEL_MDB041D:
14348 case FLASH_5761VENDOR_ST_A_M45PE40:
14349 case FLASH_5761VENDOR_ST_M_M45PE40:
14350 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14351 break;
14352 case FLASH_5761VENDOR_ATMEL_ADB021D:
14353 case FLASH_5761VENDOR_ATMEL_MDB021D:
14354 case FLASH_5761VENDOR_ST_A_M45PE20:
14355 case FLASH_5761VENDOR_ST_M_M45PE20:
14356 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14357 break;
14362 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14364 tp->nvram_jedecnum = JEDEC_ATMEL;
14365 tg3_flag_set(tp, NVRAM_BUFFERED);
14366 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14369 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14371 u32 nvcfg1;
14373 nvcfg1 = tr32(NVRAM_CFG1);
14375 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14376 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14377 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14378 tp->nvram_jedecnum = JEDEC_ATMEL;
14379 tg3_flag_set(tp, NVRAM_BUFFERED);
14380 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14382 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14383 tw32(NVRAM_CFG1, nvcfg1);
14384 return;
14385 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14386 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14387 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14388 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14389 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14390 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14391 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14392 tp->nvram_jedecnum = JEDEC_ATMEL;
14393 tg3_flag_set(tp, NVRAM_BUFFERED);
14394 tg3_flag_set(tp, FLASH);
14396 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14397 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14398 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14399 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14400 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14401 break;
14402 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14403 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14404 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14405 break;
14406 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14407 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14408 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14409 break;
14411 break;
14412 case FLASH_5752VENDOR_ST_M45PE10:
14413 case FLASH_5752VENDOR_ST_M45PE20:
14414 case FLASH_5752VENDOR_ST_M45PE40:
14415 tp->nvram_jedecnum = JEDEC_ST;
14416 tg3_flag_set(tp, NVRAM_BUFFERED);
14417 tg3_flag_set(tp, FLASH);
14419 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14420 case FLASH_5752VENDOR_ST_M45PE10:
14421 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14422 break;
14423 case FLASH_5752VENDOR_ST_M45PE20:
14424 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14425 break;
14426 case FLASH_5752VENDOR_ST_M45PE40:
14427 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14428 break;
14430 break;
14431 default:
14432 tg3_flag_set(tp, NO_NVRAM);
14433 return;
14436 tg3_nvram_get_pagesize(tp, nvcfg1);
14437 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14438 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
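/* Illustration (not part of this driver): why the 264/528 check above
 * matters. Atmel AT45DB DataFlash parts have non-power-of-two pages, so a
 * linear NVRAM offset must be split into page and in-page components before
 * use as a device address (cf. tg3_nvram_logical_addr() elsewhere in this
 * file). The shift of 9 below is assumed to match the 264-byte-page parts.
 */
#include <stdint.h>

/* e.g. linear offset 1000 with 264-byte pages -> page 3, in-page byte 208 */
static uint32_t dataflash_phys_addr(uint32_t linear, uint32_t pagesize)
{
	return ((linear / pagesize) << 9) + (linear % pagesize);
}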
14442 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14444 u32 nvcfg1;
14446 nvcfg1 = tr32(NVRAM_CFG1);
14448 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14449 case FLASH_5717VENDOR_ATMEL_EEPROM:
14450 case FLASH_5717VENDOR_MICRO_EEPROM:
14451 tp->nvram_jedecnum = JEDEC_ATMEL;
14452 tg3_flag_set(tp, NVRAM_BUFFERED);
14453 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14455 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14456 tw32(NVRAM_CFG1, nvcfg1);
14457 return;
14458 case FLASH_5717VENDOR_ATMEL_MDB011D:
14459 case FLASH_5717VENDOR_ATMEL_ADB011B:
14460 case FLASH_5717VENDOR_ATMEL_ADB011D:
14461 case FLASH_5717VENDOR_ATMEL_MDB021D:
14462 case FLASH_5717VENDOR_ATMEL_ADB021B:
14463 case FLASH_5717VENDOR_ATMEL_ADB021D:
14464 case FLASH_5717VENDOR_ATMEL_45USPT:
14465 tp->nvram_jedecnum = JEDEC_ATMEL;
14466 tg3_flag_set(tp, NVRAM_BUFFERED);
14467 tg3_flag_set(tp, FLASH);
14469 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14470 case FLASH_5717VENDOR_ATMEL_MDB021D:
14471 /* Detect size with tg3_nvram_get_size() */
14472 break;
14473 case FLASH_5717VENDOR_ATMEL_ADB021B:
14474 case FLASH_5717VENDOR_ATMEL_ADB021D:
14475 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14476 break;
14477 default:
14478 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14479 break;
14481 break;
14482 case FLASH_5717VENDOR_ST_M_M25PE10:
14483 case FLASH_5717VENDOR_ST_A_M25PE10:
14484 case FLASH_5717VENDOR_ST_M_M45PE10:
14485 case FLASH_5717VENDOR_ST_A_M45PE10:
14486 case FLASH_5717VENDOR_ST_M_M25PE20:
14487 case FLASH_5717VENDOR_ST_A_M25PE20:
14488 case FLASH_5717VENDOR_ST_M_M45PE20:
14489 case FLASH_5717VENDOR_ST_A_M45PE20:
14490 case FLASH_5717VENDOR_ST_25USPT:
14491 case FLASH_5717VENDOR_ST_45USPT:
14492 tp->nvram_jedecnum = JEDEC_ST;
14493 tg3_flag_set(tp, NVRAM_BUFFERED);
14494 tg3_flag_set(tp, FLASH);
14496 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14497 case FLASH_5717VENDOR_ST_M_M25PE20:
14498 case FLASH_5717VENDOR_ST_M_M45PE20:
14499 /* Detect size with tg3_nvram_get_size() */
14500 break;
14501 case FLASH_5717VENDOR_ST_A_M25PE20:
14502 case FLASH_5717VENDOR_ST_A_M45PE20:
14503 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14504 break;
14505 default:
14506 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14507 break;
14509 break;
14510 default:
14511 tg3_flag_set(tp, NO_NVRAM);
14512 return;
14515 tg3_nvram_get_pagesize(tp, nvcfg1);
14516 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14517 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14520 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14522 u32 nvcfg1, nvmpinstrp;
14524 nvcfg1 = tr32(NVRAM_CFG1);
14525 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14527 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14528 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14529 tg3_flag_set(tp, NO_NVRAM);
14530 return;
14533 switch (nvmpinstrp) {
14534 case FLASH_5762_EEPROM_HD:
14535 nvmpinstrp = FLASH_5720_EEPROM_HD;
14536 break;
14537 case FLASH_5762_EEPROM_LD:
14538 nvmpinstrp = FLASH_5720_EEPROM_LD;
14539 break;
14540 case FLASH_5720VENDOR_M_ST_M45PE20:
14541 /* This pinstrap supports multiple sizes, so force it
14542 * to read the actual size from location 0xf0.
14543 */
14544 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14545 break;
14549 switch (nvmpinstrp) {
14550 case FLASH_5720_EEPROM_HD:
14551 case FLASH_5720_EEPROM_LD:
14552 tp->nvram_jedecnum = JEDEC_ATMEL;
14553 tg3_flag_set(tp, NVRAM_BUFFERED);
14555 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14556 tw32(NVRAM_CFG1, nvcfg1);
14557 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14558 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14559 else
14560 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14561 return;
14562 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14563 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14564 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14565 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14566 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14567 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14568 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14569 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14570 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14571 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14572 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14573 case FLASH_5720VENDOR_ATMEL_45USPT:
14574 tp->nvram_jedecnum = JEDEC_ATMEL;
14575 tg3_flag_set(tp, NVRAM_BUFFERED);
14576 tg3_flag_set(tp, FLASH);
14578 switch (nvmpinstrp) {
14579 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14580 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14581 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14582 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14583 break;
14584 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14585 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14586 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14587 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14588 break;
14589 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14590 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14591 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14592 break;
14593 default:
14594 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14595 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14596 break;
14598 break;
14599 case FLASH_5720VENDOR_M_ST_M25PE10:
14600 case FLASH_5720VENDOR_M_ST_M45PE10:
14601 case FLASH_5720VENDOR_A_ST_M25PE10:
14602 case FLASH_5720VENDOR_A_ST_M45PE10:
14603 case FLASH_5720VENDOR_M_ST_M25PE20:
14604 case FLASH_5720VENDOR_M_ST_M45PE20:
14605 case FLASH_5720VENDOR_A_ST_M25PE20:
14606 case FLASH_5720VENDOR_A_ST_M45PE20:
14607 case FLASH_5720VENDOR_M_ST_M25PE40:
14608 case FLASH_5720VENDOR_M_ST_M45PE40:
14609 case FLASH_5720VENDOR_A_ST_M25PE40:
14610 case FLASH_5720VENDOR_A_ST_M45PE40:
14611 case FLASH_5720VENDOR_M_ST_M25PE80:
14612 case FLASH_5720VENDOR_M_ST_M45PE80:
14613 case FLASH_5720VENDOR_A_ST_M25PE80:
14614 case FLASH_5720VENDOR_A_ST_M45PE80:
14615 case FLASH_5720VENDOR_ST_25USPT:
14616 case FLASH_5720VENDOR_ST_45USPT:
14617 tp->nvram_jedecnum = JEDEC_ST;
14618 tg3_flag_set(tp, NVRAM_BUFFERED);
14619 tg3_flag_set(tp, FLASH);
14621 switch (nvmpinstrp) {
14622 case FLASH_5720VENDOR_M_ST_M25PE20:
14623 case FLASH_5720VENDOR_M_ST_M45PE20:
14624 case FLASH_5720VENDOR_A_ST_M25PE20:
14625 case FLASH_5720VENDOR_A_ST_M45PE20:
14626 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14627 break;
14628 case FLASH_5720VENDOR_M_ST_M25PE40:
14629 case FLASH_5720VENDOR_M_ST_M45PE40:
14630 case FLASH_5720VENDOR_A_ST_M25PE40:
14631 case FLASH_5720VENDOR_A_ST_M45PE40:
14632 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14633 break;
14634 case FLASH_5720VENDOR_M_ST_M25PE80:
14635 case FLASH_5720VENDOR_M_ST_M45PE80:
14636 case FLASH_5720VENDOR_A_ST_M25PE80:
14637 case FLASH_5720VENDOR_A_ST_M45PE80:
14638 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14639 break;
14640 default:
14641 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14642 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14643 break;
14645 break;
14646 default:
14647 tg3_flag_set(tp, NO_NVRAM);
14648 return;
14651 tg3_nvram_get_pagesize(tp, nvcfg1);
14652 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14653 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14655 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14656 u32 val;
14658 if (tg3_nvram_read(tp, 0, &val))
14659 return;
14661 if (val != TG3_EEPROM_MAGIC &&
14662 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14663 tg3_flag_set(tp, NO_NVRAM);
14667 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14668 static void tg3_nvram_init(struct tg3 *tp)
14670 if (tg3_flag(tp, IS_SSB_CORE)) {
14671 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14672 tg3_flag_clear(tp, NVRAM);
14673 tg3_flag_clear(tp, NVRAM_BUFFERED);
14674 tg3_flag_set(tp, NO_NVRAM);
14675 return;
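/* Reset the EEPROM address state machine and program the default
 * clock period before any serial-eeprom access is attempted.
 */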
14678 tw32_f(GRC_EEPROM_ADDR,
14679 (EEPROM_ADDR_FSM_RESET |
14680 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14681 EEPROM_ADDR_CLKPERD_SHIFT)));
14683 msleep(1);
14685 /* Enable seeprom accesses. */
14686 tw32_f(GRC_LOCAL_CTRL,
14687 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14688 udelay(100);
14690 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14691 tg3_asic_rev(tp) != ASIC_REV_5701) {
14692 tg3_flag_set(tp, NVRAM);
14694 if (tg3_nvram_lock(tp)) {
14695 netdev_warn(tp->dev,
14696 "Cannot get nvram lock, %s failed\n",
14697 __func__);
14698 return;
14700 tg3_enable_nvram_access(tp);
14702 tp->nvram_size = 0;
14704 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14705 tg3_get_5752_nvram_info(tp);
14706 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14707 tg3_get_5755_nvram_info(tp);
14708 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14709 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14710 tg3_asic_rev(tp) == ASIC_REV_5785)
14711 tg3_get_5787_nvram_info(tp);
14712 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14713 tg3_get_5761_nvram_info(tp);
14714 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14715 tg3_get_5906_nvram_info(tp);
14716 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14717 tg3_flag(tp, 57765_CLASS))
14718 tg3_get_57780_nvram_info(tp);
14719 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14720 tg3_asic_rev(tp) == ASIC_REV_5719)
14721 tg3_get_5717_nvram_info(tp);
14722 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14723 tg3_asic_rev(tp) == ASIC_REV_5762)
14724 tg3_get_5720_nvram_info(tp);
14725 else
14726 tg3_get_nvram_info(tp);
14728 if (tp->nvram_size == 0)
14729 tg3_get_nvram_size(tp);
14731 tg3_disable_nvram_access(tp);
14732 tg3_nvram_unlock(tp);
14734 } else {
14735 tg3_flag_clear(tp, NVRAM);
14736 tg3_flag_clear(tp, NVRAM_BUFFERED);
14738 tg3_get_eeprom_size(tp);
14742 struct subsys_tbl_ent {
14743 u16 subsys_vendor, subsys_devid;
14744 u32 phy_id;
14747 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14748 /* Broadcom boards. */
14749 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14750 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14751 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14752 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14753 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14754 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14755 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14756 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14757 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14758 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14759 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14760 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14761 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14762 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14763 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14764 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14765 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14766 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14767 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14768 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14769 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14770 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14772 /* 3com boards. */
14773 { TG3PCI_SUBVENDOR_ID_3COM,
14774 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14775 { TG3PCI_SUBVENDOR_ID_3COM,
14776 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14777 { TG3PCI_SUBVENDOR_ID_3COM,
14778 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14779 { TG3PCI_SUBVENDOR_ID_3COM,
14780 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14781 { TG3PCI_SUBVENDOR_ID_3COM,
14782 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14784 /* DELL boards. */
14785 { TG3PCI_SUBVENDOR_ID_DELL,
14786 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14787 { TG3PCI_SUBVENDOR_ID_DELL,
14788 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14789 { TG3PCI_SUBVENDOR_ID_DELL,
14790 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14791 { TG3PCI_SUBVENDOR_ID_DELL,
14792 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14794 /* Compaq boards. */
14795 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14796 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14797 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14798 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14799 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14800 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14801 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14802 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14803 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14804 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14806 /* IBM boards. */
14807 { TG3PCI_SUBVENDOR_ID_IBM,
14808 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14811 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14813 int i;
14815 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14816 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14817 tp->pdev->subsystem_vendor) &&
14818 (subsys_id_to_phy_id[i].subsys_devid ==
14819 tp->pdev->subsystem_device))
14820 return &subsys_id_to_phy_id[i];
14822 return NULL;
14825 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14827 u32 val;
14829 tp->phy_id = TG3_PHY_ID_INVALID;
14830 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14832 /* Assume an onboard device and WOL capable by default. */
14833 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14834 tg3_flag_set(tp, WOL_CAP);
14836 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14837 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14838 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14839 tg3_flag_set(tp, IS_NIC);
14841 val = tr32(VCPU_CFGSHDW);
14842 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14843 tg3_flag_set(tp, ASPM_WORKAROUND);
14844 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14845 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14846 tg3_flag_set(tp, WOL_ENABLE);
14847 device_set_wakeup_enable(&tp->pdev->dev, true);
14849 goto done;
14852 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14853 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14854 u32 nic_cfg, led_cfg;
14855 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14856 int eeprom_phy_serdes = 0;
14858 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14859 tp->nic_sram_data_cfg = nic_cfg;
14861 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14862 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14863 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14864 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14865 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14866 (ver > 0) && (ver < 0x100))
14867 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14869 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14870 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14872 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14873 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14874 eeprom_phy_serdes = 1;
14876 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14877 if (nic_phy_id != 0) {
14878 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14879 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14881 eeprom_phy_id = (id1 >> 16) << 10;
14882 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14883 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14884 } else
14885 eeprom_phy_id = 0;
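/* The repack above builds tg3's internal 32-bit PHY id: the ID1
 * half of the SRAM word lands in bits 25:10, the top six bits of
 * the ID2 half in bits 31:26, and its low ten bits in bits 9:0.
 * tg3_phy_probe() assembles the same layout from MII_PHYSID1/2.
 */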
14887 tp->phy_id = eeprom_phy_id;
14888 if (eeprom_phy_serdes) {
14889 if (!tg3_flag(tp, 5705_PLUS))
14890 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14891 else
14892 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14895 if (tg3_flag(tp, 5750_PLUS))
14896 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14897 SHASTA_EXT_LED_MODE_MASK);
14898 else
14899 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14901 switch (led_cfg) {
14902 default:
14903 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14904 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14905 break;
14907 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14908 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14909 break;
14911 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14912 tp->led_ctrl = LED_CTRL_MODE_MAC;
14914 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14915 * read on some older 5700/5701 bootcode.
14916 */
14917 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14918 tg3_asic_rev(tp) == ASIC_REV_5701)
14919 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14921 break;
14923 case SHASTA_EXT_LED_SHARED:
14924 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14925 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14926 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14927 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14928 LED_CTRL_MODE_PHY_2);
14930 if (tg3_flag(tp, 5717_PLUS) ||
14931 tg3_asic_rev(tp) == ASIC_REV_5762)
14932 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
14933 LED_CTRL_BLINK_RATE_MASK;
14935 break;
14937 case SHASTA_EXT_LED_MAC:
14938 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14939 break;
14941 case SHASTA_EXT_LED_COMBO:
14942 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14943 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14944 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14945 LED_CTRL_MODE_PHY_2);
14946 break;
14950 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14951 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14952 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14953 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14955 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14956 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14958 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14959 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14960 if ((tp->pdev->subsystem_vendor ==
14961 PCI_VENDOR_ID_ARIMA) &&
14962 (tp->pdev->subsystem_device == 0x205a ||
14963 tp->pdev->subsystem_device == 0x2063))
14964 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14965 } else {
14966 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14967 tg3_flag_set(tp, IS_NIC);
14970 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14971 tg3_flag_set(tp, ENABLE_ASF);
14972 if (tg3_flag(tp, 5750_PLUS))
14973 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14976 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14977 tg3_flag(tp, 5750_PLUS))
14978 tg3_flag_set(tp, ENABLE_APE);
14980 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14981 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14982 tg3_flag_clear(tp, WOL_CAP);
14984 if (tg3_flag(tp, WOL_CAP) &&
14985 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14986 tg3_flag_set(tp, WOL_ENABLE);
14987 device_set_wakeup_enable(&tp->pdev->dev, true);
14990 if (cfg2 & (1 << 17))
14991 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14993 /* serdes signal pre-emphasis in register 0x590 is set by
14994 * the bootcode if bit 18 is set. */
14995 if (cfg2 & (1 << 18))
14996 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14998 if ((tg3_flag(tp, 57765_PLUS) ||
14999 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15000 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15001 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15002 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15004 if (tg3_flag(tp, PCI_EXPRESS)) {
15005 u32 cfg3;
15007 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15008 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15009 !tg3_flag(tp, 57765_PLUS) &&
15010 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15011 tg3_flag_set(tp, ASPM_WORKAROUND);
15012 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15013 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15014 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15015 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15018 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15019 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15020 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15021 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15022 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15023 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15025 done:
15026 if (tg3_flag(tp, WOL_CAP))
15027 device_set_wakeup_enable(&tp->pdev->dev,
15028 tg3_flag(tp, WOL_ENABLE));
15029 else
15030 device_set_wakeup_capable(&tp->pdev->dev, false);
15033 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15035 int i, err;
15036 u32 val2, off = offset * 8;
15038 err = tg3_nvram_lock(tp);
15039 if (err)
15040 return err;
15042 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15043 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15044 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15045 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15046 udelay(10);
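/* Poll for completion: up to 100 iterations of 10us gives the OTP
 * read command roughly 1ms to finish before we report -EBUSY.
 */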
15048 for (i = 0; i < 100; i++) {
15049 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15050 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15051 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15052 break;
15054 udelay(10);
15057 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15059 tg3_nvram_unlock(tp);
15060 if (val2 & APE_OTP_STATUS_CMD_DONE)
15061 return 0;
15063 return -EBUSY;
15066 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15068 int i;
15069 u32 val;
15071 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15072 tw32(OTP_CTRL, cmd);
15074 /* Wait for up to 1 ms for command to execute. */
15075 for (i = 0; i < 100; i++) {
15076 val = tr32(OTP_STATUS);
15077 if (val & OTP_STATUS_CMD_DONE)
15078 break;
15079 udelay(10);
15082 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15085 /* Read the gphy configuration from the OTP region of the chip. The gphy
15086 * configuration is a 32-bit value that straddles the alignment boundary.
15087 * We do two 32-bit reads and then shift and merge the results.
15088 */
15089 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15091 u32 bhalf_otp, thalf_otp;
15093 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15095 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15096 return 0;
15098 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15100 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15101 return 0;
15103 thalf_otp = tr32(OTP_READ_DATA);
15105 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15107 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15108 return 0;
15110 bhalf_otp = tr32(OTP_READ_DATA);
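/* Merge the halves: the low 16 bits of the first word become the
 * high 16 bits of the result, completing the straddled 32-bit value.
 */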
15112 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15115 static void tg3_phy_init_link_config(struct tg3 *tp)
15117 u32 adv = ADVERTISED_Autoneg;
15119 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15120 adv |= ADVERTISED_1000baseT_Half |
15121 ADVERTISED_1000baseT_Full;
15123 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15124 adv |= ADVERTISED_100baseT_Half |
15125 ADVERTISED_100baseT_Full |
15126 ADVERTISED_10baseT_Half |
15127 ADVERTISED_10baseT_Full |
15128 ADVERTISED_TP;
15129 else
15130 adv |= ADVERTISED_FIBRE;
15132 tp->link_config.advertising = adv;
15133 tp->link_config.speed = SPEED_UNKNOWN;
15134 tp->link_config.duplex = DUPLEX_UNKNOWN;
15135 tp->link_config.autoneg = AUTONEG_ENABLE;
15136 tp->link_config.active_speed = SPEED_UNKNOWN;
15137 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15139 tp->old_link = -1;
15142 static int tg3_phy_probe(struct tg3 *tp)
15144 u32 hw_phy_id_1, hw_phy_id_2;
15145 u32 hw_phy_id, hw_phy_id_masked;
15146 int err;
15148 /* flow control autonegotiation is default behavior */
15149 tg3_flag_set(tp, PAUSE_AUTONEG);
15150 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15152 if (tg3_flag(tp, ENABLE_APE)) {
15153 switch (tp->pci_fn) {
15154 case 0:
15155 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15156 break;
15157 case 1:
15158 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15159 break;
15160 case 2:
15161 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15162 break;
15163 case 3:
15164 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15165 break;
15169 if (!tg3_flag(tp, ENABLE_ASF) &&
15170 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15171 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15172 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15173 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15175 if (tg3_flag(tp, USE_PHYLIB))
15176 return tg3_phy_init(tp);
15178 /* Reading the PHY ID register can conflict with ASF
15179 * firmware access to the PHY hardware.
15180 */
15181 err = 0;
15182 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15183 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15184 } else {
15185 /* Now read the physical PHY_ID from the chip and verify
15186 * that it is sane. If it doesn't look good, we fall back
15187 * to the hard-coded table based PHY_ID and, failing
15188 * that, to the value found in the eeprom area.
15189 */
15190 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15191 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15193 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15194 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15195 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
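/* Same 16/6/10-bit repack as the SRAM path in
 * tg3_get_eeprom_hw_cfg(), applied to the live MII_PHYSID1/2
 * registers before applying TG3_PHY_ID_MASK below.
 */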
15197 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15200 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15201 tp->phy_id = hw_phy_id;
15202 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15203 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15204 else
15205 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15206 } else {
15207 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15208 /* Do nothing, phy ID already set up in
15209 * tg3_get_eeprom_hw_cfg().
15210 */
15211 } else {
15212 struct subsys_tbl_ent *p;
15214 /* No eeprom signature? Try the hardcoded
15215 * subsys device table.
15216 */
15217 p = tg3_lookup_by_subsys(tp);
15218 if (p) {
15219 tp->phy_id = p->phy_id;
15220 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15221 /* For now we saw the IDs 0xbc050cd0,
15222 * 0xbc050f80 and 0xbc050c30 on devices
15223 * connected to a BCM4785 and there are
15224 * probably more. Just assume that the phy is
15225 * supported when it is connected to a SSB core
15226 * for now.
15227 */
15228 return -ENODEV;
15231 if (!tp->phy_id ||
15232 tp->phy_id == TG3_PHY_ID_BCM8002)
15233 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15237 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15238 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15239 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15240 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15241 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15242 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15243 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15244 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15245 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15246 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15248 tp->eee.supported = SUPPORTED_100baseT_Full |
15249 SUPPORTED_1000baseT_Full;
15250 tp->eee.advertised = ADVERTISED_100baseT_Full |
15251 ADVERTISED_1000baseT_Full;
15252 tp->eee.eee_enabled = 1;
15253 tp->eee.tx_lpi_enabled = 1;
15254 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15257 tg3_phy_init_link_config(tp);
15259 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15260 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15261 !tg3_flag(tp, ENABLE_APE) &&
15262 !tg3_flag(tp, ENABLE_ASF)) {
15263 u32 bmsr, dummy;
15265 tg3_readphy(tp, MII_BMSR, &bmsr);
15266 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15267 (bmsr & BMSR_LSTATUS))
15268 goto skip_phy_reset;
15270 err = tg3_phy_reset(tp);
15271 if (err)
15272 return err;
15274 tg3_phy_set_wirespeed(tp);
15276 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15277 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15278 tp->link_config.flowctrl);
15280 tg3_writephy(tp, MII_BMCR,
15281 BMCR_ANENABLE | BMCR_ANRESTART);
15285 skip_phy_reset:
15286 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15287 err = tg3_init_5401phy_dsp(tp);
15288 if (err)
15289 return err;
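/* The 5401 DSP init appears to need a second pass to stick, so
 * repeat it once after a successful first run.
 */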
15291 err = tg3_init_5401phy_dsp(tp);
15294 return err;
15297 static void tg3_read_vpd(struct tg3 *tp)
15299 u8 *vpd_data;
15300 unsigned int block_end, rosize, len;
15301 u32 vpdlen;
15302 int j, i = 0;
15304 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15305 if (!vpd_data)
15306 goto out_no_vpd;
15308 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15309 if (i < 0)
15310 goto out_not_found;
15312 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15313 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15314 i += PCI_VPD_LRDT_TAG_SIZE;
15316 if (block_end > vpdlen)
15317 goto out_not_found;
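/* If the read-only VPD section names Dell (PCI vendor id "1028")
 * as the manufacturer, the VENDOR0 keyword supplies the leading
 * portion of the firmware version string built up in fw_ver.
 */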
15319 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15320 PCI_VPD_RO_KEYWORD_MFR_ID);
15321 if (j > 0) {
15322 len = pci_vpd_info_field_size(&vpd_data[j]);
15324 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15325 if (j + len > block_end || len != 4 ||
15326 memcmp(&vpd_data[j], "1028", 4))
15327 goto partno;
15329 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15330 PCI_VPD_RO_KEYWORD_VENDOR0);
15331 if (j < 0)
15332 goto partno;
15334 len = pci_vpd_info_field_size(&vpd_data[j]);
15336 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15337 if (j + len > block_end)
15338 goto partno;
15340 if (len >= sizeof(tp->fw_ver))
15341 len = sizeof(tp->fw_ver) - 1;
15342 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15343 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15344 &vpd_data[j]);
15347 partno:
15348 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15349 PCI_VPD_RO_KEYWORD_PARTNO);
15350 if (i < 0)
15351 goto out_not_found;
15353 len = pci_vpd_info_field_size(&vpd_data[i]);
15355 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15356 if (len > TG3_BPN_SIZE ||
15357 (len + i) > vpdlen)
15358 goto out_not_found;
15360 memcpy(tp->board_part_number, &vpd_data[i], len);
15362 out_not_found:
15363 kfree(vpd_data);
15364 if (tp->board_part_number[0])
15365 return;
15367 out_no_vpd:
15368 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15369 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15370 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15371 strcpy(tp->board_part_number, "BCM5717");
15372 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15373 strcpy(tp->board_part_number, "BCM5718");
15374 else
15375 goto nomatch;
15376 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15377 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15378 strcpy(tp->board_part_number, "BCM57780");
15379 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15380 strcpy(tp->board_part_number, "BCM57760");
15381 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15382 strcpy(tp->board_part_number, "BCM57790");
15383 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15384 strcpy(tp->board_part_number, "BCM57788");
15385 else
15386 goto nomatch;
15387 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15388 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15389 strcpy(tp->board_part_number, "BCM57761");
15390 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15391 strcpy(tp->board_part_number, "BCM57765");
15392 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15393 strcpy(tp->board_part_number, "BCM57781");
15394 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15395 strcpy(tp->board_part_number, "BCM57785");
15396 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15397 strcpy(tp->board_part_number, "BCM57791");
15398 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15399 strcpy(tp->board_part_number, "BCM57795");
15400 else
15401 goto nomatch;
15402 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15403 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15404 strcpy(tp->board_part_number, "BCM57762");
15405 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15406 strcpy(tp->board_part_number, "BCM57766");
15407 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15408 strcpy(tp->board_part_number, "BCM57782");
15409 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15410 strcpy(tp->board_part_number, "BCM57786");
15411 else
15412 goto nomatch;
15413 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15414 strcpy(tp->board_part_number, "BCM95906");
15415 } else {
15416 nomatch:
15417 strcpy(tp->board_part_number, "none");
15421 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15423 u32 val;
15425 if (tg3_nvram_read(tp, offset, &val) ||
15426 (val & 0xfc000000) != 0x0c000000 ||
15427 tg3_nvram_read(tp, offset + 4, &val) ||
15428 val != 0)
15429 return 0;
15431 return 1;
15434 static void tg3_read_bc_ver(struct tg3 *tp)
15436 u32 val, offset, start, ver_offset;
15437 int i, dst_off;
15438 bool newver = false;
15440 if (tg3_nvram_read(tp, 0xc, &offset) ||
15441 tg3_nvram_read(tp, 0x4, &start))
15442 return;
15444 offset = tg3_nvram_logical_addr(tp, offset);
15446 if (tg3_nvram_read(tp, offset, &val))
15447 return;
15449 if ((val & 0xfc000000) == 0x0c000000) {
15450 if (tg3_nvram_read(tp, offset + 4, &val))
15451 return;
15453 if (val == 0)
15454 newver = true;
15457 dst_off = strlen(tp->fw_ver);
15459 if (newver) {
15460 if (TG3_VER_SIZE - dst_off < 16 ||
15461 tg3_nvram_read(tp, offset + 8, &ver_offset))
15462 return;
15464 offset = offset + ver_offset - start;
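/* Newer bootcode images carry a 16-byte version string at
 * ver_offset; convert the image-relative address and copy the
 * string out four big-endian words at a time.
 */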
15465 for (i = 0; i < 16; i += 4) {
15466 __be32 v;
15467 if (tg3_nvram_read_be32(tp, offset + i, &v))
15468 return;
15470 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15472 } else {
15473 u32 major, minor;
15475 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15476 return;
15478 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15479 TG3_NVM_BCVER_MAJSFT;
15480 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15481 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15482 "v%d.%02d", major, minor);
15486 static void tg3_read_hwsb_ver(struct tg3 *tp)
15488 u32 val, major, minor;
15490 /* Use native endian representation */
15491 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15492 return;
15494 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15495 TG3_NVM_HWSB_CFG1_MAJSFT;
15496 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15497 TG3_NVM_HWSB_CFG1_MINSFT;
15499 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15502 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15504 u32 offset, major, minor, build;
15506 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15508 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15509 return;
15511 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15512 case TG3_EEPROM_SB_REVISION_0:
15513 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15514 break;
15515 case TG3_EEPROM_SB_REVISION_2:
15516 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15517 break;
15518 case TG3_EEPROM_SB_REVISION_3:
15519 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15520 break;
15521 case TG3_EEPROM_SB_REVISION_4:
15522 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15523 break;
15524 case TG3_EEPROM_SB_REVISION_5:
15525 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15526 break;
15527 case TG3_EEPROM_SB_REVISION_6:
15528 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15529 break;
15530 default:
15531 return;
15534 if (tg3_nvram_read(tp, offset, &val))
15535 return;
15537 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15538 TG3_EEPROM_SB_EDH_BLD_SHFT;
15539 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15540 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15541 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15543 if (minor > 99 || build > 26)
15544 return;
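/* Builds 1 through 26 become an 'a'-'z' suffix after the
 * major.minor version below; larger build numbers cannot be
 * encoded and were rejected above.
 */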
15546 offset = strlen(tp->fw_ver);
15547 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15548 " v%d.%02d", major, minor);
15550 if (build > 0) {
15551 offset = strlen(tp->fw_ver);
15552 if (offset < TG3_VER_SIZE - 1)
15553 tp->fw_ver[offset] = 'a' + build - 1;
15557 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15559 u32 val, offset, start;
15560 int i, vlen;
15562 for (offset = TG3_NVM_DIR_START;
15563 offset < TG3_NVM_DIR_END;
15564 offset += TG3_NVM_DIRENT_SIZE) {
15565 if (tg3_nvram_read(tp, offset, &val))
15566 return;
15568 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15569 break;
15572 if (offset == TG3_NVM_DIR_END)
15573 return;
15575 if (!tg3_flag(tp, 5705_PLUS))
15576 start = 0x08000000;
15577 else if (tg3_nvram_read(tp, offset - 4, &start))
15578 return;
15580 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15581 !tg3_fw_img_is_valid(tp, offset) ||
15582 tg3_nvram_read(tp, offset + 8, &val))
15583 return;
15585 offset += val - start;
15587 vlen = strlen(tp->fw_ver);
15589 tp->fw_ver[vlen++] = ',';
15590 tp->fw_ver[vlen++] = ' ';
15592 for (i = 0; i < 4; i++) {
15593 __be32 v;
15594 if (tg3_nvram_read_be32(tp, offset, &v))
15595 return;
15597 offset += sizeof(v);
15599 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15600 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15601 break;
15604 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15605 vlen += sizeof(v);
15609 static void tg3_probe_ncsi(struct tg3 *tp)
15611 u32 apedata;
15613 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15614 if (apedata != APE_SEG_SIG_MAGIC)
15615 return;
15617 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15618 if (!(apedata & APE_FW_STATUS_READY))
15619 return;
15621 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15622 tg3_flag_set(tp, APE_HAS_NCSI);
15625 static void tg3_read_dash_ver(struct tg3 *tp)
15627 int vlen;
15628 u32 apedata;
15629 char *fwtype;
15631 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15633 if (tg3_flag(tp, APE_HAS_NCSI))
15634 fwtype = "NCSI";
15635 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15636 fwtype = "SMASH";
15637 else
15638 fwtype = "DASH";
15640 vlen = strlen(tp->fw_ver);
15642 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15643 fwtype,
15644 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15645 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15646 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15647 (apedata & APE_FW_VERSION_BLDMSK));
15650 static void tg3_read_otp_ver(struct tg3 *tp)
15652 u32 val, val2;
15654 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15655 return;
15657 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15658 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15659 TG3_OTP_MAGIC0_VALID(val)) {
15660 u64 val64 = (u64) val << 32 | val2;
15661 u32 ver = 0;
15662 int i, vlen;
15664 for (i = 0; i < 7; i++) {
15665 if ((val64 & 0xff) == 0)
15666 break;
15667 ver = val64 & 0xff;
15668 val64 >>= 8;
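/* The loop above scans the OTP value from its least significant
 * byte up; the last non-zero byte before the terminator is the
 * version digit appended below.
 */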
15670 vlen = strlen(tp->fw_ver);
15671 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15675 static void tg3_read_fw_ver(struct tg3 *tp)
15677 u32 val;
15678 bool vpd_vers = false;
15680 if (tp->fw_ver[0] != 0)
15681 vpd_vers = true;
15683 if (tg3_flag(tp, NO_NVRAM)) {
15684 strcat(tp->fw_ver, "sb");
15685 tg3_read_otp_ver(tp);
15686 return;
15689 if (tg3_nvram_read(tp, 0, &val))
15690 return;
15692 if (val == TG3_EEPROM_MAGIC)
15693 tg3_read_bc_ver(tp);
15694 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15695 tg3_read_sb_ver(tp, val);
15696 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15697 tg3_read_hwsb_ver(tp);
15699 if (tg3_flag(tp, ENABLE_ASF)) {
15700 if (tg3_flag(tp, ENABLE_APE)) {
15701 tg3_probe_ncsi(tp);
15702 if (!vpd_vers)
15703 tg3_read_dash_ver(tp);
15704 } else if (!vpd_vers) {
15705 tg3_read_mgmtfw_ver(tp);
15709 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15712 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15714 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15715 return TG3_RX_RET_MAX_SIZE_5717;
15716 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15717 return TG3_RX_RET_MAX_SIZE_5700;
15718 else
15719 return TG3_RX_RET_MAX_SIZE_5705;
15722 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15723 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15724 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15725 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15726 { },
15729 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15731 struct pci_dev *peer;
15732 unsigned int func, devnr = tp->pdev->devfn & ~7;
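/* devfn packs the PCI slot number in bits 7:3 and the function in
 * bits 2:0, so masking off the low three bits yields function 0 of
 * this slot and devnr | func walks all eight sibling functions.
 */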
15734 for (func = 0; func < 8; func++) {
15735 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15736 if (peer && peer != tp->pdev)
15737 break;
15738 pci_dev_put(peer);
15740 /* 5704 can be configured in single-port mode; set peer to
15741 * tp->pdev in that case.
15742 */
15743 if (!peer) {
15744 peer = tp->pdev;
15745 return peer;
15748 /*
15749 * We don't need to keep the refcount elevated; there's no way
15750 * to remove one half of this device without removing the other.
15751 */
15752 pci_dev_put(peer);
15754 return peer;
15757 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15759 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
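/* The chip revision id packs the ASIC revision in its high-order
 * bits with the chip and metal revision below it; the
 * tg3_asic_rev(), tg3_chip_rev() and tg3_chip_rev_id() helpers in
 * tg3.h extract whichever field a caller needs.
 */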
15760 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15761 u32 reg;
15763 /* All devices that use the alternate
15764 * ASIC REV location have a CPMU.
15765 */
15766 tg3_flag_set(tp, CPMU_PRESENT);
15768 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15769 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15770 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15771 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15772 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15773 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
15774 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
15775 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15776 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15777 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
15778 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
15779 reg = TG3PCI_GEN2_PRODID_ASICREV;
15780 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15781 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15782 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15783 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15784 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15785 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15786 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15787 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15788 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15789 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15790 reg = TG3PCI_GEN15_PRODID_ASICREV;
15791 else
15792 reg = TG3PCI_PRODID_ASICREV;
15794 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15797 /* Wrong chip ID in 5752 A0. This code can be removed later
15798 * as A0 is not in production.
15799 */
15800 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15801 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15803 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15804 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15806 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15807 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15808 tg3_asic_rev(tp) == ASIC_REV_5720)
15809 tg3_flag_set(tp, 5717_PLUS);
15811 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15812 tg3_asic_rev(tp) == ASIC_REV_57766)
15813 tg3_flag_set(tp, 57765_CLASS);
15815 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15816 tg3_asic_rev(tp) == ASIC_REV_5762)
15817 tg3_flag_set(tp, 57765_PLUS);
15819 /* Intentionally exclude ASIC_REV_5906 */
15820 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15821 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15822 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15823 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15824 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15825 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15826 tg3_flag(tp, 57765_PLUS))
15827 tg3_flag_set(tp, 5755_PLUS);
15829 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15830 tg3_asic_rev(tp) == ASIC_REV_5714)
15831 tg3_flag_set(tp, 5780_CLASS);
15833 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15834 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15835 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15836 tg3_flag(tp, 5755_PLUS) ||
15837 tg3_flag(tp, 5780_CLASS))
15838 tg3_flag_set(tp, 5750_PLUS);
15840 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15841 tg3_flag(tp, 5750_PLUS))
15842 tg3_flag_set(tp, 5705_PLUS);
15845 static bool tg3_10_100_only_device(struct tg3 *tp,
15846 const struct pci_device_id *ent)
15848 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15850 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15851 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15852 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15853 return true;
15855 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15856 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15857 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15858 return true;
15859 } else {
15860 return true;
15864 return false;
15867 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15869 u32 misc_ctrl_reg;
15870 u32 pci_state_reg, grc_misc_cfg;
15871 u32 val;
15872 u16 pci_cmd;
15873 int err;
15875 /* Force memory write invalidate off. If we leave it on,
15876 * then on 5700_BX chips we have to enable a workaround.
15877 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15878 * to match the cacheline size. The Broadcom driver has this
15879 * workaround but turns MWI off all the time and so never uses
15880 * it. This seems to suggest that the workaround is insufficient.
15881 */
15882 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15883 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15884 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15886 /* Important! -- Make sure register accesses are byteswapped
15887 * correctly. Also, for those chips that require it, make
15888 * sure that indirect register accesses are enabled before
15889 * the first operation.
15890 */
15891 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15892 &misc_ctrl_reg);
15893 tp->misc_host_ctrl |= (misc_ctrl_reg &
15894 MISC_HOST_CTRL_CHIPREV);
15895 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15896 tp->misc_host_ctrl);
15898 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15900 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15901 * we need to disable memory and use config. cycles
15902 * only to access all registers. The 5702/03 chips
15903 * can mistakenly decode the special cycles from the
15904 * ICH chipsets as memory write cycles, causing corruption
15905 * of register and memory space. Only certain ICH bridges
15906 * will drive special cycles with non-zero data during the
15907 * address phase which can fall within the 5703's address
15908 * range. This is not an ICH bug as the PCI spec allows
15909 * non-zero address during special cycles. However, only
15910 * these ICH bridges are known to drive non-zero addresses
15911 * during special cycles.
15912 *
15913 * Since special cycles do not cross PCI bridges, we only
15914 * enable this workaround if the 5703 is on the secondary
15915 * bus of these ICH bridges.
15916 */
15917 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15918 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15919 static struct tg3_dev_id {
15920 u32 vendor;
15921 u32 device;
15922 u32 rev;
15923 } ich_chipsets[] = {
15924 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15925 PCI_ANY_ID },
15926 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15927 PCI_ANY_ID },
15928 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15929 0xa },
15930 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15931 PCI_ANY_ID },
15932 { },
15934 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15935 struct pci_dev *bridge = NULL;
15937 while (pci_id->vendor != 0) {
15938 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15939 bridge);
15940 if (!bridge) {
15941 pci_id++;
15942 continue;
15944 if (pci_id->rev != PCI_ANY_ID) {
15945 if (bridge->revision > pci_id->rev)
15946 continue;
15948 if (bridge->subordinate &&
15949 (bridge->subordinate->number ==
15950 tp->pdev->bus->number)) {
15951 tg3_flag_set(tp, ICH_WORKAROUND);
15952 pci_dev_put(bridge);
15953 break;
15958 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15959 static struct tg3_dev_id {
15960 u32 vendor;
15961 u32 device;
15962 } bridge_chipsets[] = {
15963 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15964 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15965 { },
15967 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15968 struct pci_dev *bridge = NULL;
15970 while (pci_id->vendor != 0) {
15971 bridge = pci_get_device(pci_id->vendor,
15972 pci_id->device,
15973 bridge);
15974 if (!bridge) {
15975 pci_id++;
15976 continue;
15978 if (bridge->subordinate &&
15979 (bridge->subordinate->number <=
15980 tp->pdev->bus->number) &&
15981 (bridge->subordinate->busn_res.end >=
15982 tp->pdev->bus->number)) {
15983 tg3_flag_set(tp, 5701_DMA_BUG);
15984 pci_dev_put(bridge);
15985 break;
15990 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15991 * DMA addresses > 40-bit. This bridge may have additional
15992 * 57xx devices behind it in some 4-port NIC designs, for example.
15993 * Any tg3 device found behind the bridge will also need the 40-bit
15994 * DMA workaround.
15995 */
15996 if (tg3_flag(tp, 5780_CLASS)) {
15997 tg3_flag_set(tp, 40BIT_DMA_BUG);
15998 tp->msi_cap = tp->pdev->msi_cap;
15999 } else {
16000 struct pci_dev *bridge = NULL;
16002 do {
16003 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16004 PCI_DEVICE_ID_SERVERWORKS_EPB,
16005 bridge);
16006 if (bridge && bridge->subordinate &&
16007 (bridge->subordinate->number <=
16008 tp->pdev->bus->number) &&
16009 (bridge->subordinate->busn_res.end >=
16010 tp->pdev->bus->number)) {
16011 tg3_flag_set(tp, 40BIT_DMA_BUG);
16012 pci_dev_put(bridge);
16013 break;
16015 } while (bridge);
16018 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16019 tg3_asic_rev(tp) == ASIC_REV_5714)
16020 tp->pdev_peer = tg3_find_peer(tp);
16022 /* Determine TSO capabilities */
16023 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16024 ; /* Do nothing. HW bug. */
16025 else if (tg3_flag(tp, 57765_PLUS))
16026 tg3_flag_set(tp, HW_TSO_3);
16027 else if (tg3_flag(tp, 5755_PLUS) ||
16028 tg3_asic_rev(tp) == ASIC_REV_5906)
16029 tg3_flag_set(tp, HW_TSO_2);
16030 else if (tg3_flag(tp, 5750_PLUS)) {
16031 tg3_flag_set(tp, HW_TSO_1);
16032 tg3_flag_set(tp, TSO_BUG);
16033 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16034 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16035 tg3_flag_clear(tp, TSO_BUG);
16036 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16037 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16038 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16039 tg3_flag_set(tp, FW_TSO);
16040 tg3_flag_set(tp, TSO_BUG);
16041 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16042 tp->fw_needed = FIRMWARE_TG3TSO5;
16043 else
16044 tp->fw_needed = FIRMWARE_TG3TSO;
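/* Summarizing the ladder above: HW_TSO_1/2/3 mark successive
 * generations of on-chip TSO engines, while FW_TSO performs
 * segmentation in chip firmware and therefore needs a firmware
 * image loaded first (hence fw_needed).
 */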
16047 /* Selectively allow TSO based on operating conditions */
16048 if (tg3_flag(tp, HW_TSO_1) ||
16049 tg3_flag(tp, HW_TSO_2) ||
16050 tg3_flag(tp, HW_TSO_3) ||
16051 tg3_flag(tp, FW_TSO)) {
16052 /* For firmware TSO, assume ASF is disabled.
16053 * We'll disable TSO later if we discover ASF
16054 * is enabled in tg3_get_eeprom_hw_cfg().
16055 */
16056 tg3_flag_set(tp, TSO_CAPABLE);
16057 } else {
16058 tg3_flag_clear(tp, TSO_CAPABLE);
16059 tg3_flag_clear(tp, TSO_BUG);
16060 tp->fw_needed = NULL;
16063 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16064 tp->fw_needed = FIRMWARE_TG3;
16066 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16067 tp->fw_needed = FIRMWARE_TG357766;
16069 tp->irq_max = 1;
16071 if (tg3_flag(tp, 5750_PLUS)) {
16072 tg3_flag_set(tp, SUPPORT_MSI);
16073 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16074 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16075 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16076 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16077 tp->pdev_peer == tp->pdev))
16078 tg3_flag_clear(tp, SUPPORT_MSI);
16080 if (tg3_flag(tp, 5755_PLUS) ||
16081 tg3_asic_rev(tp) == ASIC_REV_5906) {
16082 tg3_flag_set(tp, 1SHOT_MSI);
16085 if (tg3_flag(tp, 57765_PLUS)) {
16086 tg3_flag_set(tp, SUPPORT_MSIX);
16087 tp->irq_max = TG3_IRQ_MAX_VECS;
16091 tp->txq_max = 1;
16092 tp->rxq_max = 1;
16093 if (tp->irq_max > 1) {
16094 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16095 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16097 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16098 tg3_asic_rev(tp) == ASIC_REV_5720)
16099 tp->txq_max = tp->irq_max - 1;
16102 if (tg3_flag(tp, 5755_PLUS) ||
16103 tg3_asic_rev(tp) == ASIC_REV_5906)
16104 tg3_flag_set(tp, SHORT_DMA_BUG);
16106 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16107 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16109 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16110 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16111 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16112 tg3_asic_rev(tp) == ASIC_REV_5762)
16113 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16115 if (tg3_flag(tp, 57765_PLUS) &&
16116 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16117 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16119 if (!tg3_flag(tp, 5705_PLUS) ||
16120 tg3_flag(tp, 5780_CLASS) ||
16121 tg3_flag(tp, USE_JUMBO_BDFLAG))
16122 tg3_flag_set(tp, JUMBO_CAPABLE);
16124 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16125 &pci_state_reg);
16127 if (pci_is_pcie(tp->pdev)) {
16128 u16 lnkctl;
16130 tg3_flag_set(tp, PCI_EXPRESS);
16132 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16133 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16134 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16135 tg3_flag_clear(tp, HW_TSO_2);
16136 tg3_flag_clear(tp, TSO_CAPABLE);
16138 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16139 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16140 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16141 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16142 tg3_flag_set(tp, CLKREQ_BUG);
16143 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16144 tg3_flag_set(tp, L1PLLPD_EN);
16146 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16147 /* BCM5785 devices are effectively PCIe devices, and should
16148 * follow PCIe codepaths, but do not have a PCIe capabilities
16149 * section.
16151 tg3_flag_set(tp, PCI_EXPRESS);
16152 } else if (!tg3_flag(tp, 5705_PLUS) ||
16153 tg3_flag(tp, 5780_CLASS)) {
16154 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16155 if (!tp->pcix_cap) {
16156 dev_err(&tp->pdev->dev,
16157 "Cannot find PCI-X capability, aborting\n");
16158 return -EIO;
16161 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16162 tg3_flag_set(tp, PCIX_MODE);
16165 /* If we have an AMD 762 or VIA K8T800 chipset, write
16166 * reordering to the mailbox registers done by the host
16167 * controller can cause major troubles. We read back from
16168 * every mailbox register write to force the writes to be
16169 * posted to the chip in order.
16170 */
16171 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16172 !tg3_flag(tp, PCI_EXPRESS))
16173 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16175 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16176 &tp->pci_cacheline_sz);
16177 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16178 &tp->pci_lat_timer);
16179 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16180 tp->pci_lat_timer < 64) {
16181 tp->pci_lat_timer = 64;
16182 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16183 tp->pci_lat_timer);
16186 /* Important! -- It is critical that the PCI-X hw workaround
16187 * situation is decided before the first MMIO register access.
16188 */
16189 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16190 /* 5700 BX chips need to have their TX producer index
16191 * mailboxes written twice to work around a bug.
16192 */
16193 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16195 /* If we are in PCI-X mode, enable register write workaround.
16196 *
16197 * The workaround is to use indirect register accesses
16198 * for all chip writes not to mailbox registers.
16199 */
16200 if (tg3_flag(tp, PCIX_MODE)) {
16201 u32 pm_reg;
16203 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16205 /* The chip can have its power management PCI config
16206 * space registers clobbered due to this bug.
16207 * So explicitly force the chip into D0 here.
16208 */
16209 pci_read_config_dword(tp->pdev,
16210 tp->pdev->pm_cap + PCI_PM_CTRL,
16211 &pm_reg);
16212 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16213 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16214 pci_write_config_dword(tp->pdev,
16215 tp->pdev->pm_cap + PCI_PM_CTRL,
16216 pm_reg);
16218 /* Also, force SERR#/PERR# in PCI command. */
16219 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16220 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16221 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16225 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16226 tg3_flag_set(tp, PCI_HIGH_SPEED);
16227 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16228 tg3_flag_set(tp, PCI_32BIT);
16230 /* Chip-specific fixup from Broadcom driver */
16231 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16232 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16233 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16234 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16237 /* Default fast path register access methods */
16238 tp->read32 = tg3_read32;
16239 tp->write32 = tg3_write32;
16240 tp->read32_mbox = tg3_read32;
16241 tp->write32_mbox = tg3_write32;
16242 tp->write32_tx_mbox = tg3_write32;
16243 tp->write32_rx_mbox = tg3_write32;
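/* All register I/O in the driver dispatches through these function
 * pointers, so the per-chip workarounds below only need to swap in
 * a different accessor. The fast-path macros in tg3.h reduce to
 * roughly:
 *
 *	#define tw32(reg, val)	tp->write32(tp, reg, val)
 *	#define tr32(reg)	tp->read32(tp, reg)
 */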
16245 /* Various workaround register access methods */
16246 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16247 tp->write32 = tg3_write_indirect_reg32;
16248 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16249 (tg3_flag(tp, PCI_EXPRESS) &&
16250 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16251 /*
16252 * Back to back register writes can cause problems on these
16253 * chips; the workaround is to read back all reg writes
16254 * except those to mailbox regs.
16255 *
16256 * See tg3_write_indirect_reg32().
16257 */
16258 tp->write32 = tg3_write_flush_reg32;
16261 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16262 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16263 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16264 tp->write32_rx_mbox = tg3_write_flush_reg32;
16267 if (tg3_flag(tp, ICH_WORKAROUND)) {
16268 tp->read32 = tg3_read_indirect_reg32;
16269 tp->write32 = tg3_write_indirect_reg32;
16270 tp->read32_mbox = tg3_read_indirect_mbox;
16271 tp->write32_mbox = tg3_write_indirect_mbox;
16272 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16273 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16275 iounmap(tp->regs);
16276 tp->regs = NULL;
16278 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16279 pci_cmd &= ~PCI_COMMAND_MEMORY;
16280 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16282 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16283 tp->read32_mbox = tg3_read32_mbox_5906;
16284 tp->write32_mbox = tg3_write32_mbox_5906;
16285 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16286 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16289 if (tp->write32 == tg3_write_indirect_reg32 ||
16290 (tg3_flag(tp, PCIX_MODE) &&
16291 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16292 tg3_asic_rev(tp) == ASIC_REV_5701)))
16293 tg3_flag_set(tp, SRAM_USE_CONFIG);
16295 /* The memory arbiter has to be enabled in order for SRAM accesses
16296 * to succeed. Normally on powerup the tg3 chip firmware will make
16297 * sure it is enabled, but other entities such as system netboot
16298 * code might disable it.
16299 */
16300 val = tr32(MEMARB_MODE);
16301 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16303 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16304 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16305 tg3_flag(tp, 5780_CLASS)) {
16306 if (tg3_flag(tp, PCIX_MODE)) {
16307 pci_read_config_dword(tp->pdev,
16308 tp->pcix_cap + PCI_X_STATUS,
16309 &val);
16310 tp->pci_fn = val & 0x7;
16312 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16313 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16314 tg3_asic_rev(tp) == ASIC_REV_5720) {
16315 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16316 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16317 val = tr32(TG3_CPMU_STATUS);
16319 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16320 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16321 else
16322 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16323 TG3_CPMU_STATUS_FSHFT_5719;
16326 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16327 tp->write32_tx_mbox = tg3_write_flush_reg32;
16328 tp->write32_rx_mbox = tg3_write_flush_reg32;
16331 /* Get eeprom hw config before calling tg3_set_power_state().
16332 * In particular, the TG3_FLAG_IS_NIC flag must be
16333 * determined before calling tg3_set_power_state() so that
16334 * we know whether or not to switch out of Vaux power.
16335 * When the flag is set, it means that GPIO1 is used for eeprom
16336 * write protect and also implies that it is a LOM where GPIOs
16337 * are not used to switch power.
16338 */
16339 tg3_get_eeprom_hw_cfg(tp);
16341 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16342 tg3_flag_clear(tp, TSO_CAPABLE);
16343 tg3_flag_clear(tp, TSO_BUG);
16344 tp->fw_needed = NULL;
16347 if (tg3_flag(tp, ENABLE_APE)) {
16348 /* Allow reads and writes to the
16349 * APE register and memory space.
16350 */
16351 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16352 PCISTATE_ALLOW_APE_SHMEM_WR |
16353 PCISTATE_ALLOW_APE_PSPACE_WR;
16354 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16355 pci_state_reg);
16357 tg3_ape_lock_init(tp);
16360 /* Set up tp->grc_local_ctrl before calling
16361 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16362 * will bring 5700's external PHY out of reset.
16363 * It is also used as eeprom write protect on LOMs.
16364 */
16365 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16366 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16367 tg3_flag(tp, EEPROM_WRITE_PROT))
16368 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16369 GRC_LCLCTRL_GPIO_OUTPUT1);
16370 /* Unused GPIO3 must be driven as output on 5752 because there
16371 * are no pull-up resistors on unused GPIO pins.
16372 */
16373 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16374 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16376 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16377 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16378 tg3_flag(tp, 57765_CLASS))
16379 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16381 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16382 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16383 /* Turn off the debug UART. */
16384 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16385 if (tg3_flag(tp, IS_NIC))
16386 /* Keep VMain power. */
16387 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16388 GRC_LCLCTRL_GPIO_OUTPUT0;
16391 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16392 tp->grc_local_ctrl |=
16393 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16395 /* Switch out of Vaux if it is a NIC */
16396 tg3_pwrsrc_switch_to_vmain(tp);
16398 /* Derive initial jumbo mode from MTU assigned in
16399 * ether_setup() via the alloc_etherdev() call.
16400 */
16401 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16402 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16404 /* Determine WakeOnLan speed to use. */
16405 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16406 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16407 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16408 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16409 tg3_flag_clear(tp, WOL_SPEED_100MB);
16410 } else {
16411 tg3_flag_set(tp, WOL_SPEED_100MB);
16414 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16415 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16417 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16418 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16419 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16420 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16421 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16422 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16423 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16424 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16426 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16427 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16428 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16429 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16430 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16432 if (tg3_flag(tp, 5705_PLUS) &&
16433 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16434 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16435 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16436 !tg3_flag(tp, 57765_PLUS)) {
16437 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16438 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16439 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16440 tg3_asic_rev(tp) == ASIC_REV_5761) {
16441 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16442 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16443 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16444 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16445 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16446 } else
16447 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16450 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16451 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16452 tp->phy_otp = tg3_read_otp_phycfg(tp);
16453 if (tp->phy_otp == 0)
16454 tp->phy_otp = TG3_OTP_DEFAULT;
16457 if (tg3_flag(tp, CPMU_PRESENT))
16458 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16459 else
16460 tp->mi_mode = MAC_MI_MODE_BASE;
16462 tp->coalesce_mode = 0;
16463 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16464 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16465 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16467 /* Set these bits to enable the statistics workaround. */
16468 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16469 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16470 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16471 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16472 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16475 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16476 tg3_asic_rev(tp) == ASIC_REV_57780)
16477 tg3_flag_set(tp, USE_PHYLIB);
16479 err = tg3_mdio_init(tp);
16480 if (err)
16481 return err;
16483 /* Initialize data/descriptor byte/word swapping. */
16484 val = tr32(GRC_MODE);
16485 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16486 tg3_asic_rev(tp) == ASIC_REV_5762)
16487 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16488 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16489 GRC_MODE_B2HRX_ENABLE |
16490 GRC_MODE_HTX2B_ENABLE |
16491 GRC_MODE_HOST_STACKUP);
16492 else
16493 val &= GRC_MODE_HOST_STACKUP;
16495 tw32(GRC_MODE, val | tp->grc_mode);
16497 tg3_switch_clocks(tp);
16499 /* Clear this out for sanity. */
16500 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16502 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16503 &pci_state_reg);
16504 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16505 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16506 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16507 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16508 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16509 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16510 void __iomem *sram_base;
16512 /* Write some dummy words into the SRAM status block
16513 * area, and see if it reads back correctly. If the value read
16514 * back is bad, force-enable the PCIX workaround.
16516 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16518 writel(0x00000000, sram_base);
16519 writel(0x00000000, sram_base + 4);
16520 writel(0xffffffff, sram_base + 4);
16521 if (readl(sram_base) != 0x00000000)
16522 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16526 udelay(50);
16527 tg3_nvram_init(tp);
16529 /* If the device has an NVRAM, no need to load patch firmware */
16530 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16531 !tg3_flag(tp, NO_NVRAM))
16532 tp->fw_needed = NULL;
16534 grc_misc_cfg = tr32(GRC_MISC_CFG);
16535 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16537 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16538 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16539 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16540 tg3_flag_set(tp, IS_5788);
16542 if (!tg3_flag(tp, IS_5788) &&
16543 tg3_asic_rev(tp) != ASIC_REV_5700)
16544 tg3_flag_set(tp, TAGGED_STATUS);
16545 if (tg3_flag(tp, TAGGED_STATUS)) {
16546 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16547 HOSTCC_MODE_CLRTICK_TXBD);
16549 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16550 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16551 tp->misc_host_ctrl);
16554 /* Preserve the APE MAC_MODE bits */
16555 if (tg3_flag(tp, ENABLE_APE))
16556 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16557 else
16558 tp->mac_mode = 0;
16560 if (tg3_10_100_only_device(tp, ent))
16561 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16563 err = tg3_phy_probe(tp);
16564 if (err) {
16565 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16566 /* ... but do not return immediately; let err propagate below */
16567 tg3_mdio_fini(tp);
16570 tg3_read_vpd(tp);
16571 tg3_read_fw_ver(tp);
16573 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16574 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16575 } else {
16576 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16577 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16578 else
16579 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16582 /* 5700 {AX,BX} chips have a broken status block link
16583 * change bit implementation, so we must use the
16584 * status register in those cases.
16586 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16587 tg3_flag_set(tp, USE_LINKCHG_REG);
16588 else
16589 tg3_flag_clear(tp, USE_LINKCHG_REG);
16591 /* The led_ctrl is set during tg3_phy_probe, here we might
16592 * have to force the link status polling mechanism based
16593 * upon subsystem IDs.
16595 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16596 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16597 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16598 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16599 tg3_flag_set(tp, USE_LINKCHG_REG);
16602 /* For all SERDES we poll the MAC status register. */
16603 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16604 tg3_flag_set(tp, POLL_SERDES);
16605 else
16606 tg3_flag_clear(tp, POLL_SERDES);
16608 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16609 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16610 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16611 tg3_flag(tp, PCIX_MODE)) {
16612 tp->rx_offset = NET_SKB_PAD;
16613 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16614 tp->rx_copy_thresh = ~(u16)0;
16615 #endif
16618 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16619 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16620 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16622 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16624 /* Increment the rx prod index on the rx std ring by at most
16625 * 8 for these chips to work around hw errata.
16627 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16628 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16629 tg3_asic_rev(tp) == ASIC_REV_5755)
16630 tp->rx_std_max_post = 8;
16632 if (tg3_flag(tp, ASPM_WORKAROUND))
16633 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16634 PCIE_PWR_MGMT_L1_THRESH_MSK;
16636 return err;
16639 #ifdef CONFIG_SPARC
16640 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16642 struct net_device *dev = tp->dev;
16643 struct pci_dev *pdev = tp->pdev;
16644 struct device_node *dp = pci_device_to_OF_node(pdev);
16645 const unsigned char *addr;
16646 int len;
16648 addr = of_get_property(dp, "local-mac-address", &len);
16649 if (addr && len == ETH_ALEN) {
16650 memcpy(dev->dev_addr, addr, ETH_ALEN);
16651 return 0;
16653 return -ENODEV;
16656 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16658 struct net_device *dev = tp->dev;
16660 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16661 return 0;
16663 #endif
16665 static int tg3_get_device_address(struct tg3 *tp)
16667 struct net_device *dev = tp->dev;
16668 u32 hi, lo, mac_offset;
16669 int addr_ok = 0;
16670 int err;
16672 #ifdef CONFIG_SPARC
16673 if (!tg3_get_macaddr_sparc(tp))
16674 return 0;
16675 #endif
16677 if (tg3_flag(tp, IS_SSB_CORE)) {
16678 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16679 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16680 return 0;
16683 mac_offset = 0x7c;
16684 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16685 tg3_flag(tp, 5780_CLASS)) {
16686 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16687 mac_offset = 0xcc;
16688 if (tg3_nvram_lock(tp))
16689 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16690 else
16691 tg3_nvram_unlock(tp);
16692 } else if (tg3_flag(tp, 5717_PLUS)) {
16693 if (tp->pci_fn & 1)
16694 mac_offset = 0xcc;
16695 if (tp->pci_fn > 1)
16696 mac_offset += 0x18c;
16697 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16698 mac_offset = 0x10;
16700 /* First try to get it from MAC address mailbox. */
16701 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16702 if ((hi >> 16) == 0x484b) {
16703 dev->dev_addr[0] = (hi >> 8) & 0xff;
16704 dev->dev_addr[1] = (hi >> 0) & 0xff;
16706 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16707 dev->dev_addr[2] = (lo >> 24) & 0xff;
16708 dev->dev_addr[3] = (lo >> 16) & 0xff;
16709 dev->dev_addr[4] = (lo >> 8) & 0xff;
16710 dev->dev_addr[5] = (lo >> 0) & 0xff;
16712 /* Some old bootcode may report a 0 MAC address in SRAM */
16713 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16715 if (!addr_ok) {
16716 /* Next, try NVRAM. */
16717 if (!tg3_flag(tp, NO_NVRAM) &&
16718 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16719 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16720 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16721 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16723 /* Finally just fetch it out of the MAC control regs. */
16724 else {
16725 hi = tr32(MAC_ADDR_0_HIGH);
16726 lo = tr32(MAC_ADDR_0_LOW);
16728 dev->dev_addr[5] = lo & 0xff;
16729 dev->dev_addr[4] = (lo >> 8) & 0xff;
16730 dev->dev_addr[3] = (lo >> 16) & 0xff;
16731 dev->dev_addr[2] = (lo >> 24) & 0xff;
16732 dev->dev_addr[1] = hi & 0xff;
16733 dev->dev_addr[0] = (hi >> 8) & 0xff;
16737 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16738 #ifdef CONFIG_SPARC
16739 if (!tg3_get_default_macaddr_sparc(tp))
16740 return 0;
16741 #endif
16742 return -EINVAL;
16744 return 0;
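/*
 * Informational summary: tg3_get_device_address() tries sources in
 * order -- the OF "local-mac-address" property on SPARC, the SSB host
 * (for SSB GigE cores), the NIC SRAM MAC address mailbox (guarded by
 * the 0x484b signature in the high word), NVRAM at the chip-specific
 * mac_offset, the MAC_ADDR_0_HIGH/LOW registers, and finally the
 * SPARC idprom before giving up with -EINVAL.
 */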
16747 #define BOUNDARY_SINGLE_CACHELINE 1
16748 #define BOUNDARY_MULTI_CACHELINE 2
16750 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16752 int cacheline_size;
16753 u8 byte;
16754 int goal;
16756 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16757 if (byte == 0)
16758 cacheline_size = 1024;
16759 else
16760 cacheline_size = (int) byte * 4;
16762 /* On 5703 and later chips, the boundary bits have no
16763 * effect.
16765 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16766 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16767 !tg3_flag(tp, PCI_EXPRESS))
16768 goto out;
16770 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16771 goal = BOUNDARY_MULTI_CACHELINE;
16772 #else
16773 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16774 goal = BOUNDARY_SINGLE_CACHELINE;
16775 #else
16776 goal = 0;
16777 #endif
16778 #endif
16780 if (tg3_flag(tp, 57765_PLUS)) {
16781 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16782 goto out;
16785 if (!goal)
16786 goto out;
16788 /* PCI controllers on most RISC systems tend to disconnect
16789 * when a device tries to burst across a cache-line boundary.
16790 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16792 * Unfortunately, for PCI-E there are only limited
16793 * write-side controls for this, and thus for reads
16794 * we will still get the disconnects. We'll also waste
16795 * these PCI cycles for both read and write for chips
16796 * other than 5700 and 5701 which do not implement the
16797 * boundary bits.
16799 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16800 switch (cacheline_size) {
16801 case 16:
16802 case 32:
16803 case 64:
16804 case 128:
16805 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16806 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16807 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16808 } else {
16809 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16810 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16812 break;
16814 case 256:
16815 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16816 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16817 break;
16819 default:
16820 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16821 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16822 break;
16824 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16825 switch (cacheline_size) {
16826 case 16:
16827 case 32:
16828 case 64:
16829 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16830 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16831 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16832 break;
16834 /* fallthrough */
16835 case 128:
16836 default:
16837 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16838 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16839 break;
16841 } else {
16842 switch (cacheline_size) {
16843 case 16:
16844 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16845 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16846 DMA_RWCTRL_WRITE_BNDRY_16);
16847 break;
16849 /* fallthrough */
16850 case 32:
16851 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16852 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16853 DMA_RWCTRL_WRITE_BNDRY_32);
16854 break;
16856 /* fallthrough */
16857 case 64:
16858 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16859 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16860 DMA_RWCTRL_WRITE_BNDRY_64);
16861 break;
16863 /* fallthrough */
16864 case 128:
16865 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16866 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16867 DMA_RWCTRL_WRITE_BNDRY_128);
16868 break;
16870 /* fallthrough */
16871 case 256:
16872 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16873 DMA_RWCTRL_WRITE_BNDRY_256);
16874 break;
16875 case 512:
16876 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16877 DMA_RWCTRL_WRITE_BNDRY_512);
16878 break;
16879 case 1024:
16880 default:
16881 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16882 DMA_RWCTRL_WRITE_BNDRY_1024);
16883 break;
16887 out:
16888 return val;
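/*
 * Worked example (illustrative): PCI_CACHE_LINE_SIZE is expressed in
 * 32-bit words, so a config byte of 0x10 means 16 * 4 = 64 bytes.  On
 * a conventional-PCI board with goal == BOUNDARY_SINGLE_CACHELINE this
 * selects the "case 64" arm above, i.e.:
 *
 *	val |= (DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64);
 */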
16891 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16892 int size, bool to_device)
16894 struct tg3_internal_buffer_desc test_desc;
16895 u32 sram_dma_descs;
16896 int i, ret;
16898 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16900 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16901 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16902 tw32(RDMAC_STATUS, 0);
16903 tw32(WDMAC_STATUS, 0);
16905 tw32(BUFMGR_MODE, 0);
16906 tw32(FTQ_RESET, 0);
16908 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16909 test_desc.addr_lo = buf_dma & 0xffffffff;
16910 test_desc.nic_mbuf = 0x00002100;
16911 test_desc.len = size;
16914 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16915 * the *second* time the tg3 driver was getting loaded after an
16916 * initial scan.
16918 * Broadcom tells me:
16919 * ...the DMA engine is connected to the GRC block and a DMA
16920 * reset may affect the GRC block in some unpredictable way...
16921 * The behavior of resets to individual blocks has not been tested.
16923 * Broadcom noted the GRC reset will also reset all sub-components.
16925 if (to_device) {
16926 test_desc.cqid_sqid = (13 << 8) | 2;
16928 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16929 udelay(40);
16930 } else {
16931 test_desc.cqid_sqid = (16 << 8) | 7;
16933 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16934 udelay(40);
16936 test_desc.flags = 0x00000005;
16938 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16939 u32 val;
16941 val = *(((u32 *)&test_desc) + i);
16942 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16943 sram_dma_descs + (i * sizeof(u32)));
16944 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16946 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16948 if (to_device)
16949 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16950 else
16951 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16953 ret = -ENODEV;
16954 for (i = 0; i < 40; i++) {
16955 u32 val;
16957 if (to_device)
16958 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16959 else
16960 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16961 if ((val & 0xffff) == sram_dma_descs) {
16962 ret = 0;
16963 break;
16966 udelay(100);
16969 return ret;
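/*
 * Usage sketch (this mirrors tg3_test_dma() below): the same coherent
 * buffer is first DMAed to NIC SRAM and then read back:
 *
 *	ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
 *	if (!ret)
 *		ret = tg3_do_test_dma(tp, buf, buf_dma,
 *				      TEST_BUFFER_SIZE, false);
 */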
16972 #define TEST_BUFFER_SIZE 0x2000
16974 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
16975 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16976 { },
16979 static int tg3_test_dma(struct tg3 *tp)
16981 dma_addr_t buf_dma;
16982 u32 *buf, saved_dma_rwctrl;
16983 int ret = 0;
16985 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16986 &buf_dma, GFP_KERNEL);
16987 if (!buf) {
16988 ret = -ENOMEM;
16989 goto out_nofree;
16992 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16993 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16995 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16997 if (tg3_flag(tp, 57765_PLUS))
16998 goto out;
17000 if (tg3_flag(tp, PCI_EXPRESS)) {
17001 /* DMA read watermark not used on PCIE */
17002 tp->dma_rwctrl |= 0x00180000;
17003 } else if (!tg3_flag(tp, PCIX_MODE)) {
17004 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17005 tg3_asic_rev(tp) == ASIC_REV_5750)
17006 tp->dma_rwctrl |= 0x003f0000;
17007 else
17008 tp->dma_rwctrl |= 0x003f000f;
17009 } else {
17010 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17011 tg3_asic_rev(tp) == ASIC_REV_5704) {
17012 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17013 u32 read_water = 0x7;
17015 /* If the 5704 is behind the EPB bridge, we can
17016 * do the less restrictive ONE_DMA workaround for
17017 * better performance.
17019 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17020 tg3_asic_rev(tp) == ASIC_REV_5704)
17021 tp->dma_rwctrl |= 0x8000;
17022 else if (ccval == 0x6 || ccval == 0x7)
17023 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17025 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17026 read_water = 4;
17027 /* Set bit 23 to enable PCIX hw bug fix */
17028 tp->dma_rwctrl |=
17029 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17030 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17031 (1 << 23);
17032 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17033 /* 5780 always in PCIX mode */
17034 tp->dma_rwctrl |= 0x00144000;
17035 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17036 /* 5714 always in PCIX mode */
17037 tp->dma_rwctrl |= 0x00148000;
17038 } else {
17039 tp->dma_rwctrl |= 0x001b000f;
17042 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17043 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17045 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17046 tg3_asic_rev(tp) == ASIC_REV_5704)
17047 tp->dma_rwctrl &= 0xfffffff0;
17049 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17050 tg3_asic_rev(tp) == ASIC_REV_5701) {
17051 /* Remove this if it causes problems for some boards. */
17052 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17054 /* On 5700/5701 chips, we need to set this bit.
17055 * Otherwise the chip will issue cacheline transactions
17056 * to streamable DMA memory with not all the byte
17057 * enables turned on. This is an error on several
17058 * RISC PCI controllers, in particular sparc64.
17060 * On 5703/5704 chips, this bit has been reassigned
17061 * a different meaning. In particular, it is used
17062 * on those chips to enable a PCI-X workaround.
17064 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17067 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17070 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17071 tg3_asic_rev(tp) != ASIC_REV_5701)
17072 goto out;
17074 /* It is best to perform DMA test with maximum write burst size
17075 * to expose the 5700/5701 write DMA bug.
17077 saved_dma_rwctrl = tp->dma_rwctrl;
17078 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17079 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17081 while (1) {
17082 u32 *p = buf, i;
17084 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17085 p[i] = i;
17087 /* Send the buffer to the chip. */
17088 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17089 if (ret) {
17090 dev_err(&tp->pdev->dev,
17091 "%s: Buffer write failed. err = %d\n",
17092 __func__, ret);
17093 break;
17096 /* Now read it back. */
17097 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17098 if (ret) {
17099 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17100 "err = %d\n", __func__, ret);
17101 break;
17104 /* Verify it. */
17105 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17106 if (p[i] == i)
17107 continue;
17109 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17110 DMA_RWCTRL_WRITE_BNDRY_16) {
17111 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17112 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17113 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17114 break;
17115 } else {
17116 dev_err(&tp->pdev->dev,
17117 "%s: Buffer corrupted on read back! "
17118 "(%d != %d)\n", __func__, p[i], i);
17119 ret = -ENODEV;
17120 goto out;
17124 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17125 /* Success. */
17126 ret = 0;
17127 break;
17130 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17131 DMA_RWCTRL_WRITE_BNDRY_16) {
17132 /* DMA test passed without adjusting DMA boundary,
17133 * now look for chipsets that are known to expose the
17134 * DMA bug without failing the test.
17136 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17137 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17138 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17139 } else {
17140 /* Safe to use the calculated DMA boundary. */
17141 tp->dma_rwctrl = saved_dma_rwctrl;
17144 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17147 out:
17148 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17149 out_nofree:
17150 return ret;
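/*
 * Note: the loop above deliberately starts with the maximum write
 * burst size; only when read-back corruption is observed is the write
 * boundary clamped to 16 bytes and the test retried.  The 16-byte
 * boundary is also forced on the known-affected chipsets listed in
 * tg3_dma_wait_state_chipsets even when the test passes.
 */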
17153 static void tg3_init_bufmgr_config(struct tg3 *tp)
17155 if (tg3_flag(tp, 57765_PLUS)) {
17156 tp->bufmgr_config.mbuf_read_dma_low_water =
17157 DEFAULT_MB_RDMA_LOW_WATER_5705;
17158 tp->bufmgr_config.mbuf_mac_rx_low_water =
17159 DEFAULT_MB_MACRX_LOW_WATER_57765;
17160 tp->bufmgr_config.mbuf_high_water =
17161 DEFAULT_MB_HIGH_WATER_57765;
17163 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17164 DEFAULT_MB_RDMA_LOW_WATER_5705;
17165 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17166 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17167 tp->bufmgr_config.mbuf_high_water_jumbo =
17168 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17169 } else if (tg3_flag(tp, 5705_PLUS)) {
17170 tp->bufmgr_config.mbuf_read_dma_low_water =
17171 DEFAULT_MB_RDMA_LOW_WATER_5705;
17172 tp->bufmgr_config.mbuf_mac_rx_low_water =
17173 DEFAULT_MB_MACRX_LOW_WATER_5705;
17174 tp->bufmgr_config.mbuf_high_water =
17175 DEFAULT_MB_HIGH_WATER_5705;
17176 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17177 tp->bufmgr_config.mbuf_mac_rx_low_water =
17178 DEFAULT_MB_MACRX_LOW_WATER_5906;
17179 tp->bufmgr_config.mbuf_high_water =
17180 DEFAULT_MB_HIGH_WATER_5906;
17183 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17184 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17185 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17186 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17187 tp->bufmgr_config.mbuf_high_water_jumbo =
17188 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17189 } else {
17190 tp->bufmgr_config.mbuf_read_dma_low_water =
17191 DEFAULT_MB_RDMA_LOW_WATER;
17192 tp->bufmgr_config.mbuf_mac_rx_low_water =
17193 DEFAULT_MB_MACRX_LOW_WATER;
17194 tp->bufmgr_config.mbuf_high_water =
17195 DEFAULT_MB_HIGH_WATER;
17197 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17198 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17199 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17200 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17201 tp->bufmgr_config.mbuf_high_water_jumbo =
17202 DEFAULT_MB_HIGH_WATER_JUMBO;
17205 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17206 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
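/*
 * Informational: three watermark tiers are programmed above --
 * 57765-class, 5705-class (with a 5906 override), and the original
 * 5700/5701 defaults -- each with separate values for standard and
 * jumbo frames.
 */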
17209 static char *tg3_phy_string(struct tg3 *tp)
17211 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17212 case TG3_PHY_ID_BCM5400: return "5400";
17213 case TG3_PHY_ID_BCM5401: return "5401";
17214 case TG3_PHY_ID_BCM5411: return "5411";
17215 case TG3_PHY_ID_BCM5701: return "5701";
17216 case TG3_PHY_ID_BCM5703: return "5703";
17217 case TG3_PHY_ID_BCM5704: return "5704";
17218 case TG3_PHY_ID_BCM5705: return "5705";
17219 case TG3_PHY_ID_BCM5750: return "5750";
17220 case TG3_PHY_ID_BCM5752: return "5752";
17221 case TG3_PHY_ID_BCM5714: return "5714";
17222 case TG3_PHY_ID_BCM5780: return "5780";
17223 case TG3_PHY_ID_BCM5755: return "5755";
17224 case TG3_PHY_ID_BCM5787: return "5787";
17225 case TG3_PHY_ID_BCM5784: return "5784";
17226 case TG3_PHY_ID_BCM5756: return "5722/5756";
17227 case TG3_PHY_ID_BCM5906: return "5906";
17228 case TG3_PHY_ID_BCM5761: return "5761";
17229 case TG3_PHY_ID_BCM5718C: return "5718C";
17230 case TG3_PHY_ID_BCM5718S: return "5718S";
17231 case TG3_PHY_ID_BCM57765: return "57765";
17232 case TG3_PHY_ID_BCM5719C: return "5719C";
17233 case TG3_PHY_ID_BCM5720C: return "5720C";
17234 case TG3_PHY_ID_BCM5762: return "5762C";
17235 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17236 case 0: return "serdes";
17237 default: return "unknown";
17241 static char *tg3_bus_string(struct tg3 *tp, char *str)
17243 if (tg3_flag(tp, PCI_EXPRESS)) {
17244 strcpy(str, "PCI Express");
17245 return str;
17246 } else if (tg3_flag(tp, PCIX_MODE)) {
17247 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17249 strcpy(str, "PCIX:");
17251 if ((clock_ctrl == 7) ||
17252 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17253 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17254 strcat(str, "133MHz");
17255 else if (clock_ctrl == 0)
17256 strcat(str, "33MHz");
17257 else if (clock_ctrl == 2)
17258 strcat(str, "50MHz");
17259 else if (clock_ctrl == 4)
17260 strcat(str, "66MHz");
17261 else if (clock_ctrl == 6)
17262 strcat(str, "100MHz");
17263 } else {
17264 strcpy(str, "PCI:");
17265 if (tg3_flag(tp, PCI_HIGH_SPEED))
17266 strcat(str, "66MHz");
17267 else
17268 strcat(str, "33MHz");
17270 if (tg3_flag(tp, PCI_32BIT))
17271 strcat(str, ":32-bit");
17272 else
17273 strcat(str, ":64-bit");
17274 return str;
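/*
 * Example outputs (illustrative): "PCI Express",
 * "PCIX:133MHz:64-bit", "PCI:33MHz:32-bit".  Note that the PCI
 * Express branch returns early and never gets the ":32-bit" or
 * ":64-bit" suffix.  The sole caller passes a 40-byte buffer (see
 * str[40] in tg3_init_one() below).
 */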
17277 static void tg3_init_coal(struct tg3 *tp)
17279 struct ethtool_coalesce *ec = &tp->coal;
17281 memset(ec, 0, sizeof(*ec));
17282 ec->cmd = ETHTOOL_GCOALESCE;
17283 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17284 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17285 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17286 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17287 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17288 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17289 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17290 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17291 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17293 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17294 HOSTCC_MODE_CLRTICK_TXBD)) {
17295 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17296 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17297 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17298 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17301 if (tg3_flag(tp, 5705_PLUS)) {
17302 ec->rx_coalesce_usecs_irq = 0;
17303 ec->tx_coalesce_usecs_irq = 0;
17304 ec->stats_block_coalesce_usecs = 0;
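/*
 * Informational: the defaults set up here seed tp->coal and can be
 * inspected or tuned from userspace with ethtool, e.g. (illustrative
 * values):
 *
 *	ethtool -c eth0
 *	ethtool -C eth0 rx-usecs 60 rx-frames 15
 */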
17308 static int tg3_init_one(struct pci_dev *pdev,
17309 const struct pci_device_id *ent)
17311 struct net_device *dev;
17312 struct tg3 *tp;
17313 int i, err;
17314 u32 sndmbx, rcvmbx, intmbx;
17315 char str[40];
17316 u64 dma_mask, persist_dma_mask;
17317 netdev_features_t features = 0;
17319 printk_once(KERN_INFO "%s\n", version);
17321 err = pci_enable_device(pdev);
17322 if (err) {
17323 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17324 return err;
17327 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17328 if (err) {
17329 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17330 goto err_out_disable_pdev;
17333 pci_set_master(pdev);
17335 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17336 if (!dev) {
17337 err = -ENOMEM;
17338 goto err_out_free_res;
17341 SET_NETDEV_DEV(dev, &pdev->dev);
17343 tp = netdev_priv(dev);
17344 tp->pdev = pdev;
17345 tp->dev = dev;
17346 tp->rx_mode = TG3_DEF_RX_MODE;
17347 tp->tx_mode = TG3_DEF_TX_MODE;
17348 tp->irq_sync = 1;
17350 if (tg3_debug > 0)
17351 tp->msg_enable = tg3_debug;
17352 else
17353 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17355 if (pdev_is_ssb_gige_core(pdev)) {
17356 tg3_flag_set(tp, IS_SSB_CORE);
17357 if (ssb_gige_must_flush_posted_writes(pdev))
17358 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17359 if (ssb_gige_one_dma_at_once(pdev))
17360 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17361 if (ssb_gige_have_roboswitch(pdev)) {
17362 tg3_flag_set(tp, USE_PHYLIB);
17363 tg3_flag_set(tp, ROBOSWITCH);
17365 if (ssb_gige_is_rgmii(pdev))
17366 tg3_flag_set(tp, RGMII_MODE);
17369 /* The word/byte swap controls here control register access byte
17370 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17371 * setting below.
17373 tp->misc_host_ctrl =
17374 MISC_HOST_CTRL_MASK_PCI_INT |
17375 MISC_HOST_CTRL_WORD_SWAP |
17376 MISC_HOST_CTRL_INDIR_ACCESS |
17377 MISC_HOST_CTRL_PCISTATE_RW;
17379 /* The NONFRM (non-frame) byte/word swap controls take effect
17380 * on descriptor entries, anything which isn't packet data.
17382 * The StrongARM chips on the board (one for tx, one for rx)
17383 * are running in big-endian mode.
17385 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17386 GRC_MODE_WSWAP_NONFRM_DATA);
17387 #ifdef __BIG_ENDIAN
17388 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17389 #endif
17390 spin_lock_init(&tp->lock);
17391 spin_lock_init(&tp->indirect_lock);
17392 INIT_WORK(&tp->reset_task, tg3_reset_task);
17394 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17395 if (!tp->regs) {
17396 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17397 err = -ENOMEM;
17398 goto err_out_free_dev;
17401 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17402 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17403 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17404 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17405 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17406 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17407 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17408 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17409 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17410 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17411 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17412 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17413 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17414 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17415 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17416 tg3_flag_set(tp, ENABLE_APE);
17417 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17418 if (!tp->aperegs) {
17419 dev_err(&pdev->dev,
17420 "Cannot map APE registers, aborting\n");
17421 err = -ENOMEM;
17422 goto err_out_iounmap;
17426 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17427 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17429 dev->ethtool_ops = &tg3_ethtool_ops;
17430 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17431 dev->netdev_ops = &tg3_netdev_ops;
17432 dev->irq = pdev->irq;
17434 err = tg3_get_invariants(tp, ent);
17435 if (err) {
17436 dev_err(&pdev->dev,
17437 "Problem fetching invariants of chip, aborting\n");
17438 goto err_out_apeunmap;
17441 /* The EPB bridge inside 5714, 5715, and 5780 and any
17442 * device behind the EPB cannot support DMA addresses > 40-bit.
17443 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17444 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17445 * do DMA address check in tg3_start_xmit().
17447 if (tg3_flag(tp, IS_5788))
17448 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17449 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17450 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17451 #ifdef CONFIG_HIGHMEM
17452 dma_mask = DMA_BIT_MASK(64);
17453 #endif
17454 } else
17455 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17457 /* Configure DMA attributes. */
17458 if (dma_mask > DMA_BIT_MASK(32)) {
17459 err = pci_set_dma_mask(pdev, dma_mask);
17460 if (!err) {
17461 features |= NETIF_F_HIGHDMA;
17462 err = pci_set_consistent_dma_mask(pdev,
17463 persist_dma_mask);
17464 if (err < 0) {
17465 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17466 "DMA for consistent allocations\n");
17467 goto err_out_apeunmap;
17471 if (err || dma_mask == DMA_BIT_MASK(32)) {
17472 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17473 if (err) {
17474 dev_err(&pdev->dev,
17475 "No usable DMA configuration, aborting\n");
17476 goto err_out_apeunmap;
17480 tg3_init_bufmgr_config(tp);
17482 features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17484 /* 5700 B0 chips do not support checksumming correctly due
17485 * to hardware bugs.
17487 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17488 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17490 if (tg3_flag(tp, 5755_PLUS))
17491 features |= NETIF_F_IPV6_CSUM;
17494 /* TSO is on by default on chips that support hardware TSO.
17495 * Firmware TSO on older chips gives lower performance, so it
17496 * is off by default, but can be enabled using ethtool.
17498 if ((tg3_flag(tp, HW_TSO_1) ||
17499 tg3_flag(tp, HW_TSO_2) ||
17500 tg3_flag(tp, HW_TSO_3)) &&
17501 (features & NETIF_F_IP_CSUM))
17502 features |= NETIF_F_TSO;
17503 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17504 if (features & NETIF_F_IPV6_CSUM)
17505 features |= NETIF_F_TSO6;
17506 if (tg3_flag(tp, HW_TSO_3) ||
17507 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17508 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17509 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17510 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17511 tg3_asic_rev(tp) == ASIC_REV_57780)
17512 features |= NETIF_F_TSO_ECN;
17515 dev->features |= features;
17516 dev->vlan_features |= features;
17519 * Add loopback capability only for a subset of devices that support
17520 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17521 * loopback for the remaining devices.
17523 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17524 !tg3_flag(tp, CPMU_PRESENT))
17525 /* Add the loopback capability */
17526 features |= NETIF_F_LOOPBACK;
17528 dev->hw_features |= features;
17530 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17531 !tg3_flag(tp, TSO_CAPABLE) &&
17532 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17533 tg3_flag_set(tp, MAX_RXPEND_64);
17534 tp->rx_pending = 63;
17537 err = tg3_get_device_address(tp);
17538 if (err) {
17539 dev_err(&pdev->dev,
17540 "Could not obtain valid ethernet address, aborting\n");
17541 goto err_out_apeunmap;
17545 * Reset the chip in case a UNDI or EFI driver did not shut it down;
17546 * the DMA self test will enable WDMAC and we would otherwise see
17547 * (spurious) pending DMA on the PCI bus at that point.
17549 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17550 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17551 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17552 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17555 err = tg3_test_dma(tp);
17556 if (err) {
17557 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17558 goto err_out_apeunmap;
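/*
 * Informational sketch of the mailbox layout set up below: vector 0
 * keeps the base INTERRUPT/RCVRET/SNDHOST mailboxes.  Each further
 * MSI-X vector advances the interrupt mailbox by 8 bytes (4 bytes
 * once past vector 4), the rx consumer mailbox by 8 bytes, and the
 * tx producer mailbox alternately by +0xc/-0x4 so the entries
 * interleave within the register block.
 */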
17561 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17562 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17563 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17564 for (i = 0; i < tp->irq_max; i++) {
17565 struct tg3_napi *tnapi = &tp->napi[i];
17567 tnapi->tp = tp;
17568 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17570 tnapi->int_mbox = intmbx;
17571 if (i <= 4)
17572 intmbx += 0x8;
17573 else
17574 intmbx += 0x4;
17576 tnapi->consmbox = rcvmbx;
17577 tnapi->prodmbox = sndmbx;
17579 if (i)
17580 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17581 else
17582 tnapi->coal_now = HOSTCC_MODE_NOW;
17584 if (!tg3_flag(tp, SUPPORT_MSIX))
17585 break;
17588 * If we support MSIX, we'll be using RSS. If we're using
17589 * RSS, the first vector only handles link interrupts and the
17590 * remaining vectors handle rx and tx interrupts. Reuse the
17591 * mailbox values for the next iteration. The values we setup
17592 * above are still useful for the single vectored mode.
17594 if (!i)
17595 continue;
17597 rcvmbx += 0x8;
17599 if (sndmbx & 0x4)
17600 sndmbx -= 0x4;
17601 else
17602 sndmbx += 0xc;
17605 tg3_init_coal(tp);
17607 pci_set_drvdata(pdev, dev);
17609 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17610 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17611 tg3_asic_rev(tp) == ASIC_REV_5762)
17612 tg3_flag_set(tp, PTP_CAPABLE);
17614 tg3_timer_init(tp);
17616 tg3_carrier_off(tp);
17618 err = register_netdev(dev);
17619 if (err) {
17620 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17621 goto err_out_apeunmap;
17624 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17625 tp->board_part_number,
17626 tg3_chip_rev_id(tp),
17627 tg3_bus_string(tp, str),
17628 dev->dev_addr);
17630 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17631 struct phy_device *phydev;
17632 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
17633 netdev_info(dev,
17634 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17635 phydev->drv->name, dev_name(&phydev->dev));
17636 } else {
17637 char *ethtype;
17639 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17640 ethtype = "10/100Base-TX";
17641 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17642 ethtype = "1000Base-SX";
17643 else
17644 ethtype = "10/100/1000Base-T";
17646 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17647 "(WireSpeed[%d], EEE[%d])\n",
17648 tg3_phy_string(tp), ethtype,
17649 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17650 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17653 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17654 (dev->features & NETIF_F_RXCSUM) != 0,
17655 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17656 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17657 tg3_flag(tp, ENABLE_ASF) != 0,
17658 tg3_flag(tp, TSO_CAPABLE) != 0);
17659 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17660 tp->dma_rwctrl,
17661 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17662 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17664 pci_save_state(pdev);
17666 return 0;
17668 err_out_apeunmap:
17669 if (tp->aperegs) {
17670 iounmap(tp->aperegs);
17671 tp->aperegs = NULL;
17674 err_out_iounmap:
17675 if (tp->regs) {
17676 iounmap(tp->regs);
17677 tp->regs = NULL;
17680 err_out_free_dev:
17681 free_netdev(dev);
17683 err_out_free_res:
17684 pci_release_regions(pdev);
17686 err_out_disable_pdev:
17687 if (pci_is_enabled(pdev))
17688 pci_disable_device(pdev);
17689 return err;
17692 static void tg3_remove_one(struct pci_dev *pdev)
17694 struct net_device *dev = pci_get_drvdata(pdev);
17696 if (dev) {
17697 struct tg3 *tp = netdev_priv(dev);
17699 release_firmware(tp->fw);
17701 tg3_reset_task_cancel(tp);
17703 if (tg3_flag(tp, USE_PHYLIB)) {
17704 tg3_phy_fini(tp);
17705 tg3_mdio_fini(tp);
17708 unregister_netdev(dev);
17709 if (tp->aperegs) {
17710 iounmap(tp->aperegs);
17711 tp->aperegs = NULL;
17713 if (tp->regs) {
17714 iounmap(tp->regs);
17715 tp->regs = NULL;
17717 free_netdev(dev);
17718 pci_release_regions(pdev);
17719 pci_disable_device(pdev);
17723 #ifdef CONFIG_PM_SLEEP
17724 static int tg3_suspend(struct device *device)
17726 struct pci_dev *pdev = to_pci_dev(device);
17727 struct net_device *dev = pci_get_drvdata(pdev);
17728 struct tg3 *tp = netdev_priv(dev);
17729 int err;
17731 if (!netif_running(dev))
17732 return 0;
17734 tg3_reset_task_cancel(tp);
17735 tg3_phy_stop(tp);
17736 tg3_netif_stop(tp);
17738 tg3_timer_stop(tp);
17740 tg3_full_lock(tp, 1);
17741 tg3_disable_ints(tp);
17742 tg3_full_unlock(tp);
17744 netif_device_detach(dev);
17746 tg3_full_lock(tp, 0);
17747 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17748 tg3_flag_clear(tp, INIT_COMPLETE);
17749 tg3_full_unlock(tp);
17751 err = tg3_power_down_prepare(tp);
17752 if (err) {
17753 int err2;
17755 tg3_full_lock(tp, 0);
17757 tg3_flag_set(tp, INIT_COMPLETE);
17758 err2 = tg3_restart_hw(tp, true);
17759 if (err2)
17760 goto out;
17762 tg3_timer_start(tp);
17764 netif_device_attach(dev);
17765 tg3_netif_start(tp);
17767 out:
17768 tg3_full_unlock(tp);
17770 if (!err2)
17771 tg3_phy_start(tp);
17774 return err;
17777 static int tg3_resume(struct device *device)
17779 struct pci_dev *pdev = to_pci_dev(device);
17780 struct net_device *dev = pci_get_drvdata(pdev);
17781 struct tg3 *tp = netdev_priv(dev);
17782 int err;
17784 if (!netif_running(dev))
17785 return 0;
17787 netif_device_attach(dev);
17789 tg3_full_lock(tp, 0);
17791 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17793 tg3_flag_set(tp, INIT_COMPLETE);
17794 err = tg3_restart_hw(tp,
17795 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17796 if (err)
17797 goto out;
17799 tg3_timer_start(tp);
17801 tg3_netif_start(tp);
17803 out:
17804 tg3_full_unlock(tp);
17806 if (!err)
17807 tg3_phy_start(tp);
17809 return err;
17811 #endif /* CONFIG_PM_SLEEP */
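/*
 * Informational: SIMPLE_DEV_PM_OPS() wires tg3_suspend/tg3_resume into
 * the system-sleep callbacks of a struct dev_pm_ops.  When
 * CONFIG_PM_SLEEP is not set, the callbacks above are compiled out and
 * the resulting ops structure is effectively empty.
 */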
17813 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17815 static void tg3_shutdown(struct pci_dev *pdev)
17817 struct net_device *dev = pci_get_drvdata(pdev);
17818 struct tg3 *tp = netdev_priv(dev);
17820 rtnl_lock();
17821 netif_device_detach(dev);
17823 if (netif_running(dev))
17824 dev_close(dev);
17826 if (system_state == SYSTEM_POWER_OFF)
17827 tg3_power_down(tp);
17829 rtnl_unlock();
17833 * tg3_io_error_detected - called when PCI error is detected
17834 * @pdev: Pointer to PCI device
17835 * @state: The current pci connection state
17837 * This function is called after a PCI bus error affecting
17838 * this device has been detected.
17840 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17841 pci_channel_state_t state)
17843 struct net_device *netdev = pci_get_drvdata(pdev);
17844 struct tg3 *tp = netdev_priv(netdev);
17845 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17847 netdev_info(netdev, "PCI I/O error detected\n");
17849 rtnl_lock();
17851 /* We probably don't have netdev yet */
17852 if (!netdev || !netif_running(netdev))
17853 goto done;
17855 tg3_phy_stop(tp);
17857 tg3_netif_stop(tp);
17859 tg3_timer_stop(tp);
17861 /* Want to make sure that the reset task doesn't run */
17862 tg3_reset_task_cancel(tp);
17864 netif_device_detach(netdev);
17866 /* Clean up software state, even if MMIO is blocked */
17867 tg3_full_lock(tp, 0);
17868 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17869 tg3_full_unlock(tp);
17871 done:
17872 if (state == pci_channel_io_perm_failure) {
17873 if (netdev) {
17874 tg3_napi_enable(tp);
17875 dev_close(netdev);
17877 err = PCI_ERS_RESULT_DISCONNECT;
17878 } else {
17879 pci_disable_device(pdev);
17882 rtnl_unlock();
17884 return err;
17888 * tg3_io_slot_reset - called after the pci bus has been reset.
17889 * @pdev: Pointer to PCI device
17891 * Restart the card from scratch, as if from a cold boot.
17892 * At this point, the card has experienced a hard reset,
17893 * followed by fixups by BIOS, and has its config space
17894 * set up identically to what it was at cold boot.
17896 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17898 struct net_device *netdev = pci_get_drvdata(pdev);
17899 struct tg3 *tp = netdev_priv(netdev);
17900 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17901 int err;
17903 rtnl_lock();
17905 if (pci_enable_device(pdev)) {
17906 dev_err(&pdev->dev,
17907 "Cannot re-enable PCI device after reset.\n");
17908 goto done;
17911 pci_set_master(pdev);
17912 pci_restore_state(pdev);
17913 pci_save_state(pdev);
17915 if (!netdev || !netif_running(netdev)) {
17916 rc = PCI_ERS_RESULT_RECOVERED;
17917 goto done;
17920 err = tg3_power_up(tp);
17921 if (err)
17922 goto done;
17924 rc = PCI_ERS_RESULT_RECOVERED;
17926 done:
17927 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
17928 tg3_napi_enable(tp);
17929 dev_close(netdev);
17931 rtnl_unlock();
17933 return rc;
17937 * tg3_io_resume - called when traffic can start flowing again.
17938 * @pdev: Pointer to PCI device
17940 * This callback is called when the error recovery driver tells
17941 * us that it's OK to resume normal operation.
17943 static void tg3_io_resume(struct pci_dev *pdev)
17945 struct net_device *netdev = pci_get_drvdata(pdev);
17946 struct tg3 *tp = netdev_priv(netdev);
17947 int err;
17949 rtnl_lock();
17951 if (!netif_running(netdev))
17952 goto done;
17954 tg3_full_lock(tp, 0);
17955 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17956 tg3_flag_set(tp, INIT_COMPLETE);
17957 err = tg3_restart_hw(tp, true);
17958 if (err) {
17959 tg3_full_unlock(tp);
17960 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17961 goto done;
17964 netif_device_attach(netdev);
17966 tg3_timer_start(tp);
17968 tg3_netif_start(tp);
17970 tg3_full_unlock(tp);
17972 tg3_phy_start(tp);
17974 done:
17975 rtnl_unlock();
17978 static const struct pci_error_handlers tg3_err_handler = {
17979 .error_detected = tg3_io_error_detected,
17980 .slot_reset = tg3_io_slot_reset,
17981 .resume = tg3_io_resume
17984 static struct pci_driver tg3_driver = {
17985 .name = DRV_MODULE_NAME,
17986 .id_table = tg3_pci_tbl,
17987 .probe = tg3_init_one,
17988 .remove = tg3_remove_one,
17989 .err_handler = &tg3_err_handler,
17990 .driver.pm = &tg3_pm_ops,
17991 .shutdown = tg3_shutdown,
17994 module_pci_driver(tg3_driver);
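/*
 * Informational: module_pci_driver() expands to the module_init()/
 * module_exit() boilerplate that calls pci_register_driver() and
 * pci_unregister_driver() for tg3_driver, so loading the module (e.g.
 * "modprobe tg3") is all that is needed to bind devices matching
 * tg3_pci_tbl.
 */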