tg3: Eliminate timer race with reset_task
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		120
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"August 18, 2011"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	0
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "mac loopback test  (offline)" },
	{ "phy loopback test  (offline)" },
	{ "ext loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

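/* Indirect register accessors: the register offset is placed in the
 * TG3PCI_REG_BASE_ADDR config word and the data is moved through
 * TG3PCI_REG_DATA, with tp->indirect_lock serializing the two-step
 * config space sequence.
 */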
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

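/* Read and write on-board NIC SRAM through the PCI memory window.  The
 * window base address register is written back to zero afterwards so
 * the window is always left in a known state.
 */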
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

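/* Acquire an APE hardware lock by writing this function's request bit
 * and polling the grant register for up to 1 ms; on timeout the request
 * is revoked and -EBUSY is returned.
 */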
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

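/* Mask PCI interrupts in the misc host control register and write 1 to
 * each interrupt mailbox so the chip stops asserting interrupts.
 */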
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

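/* Report whether the status block indicates pending work: a link change
 * event (unless link changes are polled) or new RX/TX completions.
 */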
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

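/* Clause 22 MII read through the MAC's MI communication register.
 * Auto-polling is paused around the transaction and restored afterwards.
 */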
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

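/* Clause 45 accesses are tunneled through the clause 22 MMD access
 * control and address/data registers: select the device address, latch
 * the register address, then move the data in no-increment mode.
 */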
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

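/* Program the 5785 MAC/PHY interface (LED modes, RGMII in-band status
 * and clock timeouts) to match the PHY that was found on the mdio bus.
 */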
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

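/* Wait for bootcode firmware to finish initializing: the 5906 VCPU
 * signals completion through VCPU_STATUS, other chips echo the inverted
 * magic value back through the firmware mailbox in SRAM.
 */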
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

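/* Map the driver's FLOW_CTRL_TX/FLOW_CTRL_RX bits onto the autoneg
 * advertisement bits, for copper (1000BASE-T) and fiber (1000BASE-X)
 * respectively, and resolve the 1000BASE-X pause negotiation result.
 */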
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

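/* Apply the flow control configuration that autonegotiation resolved
 * (or the forced setting when pause autoneg is off) to the RX and TX
 * MAC mode registers, flushing only the registers that changed.
 */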
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}

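/* Toggle PHY auto power-down (APD).  FET-style PHYs are handled through
 * the shadow AUXSTAT2 register; other PHYs through the MISC shadow SCR5
 * and APD selectors.
 */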
1966 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1968 u32 phytest;
1970 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1971 u32 phy;
1973 tg3_writephy(tp, MII_TG3_FET_TEST,
1974 phytest | MII_TG3_FET_SHADOW_EN);
1975 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1976 if (enable)
1977 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1978 else
1979 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1980 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1982 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1986 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1988 u32 reg;
1990 if (!tg3_flag(tp, 5705_PLUS) ||
1991 (tg3_flag(tp, 5717_PLUS) &&
1992 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1993 return;
1995 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1996 tg3_phy_fet_toggle_apd(tp, enable);
1997 return;
2000 reg = MII_TG3_MISC_SHDW_WREN |
2001 MII_TG3_MISC_SHDW_SCR5_SEL |
2002 MII_TG3_MISC_SHDW_SCR5_LPED |
2003 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2004 MII_TG3_MISC_SHDW_SCR5_SDTL |
2005 MII_TG3_MISC_SHDW_SCR5_C125OE;
2006 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2007 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2009 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2012 reg = MII_TG3_MISC_SHDW_WREN |
2013 MII_TG3_MISC_SHDW_APD_SEL |
2014 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2015 if (enable)
2016 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2018 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
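/* Editor's note: MII_TG3_MISC_SHDW appears to be a banked (shadow)
 * register: a *_SEL field selects the bank and MII_TG3_MISC_SHDW_WREN
 * latches the write, so each update above is a single tg3_writephy()
 * of WREN | <bank select> | <payload>.
 */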
2021 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2023 u32 phy;
2025 if (!tg3_flag(tp, 5705_PLUS) ||
2026 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2027 return;
2029 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2030 u32 ephy;
2032 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2033 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2035 tg3_writephy(tp, MII_TG3_FET_TEST,
2036 ephy | MII_TG3_FET_SHADOW_EN);
2037 if (!tg3_readphy(tp, reg, &phy)) {
2038 if (enable)
2039 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2040 else
2041 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2042 tg3_writephy(tp, reg, phy);
2044 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2046 } else {
2047 int ret;
2049 ret = tg3_phy_auxctl_read(tp,
2050 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2051 if (!ret) {
2052 if (enable)
2053 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2054 else
2055 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2056 tg3_phy_auxctl_write(tp,
2057 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2062 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2064 int ret;
2065 u32 val;
2067 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2068 return;
2070 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2071 if (!ret)
2072 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2073 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2076 static void tg3_phy_apply_otp(struct tg3 *tp)
2078 u32 otp, phy;
2080 if (!tp->phy_otp)
2081 return;
2083 otp = tp->phy_otp;
2085 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2086 return;
2088 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2089 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2090 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2092 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2093 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2094 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2096 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2097 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2098 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2100 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2101 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2103 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2104 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2106 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2107 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2108 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2110 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
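/* Editor's note: every write above follows one pattern -- pull a field
 * out of the one-time-programmable word and program it into a DSP tap,
 * with the whole sequence bracketed by SMDSP enable/disable:
 *
 *	phy = (otp & TG3_OTP_<FIELD>_MASK) >> TG3_OTP_<FIELD>_SHIFT;
 *	tg3_phydsp_write(tp, MII_TG3_DSP_<TAP>, phy);
 */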
2113 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2115 u32 val;
2117 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2118 return;
2120 tp->setlpicnt = 0;
2122 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2123 current_link_up == 1 &&
2124 tp->link_config.active_duplex == DUPLEX_FULL &&
2125 (tp->link_config.active_speed == SPEED_100 ||
2126 tp->link_config.active_speed == SPEED_1000)) {
2127 u32 eeectl;
2129 if (tp->link_config.active_speed == SPEED_1000)
2130 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2131 else
2132 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2134 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2136 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2137 TG3_CL45_D7_EEERES_STAT, &val);
2139 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2140 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2141 tp->setlpicnt = 2;
2144 if (!tp->setlpicnt) {
2145 if (current_link_up == 1 &&
2146 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2147 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2148 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2151 val = tr32(TG3_CPMU_EEE_MODE);
2152 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2156 static void tg3_phy_eee_enable(struct tg3 *tp)
2158 u32 val;
2160 if (tp->link_config.active_speed == SPEED_1000 &&
2161 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2162 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2163 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
2164 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2165 val = MII_TG3_DSP_TAP26_ALNOKO |
2166 MII_TG3_DSP_TAP26_RMRXSTO;
2167 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2168 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2171 val = tr32(TG3_CPMU_EEE_MODE);
2172 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2175 static int tg3_wait_macro_done(struct tg3 *tp)
2177 int limit = 100;
2179 while (limit--) {
2180 u32 tmp32;
2182 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2183 if ((tmp32 & 0x1000) == 0)
2184 break;
2187 if (limit < 0)
2188 return -EBUSY;
2190 return 0;
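/* Editor's note: bit 0x1000 of MII_TG3_DSP_CONTROL evidently acts as a
 * busy flag; the loop above polls until it clears (up to 100 reads)
 * and reports -EBUSY on timeout, the same bounded-poll idiom used for
 * the NVRAM arbiter and CPU-halt waits later in this file.
 */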
2193 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2195 static const u32 test_pat[4][6] = {
2196 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2197 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2198 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2199 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2201 int chan;
2203 for (chan = 0; chan < 4; chan++) {
2204 int i;
2206 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2207 (chan * 0x2000) | 0x0200);
2208 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2210 for (i = 0; i < 6; i++)
2211 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2212 test_pat[chan][i]);
2214 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2215 if (tg3_wait_macro_done(tp)) {
2216 *resetp = 1;
2217 return -EBUSY;
2220 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2221 (chan * 0x2000) | 0x0200);
2222 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2223 if (tg3_wait_macro_done(tp)) {
2224 *resetp = 1;
2225 return -EBUSY;
2228 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2229 if (tg3_wait_macro_done(tp)) {
2230 *resetp = 1;
2231 return -EBUSY;
2234 for (i = 0; i < 6; i += 2) {
2235 u32 low, high;
2237 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2238 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2239 tg3_wait_macro_done(tp)) {
2240 *resetp = 1;
2241 return -EBUSY;
2243 low &= 0x7fff;
2244 high &= 0x000f;
2245 if (low != test_pat[chan][i] ||
2246 high != test_pat[chan][i+1]) {
2247 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2248 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2249 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2251 return -EBUSY;
2256 return 0;
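/* Editor's note: the routine above writes a known pattern into each of
 * the four DSP channels (chan * 0x2000 selects the channel block),
 * reads it back through MII_TG3_DSP_RW_PORT, and requests another PHY
 * reset via *resetp when any readback -- masked to its valid bits,
 * 0x7fff low and 0x000f high -- fails to match.
 */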
2259 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2261 int chan;
2263 for (chan = 0; chan < 4; chan++) {
2264 int i;
2266 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2267 (chan * 0x2000) | 0x0200);
2268 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2269 for (i = 0; i < 6; i++)
2270 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2271 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2272 if (tg3_wait_macro_done(tp))
2273 return -EBUSY;
2276 return 0;
2279 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2281 u32 reg32, phy9_orig;
2282 int retries, do_phy_reset, err;
2284 retries = 10;
2285 do_phy_reset = 1;
2286 do {
2287 if (do_phy_reset) {
2288 err = tg3_bmcr_reset(tp);
2289 if (err)
2290 return err;
2291 do_phy_reset = 0;
2294 /* Disable transmitter and interrupt. */
2295 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2296 continue;
2298 reg32 |= 0x3000;
2299 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2301 /* Set full-duplex, 1000 Mbps. */
2302 tg3_writephy(tp, MII_BMCR,
2303 BMCR_FULLDPLX | BMCR_SPEED1000);
2305 /* Set to master mode. */
2306 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2307 continue;
2309 tg3_writephy(tp, MII_CTRL1000,
2310 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2312 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2313 if (err)
2314 return err;
2316 /* Block the PHY control access. */
2317 tg3_phydsp_write(tp, 0x8005, 0x0800);
2319 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2320 if (!err)
2321 break;
2322 } while (--retries);
2324 err = tg3_phy_reset_chanpat(tp);
2325 if (err)
2326 return err;
2328 tg3_phydsp_write(tp, 0x8005, 0x0000);
2330 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2331 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2333 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2335 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2337 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2338 reg32 &= ~0x3000;
2339 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2340 } else if (!err)
2341 err = -EBUSY;
2343 return err;
2346 /* This will reset the tigon3 PHY. Callers force a reset when the
2347 * link has gone down or a reset is otherwise required.
2348 */
2349 static int tg3_phy_reset(struct tg3 *tp)
2351 u32 val, cpmuctrl;
2352 int err;
2354 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2355 val = tr32(GRC_MISC_CFG);
2356 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2357 udelay(40);
2359 err = tg3_readphy(tp, MII_BMSR, &val);
2360 err |= tg3_readphy(tp, MII_BMSR, &val);
2361 if (err != 0)
2362 return -EBUSY;
2364 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2365 netif_carrier_off(tp->dev);
2366 tg3_link_report(tp);
2369 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2370 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2371 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2372 err = tg3_phy_reset_5703_4_5(tp);
2373 if (err)
2374 return err;
2375 goto out;
2378 cpmuctrl = 0;
2379 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2380 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2381 cpmuctrl = tr32(TG3_CPMU_CTRL);
2382 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2383 tw32(TG3_CPMU_CTRL,
2384 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2387 err = tg3_bmcr_reset(tp);
2388 if (err)
2389 return err;
2391 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2392 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2393 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2395 tw32(TG3_CPMU_CTRL, cpmuctrl);
2398 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2399 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2400 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2401 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2402 CPMU_LSPD_1000MB_MACCLK_12_5) {
2403 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2404 udelay(40);
2405 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2409 if (tg3_flag(tp, 5717_PLUS) &&
2410 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2411 return 0;
2413 tg3_phy_apply_otp(tp);
2415 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2416 tg3_phy_toggle_apd(tp, true);
2417 else
2418 tg3_phy_toggle_apd(tp, false);
2420 out:
2421 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2422 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2423 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2424 tg3_phydsp_write(tp, 0x000a, 0x0323);
2425 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2428 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2429 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2430 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2433 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2434 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2435 tg3_phydsp_write(tp, 0x000a, 0x310b);
2436 tg3_phydsp_write(tp, 0x201f, 0x9506);
2437 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2438 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2440 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2441 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2442 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2443 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2444 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2445 tg3_writephy(tp, MII_TG3_TEST1,
2446 MII_TG3_TEST1_TRIM_EN | 0x4);
2447 } else
2448 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2450 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2454 /* Set Extended packet length bit (bit 14) on all chips
2455 * that support jumbo frames. */
2456 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2457 /* Cannot do read-modify-write on 5401 */
2458 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2459 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2460 /* Set bit 14 with read-modify-write to preserve other bits */
2461 err = tg3_phy_auxctl_read(tp,
2462 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2463 if (!err)
2464 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2465 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2468 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2469 * jumbo frame transmission.
2470 */
2471 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2472 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2473 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2474 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2477 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2478 /* adjust output voltage */
2479 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2482 tg3_phy_toggle_automdix(tp, 1);
2483 tg3_phy_set_wirespeed(tp);
2484 return 0;
2487 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2488 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2489 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2490 TG3_GPIO_MSG_NEED_VAUX)
2491 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2492 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2493 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2494 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2495 (TG3_GPIO_MSG_DRVR_PRES << 12))
2497 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2498 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2499 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2500 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2501 (TG3_GPIO_MSG_NEED_VAUX << 12))
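/* Editor's note: the combined status word carries one 4-bit nibble per
 * PCI function, which is why both message bits replicate at shifts 0,
 * 4, 8 and 12 above. As a purely illustrative example, a combined
 * status of 0x13 (after shifting out TG3_APE_GPIO_MSG_SHIFT) would
 * mean function 0 is present and needs Vaux while function 1 is merely
 * present.
 */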
2503 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2505 u32 status, shift;
2507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2508 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2509 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2510 else
2511 status = tr32(TG3_CPMU_DRV_STATUS);
2513 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2514 status &= ~(TG3_GPIO_MSG_MASK << shift);
2515 status |= (newstat << shift);
2517 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2518 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2519 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2520 else
2521 tw32(TG3_CPMU_DRV_STATUS, status);
2523 return status >> TG3_APE_GPIO_MSG_SHIFT;
2526 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2528 if (!tg3_flag(tp, IS_NIC))
2529 return 0;
2531 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2532 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2533 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2534 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2535 return -EIO;
2537 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2539 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2540 TG3_GRC_LCLCTL_PWRSW_DELAY);
2542 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2543 } else {
2544 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2545 TG3_GRC_LCLCTL_PWRSW_DELAY);
2548 return 0;
2551 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2553 u32 grc_local_ctrl;
2555 if (!tg3_flag(tp, IS_NIC) ||
2556 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2557 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2558 return;
2560 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2562 tw32_wait_f(GRC_LOCAL_CTRL,
2563 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2564 TG3_GRC_LCLCTL_PWRSW_DELAY);
2566 tw32_wait_f(GRC_LOCAL_CTRL,
2567 grc_local_ctrl,
2568 TG3_GRC_LCLCTL_PWRSW_DELAY);
2570 tw32_wait_f(GRC_LOCAL_CTRL,
2571 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2572 TG3_GRC_LCLCTL_PWRSW_DELAY);
2575 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2577 if (!tg3_flag(tp, IS_NIC))
2578 return;
2580 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2581 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2582 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2583 (GRC_LCLCTRL_GPIO_OE0 |
2584 GRC_LCLCTRL_GPIO_OE1 |
2585 GRC_LCLCTRL_GPIO_OE2 |
2586 GRC_LCLCTRL_GPIO_OUTPUT0 |
2587 GRC_LCLCTRL_GPIO_OUTPUT1),
2588 TG3_GRC_LCLCTL_PWRSW_DELAY);
2589 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2590 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2591 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2592 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2593 GRC_LCLCTRL_GPIO_OE1 |
2594 GRC_LCLCTRL_GPIO_OE2 |
2595 GRC_LCLCTRL_GPIO_OUTPUT0 |
2596 GRC_LCLCTRL_GPIO_OUTPUT1 |
2597 tp->grc_local_ctrl;
2598 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2599 TG3_GRC_LCLCTL_PWRSW_DELAY);
2601 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2602 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2603 TG3_GRC_LCLCTL_PWRSW_DELAY);
2605 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2606 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2607 TG3_GRC_LCLCTL_PWRSW_DELAY);
2608 } else {
2609 u32 no_gpio2;
2610 u32 grc_local_ctrl = 0;
2612 /* Workaround to prevent the part from drawing too much current. */
2613 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2614 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2615 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2616 grc_local_ctrl,
2617 TG3_GRC_LCLCTL_PWRSW_DELAY);
2620 /* On 5753 and variants, GPIO2 cannot be used. */
2621 no_gpio2 = tp->nic_sram_data_cfg &
2622 NIC_SRAM_DATA_CFG_NO_GPIO2;
2624 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2625 GRC_LCLCTRL_GPIO_OE1 |
2626 GRC_LCLCTRL_GPIO_OE2 |
2627 GRC_LCLCTRL_GPIO_OUTPUT1 |
2628 GRC_LCLCTRL_GPIO_OUTPUT2;
2629 if (no_gpio2) {
2630 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2631 GRC_LCLCTRL_GPIO_OUTPUT2);
2633 tw32_wait_f(GRC_LOCAL_CTRL,
2634 tp->grc_local_ctrl | grc_local_ctrl,
2635 TG3_GRC_LCLCTL_PWRSW_DELAY);
2637 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2639 tw32_wait_f(GRC_LOCAL_CTRL,
2640 tp->grc_local_ctrl | grc_local_ctrl,
2641 TG3_GRC_LCLCTL_PWRSW_DELAY);
2643 if (!no_gpio2) {
2644 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2645 tw32_wait_f(GRC_LOCAL_CTRL,
2646 tp->grc_local_ctrl | grc_local_ctrl,
2647 TG3_GRC_LCLCTL_PWRSW_DELAY);
2652 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2654 u32 msg = 0;
2656 /* Serialize power state transitions */
2657 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2658 return;
2660 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2661 msg = TG3_GPIO_MSG_NEED_VAUX;
2663 msg = tg3_set_function_status(tp, msg);
2665 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2666 goto done;
2668 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2669 tg3_pwrsrc_switch_to_vaux(tp);
2670 else
2671 tg3_pwrsrc_die_with_vmain(tp);
2673 done:
2674 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
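/* Editor's note: this is a vote-counting scheme. Each function
 * overwrites its own nibble -- dropping its driver-present bit and
 * publishing whether it still needs Vaux -- and only the last driver
 * out, the one that sees no TG3_GPIO_MSG_DRVR_PRES bits remaining,
 * actually switches the power source: Vaux if anyone asked for it,
 * Vmain otherwise.
 */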
2677 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2679 bool need_vaux = false;
2681 /* The GPIOs do something completely different on 57765. */
2682 if (!tg3_flag(tp, IS_NIC) ||
2683 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2684 return;
2686 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2687 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2688 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2689 tg3_frob_aux_power_5717(tp, include_wol ?
2690 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2691 return;
2694 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2695 struct net_device *dev_peer;
2697 dev_peer = pci_get_drvdata(tp->pdev_peer);
2699 /* remove_one() may have been run on the peer. */
2700 if (dev_peer) {
2701 struct tg3 *tp_peer = netdev_priv(dev_peer);
2703 if (tg3_flag(tp_peer, INIT_COMPLETE))
2704 return;
2706 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2707 tg3_flag(tp_peer, ENABLE_ASF))
2708 need_vaux = true;
2712 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2713 tg3_flag(tp, ENABLE_ASF))
2714 need_vaux = true;
2716 if (need_vaux)
2717 tg3_pwrsrc_switch_to_vaux(tp);
2718 else
2719 tg3_pwrsrc_die_with_vmain(tp);
2722 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2724 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2725 return 1;
2726 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2727 if (speed != SPEED_10)
2728 return 1;
2729 } else if (speed == SPEED_10)
2730 return 1;
2732 return 0;
2735 static int tg3_setup_phy(struct tg3 *, int);
2736 static int tg3_halt_cpu(struct tg3 *, u32);
2738 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2740 u32 val;
2742 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2743 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2744 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2745 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2747 sg_dig_ctrl |=
2748 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2749 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2750 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2752 return;
2755 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2756 tg3_bmcr_reset(tp);
2757 val = tr32(GRC_MISC_CFG);
2758 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2759 udelay(40);
2760 return;
2761 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2762 u32 phytest;
2763 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2764 u32 phy;
2766 tg3_writephy(tp, MII_ADVERTISE, 0);
2767 tg3_writephy(tp, MII_BMCR,
2768 BMCR_ANENABLE | BMCR_ANRESTART);
2770 tg3_writephy(tp, MII_TG3_FET_TEST,
2771 phytest | MII_TG3_FET_SHADOW_EN);
2772 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2773 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2774 tg3_writephy(tp,
2775 MII_TG3_FET_SHDW_AUXMODE4,
2776 phy);
2778 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2780 return;
2781 } else if (do_low_power) {
2782 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2783 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2785 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2786 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2787 MII_TG3_AUXCTL_PCTL_VREG_11V;
2788 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2791 /* The PHY should not be powered down on some chips because
2792 * of bugs.
2793 */
2794 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2795 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2796 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2797 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2798 return;
2800 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2801 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2802 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2803 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2804 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2805 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2808 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2811 /* tp->lock is held. */
2812 static int tg3_nvram_lock(struct tg3 *tp)
2814 if (tg3_flag(tp, NVRAM)) {
2815 int i;
2817 if (tp->nvram_lock_cnt == 0) {
2818 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2819 for (i = 0; i < 8000; i++) {
2820 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2821 break;
2822 udelay(20);
2824 if (i == 8000) {
2825 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2826 return -ENODEV;
2829 tp->nvram_lock_cnt++;
2831 return 0;
2834 /* tp->lock is held. */
2835 static void tg3_nvram_unlock(struct tg3 *tp)
2837 if (tg3_flag(tp, NVRAM)) {
2838 if (tp->nvram_lock_cnt > 0)
2839 tp->nvram_lock_cnt--;
2840 if (tp->nvram_lock_cnt == 0)
2841 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
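/* Editor's note: nvram_lock_cnt makes the hardware arbitration lock
 * recursive within the driver. Only the outermost tg3_nvram_lock()
 * touches NVRAM_SWARB (requesting SWARB_REQ_SET1 and polling for
 * SWARB_GNT1, roughly 8000 * 20us worst case), and only the matching
 * outermost unlock releases it with SWARB_REQ_CLR1.
 */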
2845 /* tp->lock is held. */
2846 static void tg3_enable_nvram_access(struct tg3 *tp)
2848 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2849 u32 nvaccess = tr32(NVRAM_ACCESS);
2851 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2855 /* tp->lock is held. */
2856 static void tg3_disable_nvram_access(struct tg3 *tp)
2858 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2859 u32 nvaccess = tr32(NVRAM_ACCESS);
2861 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2865 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2866 u32 offset, u32 *val)
2868 u32 tmp;
2869 int i;
2871 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2872 return -EINVAL;
2874 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2875 EEPROM_ADDR_DEVID_MASK |
2876 EEPROM_ADDR_READ);
2877 tw32(GRC_EEPROM_ADDR,
2878 tmp |
2879 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2880 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2881 EEPROM_ADDR_ADDR_MASK) |
2882 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2884 for (i = 0; i < 1000; i++) {
2885 tmp = tr32(GRC_EEPROM_ADDR);
2887 if (tmp & EEPROM_ADDR_COMPLETE)
2888 break;
2889 msleep(1);
2891 if (!(tmp & EEPROM_ADDR_COMPLETE))
2892 return -EBUSY;
2894 tmp = tr32(GRC_EEPROM_DATA);
2896 /*
2897 * The data will always be opposite the native endian
2898 * format. Perform a blind byteswap to compensate.
2899 */
2900 *val = swab32(tmp);
2902 return 0;
2905 #define NVRAM_CMD_TIMEOUT 10000
2907 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2909 int i;
2911 tw32(NVRAM_CMD, nvram_cmd);
2912 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2913 udelay(10);
2914 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2915 udelay(10);
2916 break;
2920 if (i == NVRAM_CMD_TIMEOUT)
2921 return -EBUSY;
2923 return 0;
2926 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2928 if (tg3_flag(tp, NVRAM) &&
2929 tg3_flag(tp, NVRAM_BUFFERED) &&
2930 tg3_flag(tp, FLASH) &&
2931 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2932 (tp->nvram_jedecnum == JEDEC_ATMEL))
2934 addr = ((addr / tp->nvram_pagesize) <<
2935 ATMEL_AT45DB0X1B_PAGE_POS) +
2936 (addr % tp->nvram_pagesize);
2938 return addr;
2941 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2943 if (tg3_flag(tp, NVRAM) &&
2944 tg3_flag(tp, NVRAM_BUFFERED) &&
2945 tg3_flag(tp, FLASH) &&
2946 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2947 (tp->nvram_jedecnum == JEDEC_ATMEL))
2949 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2950 tp->nvram_pagesize) +
2951 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2953 return addr;
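/* Editor's note: a worked example of the Atmel translation above,
 * assuming a 264-byte nvram_pagesize (typical for the AT45DB0x1B
 * parts): linear offset 300 lands in page 1 at byte 36, so the
 * physical address is (1 << ATMEL_AT45DB0X1B_PAGE_POS) + 36, and
 * tg3_nvram_logical_addr() simply inverts the mapping.
 */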
2956 /* NOTE: Data read in from NVRAM is byteswapped according to
2957 * the byteswapping settings for all other register accesses.
2958 * tg3 devices are BE devices, so on a BE machine, the data
2959 * returned will be exactly as it is seen in NVRAM. On a LE
2960 * machine, the 32-bit value will be byteswapped.
2961 */
2962 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2964 int ret;
2966 if (!tg3_flag(tp, NVRAM))
2967 return tg3_nvram_read_using_eeprom(tp, offset, val);
2969 offset = tg3_nvram_phys_addr(tp, offset);
2971 if (offset > NVRAM_ADDR_MSK)
2972 return -EINVAL;
2974 ret = tg3_nvram_lock(tp);
2975 if (ret)
2976 return ret;
2978 tg3_enable_nvram_access(tp);
2980 tw32(NVRAM_ADDR, offset);
2981 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2982 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2984 if (ret == 0)
2985 *val = tr32(NVRAM_RDDATA);
2987 tg3_disable_nvram_access(tp);
2989 tg3_nvram_unlock(tp);
2991 return ret;
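/* Editor's note: a full read is therefore: translate the address, take
 * the arbitration lock, enable host access, program NVRAM_ADDR, issue
 * a one-word command (NVRAM_CMD_RD | GO | FIRST | LAST | DONE), fetch
 * NVRAM_RDDATA, then tear everything down in reverse order.
 */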
2994 /* Ensures NVRAM data is in bytestream format. */
2995 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2997 u32 v;
2998 int res = tg3_nvram_read(tp, offset, &v);
2999 if (!res)
3000 *val = cpu_to_be32(v);
3001 return res;
3004 #define RX_CPU_SCRATCH_BASE 0x30000
3005 #define RX_CPU_SCRATCH_SIZE 0x04000
3006 #define TX_CPU_SCRATCH_BASE 0x34000
3007 #define TX_CPU_SCRATCH_SIZE 0x04000
3009 /* tp->lock is held. */
3010 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3012 int i;
3014 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3016 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3017 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3019 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3020 return 0;
3022 if (offset == RX_CPU_BASE) {
3023 for (i = 0; i < 10000; i++) {
3024 tw32(offset + CPU_STATE, 0xffffffff);
3025 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3026 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3027 break;
3030 tw32(offset + CPU_STATE, 0xffffffff);
3031 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3032 udelay(10);
3033 } else {
3034 for (i = 0; i < 10000; i++) {
3035 tw32(offset + CPU_STATE, 0xffffffff);
3036 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3037 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3038 break;
3042 if (i >= 10000) {
3043 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3044 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3045 return -ENODEV;
3048 /* Clear firmware's nvram arbitration. */
3049 if (tg3_flag(tp, NVRAM))
3050 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3051 return 0;
3054 struct fw_info {
3055 unsigned int fw_base;
3056 unsigned int fw_len;
3057 const __be32 *fw_data;
3060 /* tp->lock is held. */
3061 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3062 u32 cpu_scratch_base, int cpu_scratch_size,
3063 struct fw_info *info)
3065 int err, lock_err, i;
3066 void (*write_op)(struct tg3 *, u32, u32);
3068 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3069 netdev_err(tp->dev,
3070 "%s: Trying to load TX cpu firmware which is 5705\n",
3071 __func__);
3072 return -EINVAL;
3075 if (tg3_flag(tp, 5705_PLUS))
3076 write_op = tg3_write_mem;
3077 else
3078 write_op = tg3_write_indirect_reg32;
3080 /* It is possible that bootcode is still loading at this point.
3081 * Get the nvram lock first before halting the cpu.
3082 */
3083 lock_err = tg3_nvram_lock(tp);
3084 err = tg3_halt_cpu(tp, cpu_base);
3085 if (!lock_err)
3086 tg3_nvram_unlock(tp);
3087 if (err)
3088 goto out;
3090 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3091 write_op(tp, cpu_scratch_base + i, 0);
3092 tw32(cpu_base + CPU_STATE, 0xffffffff);
3093 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3094 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3095 write_op(tp, (cpu_scratch_base +
3096 (info->fw_base & 0xffff) +
3097 (i * sizeof(u32))),
3098 be32_to_cpu(info->fw_data[i]));
3100 err = 0;
3102 out:
3103 return err;
3106 /* tp->lock is held. */
3107 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3109 struct fw_info info;
3110 const __be32 *fw_data;
3111 int err, i;
3113 fw_data = (void *)tp->fw->data;
3115 /* Firmware blob starts with version numbers, followed by
3116 start address and length. We are setting complete length.
3117 length = end_address_of_bss - start_address_of_text.
3118 Remainder is the blob to be loaded contiguously
3119 from start address. */
3121 info.fw_base = be32_to_cpu(fw_data[1]);
3122 info.fw_len = tp->fw->size - 12;
3123 info.fw_data = &fw_data[3];
3125 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3126 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3127 &info);
3128 if (err)
3129 return err;
3131 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3132 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3133 &info);
3134 if (err)
3135 return err;
3137 /* Now startup only the RX cpu. */
3138 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3139 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3141 for (i = 0; i < 5; i++) {
3142 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3143 break;
3144 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3145 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3146 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3147 udelay(1000);
3149 if (i >= 5) {
3150 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3151 "should be %08x\n", __func__,
3152 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3153 return -ENODEV;
3155 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3156 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3158 return 0;
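/* Editor's note: per the layout comment above, the firmware image is
 * an array of big-endian words: fw_data[0] version information,
 * fw_data[1] the load/start address, fw_data[2] a length word (the
 * driver instead derives fw_len from tp->fw->size minus the 12-byte
 * header), and the code proper from fw_data[3] onward. Startup is
 * "program CPU_PC, verify it latched (up to five tries), then clear
 * CPU_MODE_HALT".
 */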
3161 /* tp->lock is held. */
3162 static int tg3_load_tso_firmware(struct tg3 *tp)
3164 struct fw_info info;
3165 const __be32 *fw_data;
3166 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3167 int err, i;
3169 if (tg3_flag(tp, HW_TSO_1) ||
3170 tg3_flag(tp, HW_TSO_2) ||
3171 tg3_flag(tp, HW_TSO_3))
3172 return 0;
3174 fw_data = (void *)tp->fw->data;
3176 /* Firmware blob starts with version numbers, followed by
3177 start address and length. We are setting complete length.
3178 length = end_address_of_bss - start_address_of_text.
3179 Remainder is the blob to be loaded contiguously
3180 from start address. */
3182 info.fw_base = be32_to_cpu(fw_data[1]);
3183 cpu_scratch_size = tp->fw_len;
3184 info.fw_len = tp->fw->size - 12;
3185 info.fw_data = &fw_data[3];
3187 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3188 cpu_base = RX_CPU_BASE;
3189 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3190 } else {
3191 cpu_base = TX_CPU_BASE;
3192 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3193 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3196 err = tg3_load_firmware_cpu(tp, cpu_base,
3197 cpu_scratch_base, cpu_scratch_size,
3198 &info);
3199 if (err)
3200 return err;
3202 /* Now startup the cpu. */
3203 tw32(cpu_base + CPU_STATE, 0xffffffff);
3204 tw32_f(cpu_base + CPU_PC, info.fw_base);
3206 for (i = 0; i < 5; i++) {
3207 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3208 break;
3209 tw32(cpu_base + CPU_STATE, 0xffffffff);
3210 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3211 tw32_f(cpu_base + CPU_PC, info.fw_base);
3212 udelay(1000);
3214 if (i >= 5) {
3215 netdev_err(tp->dev,
3216 "%s fails to set CPU PC, is %08x should be %08x\n",
3217 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3218 return -ENODEV;
3220 tw32(cpu_base + CPU_STATE, 0xffffffff);
3221 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3222 return 0;
3226 /* tp->lock is held. */
3227 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3229 u32 addr_high, addr_low;
3230 int i;
3232 addr_high = ((tp->dev->dev_addr[0] << 8) |
3233 tp->dev->dev_addr[1]);
3234 addr_low = ((tp->dev->dev_addr[2] << 24) |
3235 (tp->dev->dev_addr[3] << 16) |
3236 (tp->dev->dev_addr[4] << 8) |
3237 (tp->dev->dev_addr[5] << 0));
3238 for (i = 0; i < 4; i++) {
3239 if (i == 1 && skip_mac_1)
3240 continue;
3241 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3242 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3245 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3246 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3247 for (i = 0; i < 12; i++) {
3248 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3249 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3253 addr_high = (tp->dev->dev_addr[0] +
3254 tp->dev->dev_addr[1] +
3255 tp->dev->dev_addr[2] +
3256 tp->dev->dev_addr[3] +
3257 tp->dev->dev_addr[4] +
3258 tp->dev->dev_addr[5]) &
3259 TX_BACKOFF_SEED_MASK;
3260 tw32(MAC_TX_BACKOFF_SEED, addr_high);
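/* Editor's note: the MAC address registers are split high/low, most
 * significant byte first: addr_high carries bytes 0-1 and addr_low
 * bytes 2-5. For a hypothetical address 00:10:18:aa:bb:cc that is
 * addr_high = 0x0010 and addr_low = 0x18aabbcc; the backoff seed is
 * just the byte sum masked with TX_BACKOFF_SEED_MASK.
 */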
3263 static void tg3_enable_register_access(struct tg3 *tp)
3265 /*
3266 * Make sure register accesses (indirect or otherwise) will function
3267 * correctly.
3268 */
3269 pci_write_config_dword(tp->pdev,
3270 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3273 static int tg3_power_up(struct tg3 *tp)
3275 int err;
3277 tg3_enable_register_access(tp);
3279 err = pci_set_power_state(tp->pdev, PCI_D0);
3280 if (!err) {
3281 /* Switch out of Vaux if it is a NIC */
3282 tg3_pwrsrc_switch_to_vmain(tp);
3283 } else {
3284 netdev_err(tp->dev, "Transition to D0 failed\n");
3287 return err;
3290 static int tg3_power_down_prepare(struct tg3 *tp)
3292 u32 misc_host_ctrl;
3293 bool device_should_wake, do_low_power;
3295 tg3_enable_register_access(tp);
3297 /* Restore the CLKREQ setting. */
3298 if (tg3_flag(tp, CLKREQ_BUG)) {
3299 u16 lnkctl;
3301 pci_read_config_word(tp->pdev,
3302 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3303 &lnkctl);
3304 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3305 pci_write_config_word(tp->pdev,
3306 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3307 lnkctl);
3310 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3311 tw32(TG3PCI_MISC_HOST_CTRL,
3312 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3314 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3315 tg3_flag(tp, WOL_ENABLE);
3317 if (tg3_flag(tp, USE_PHYLIB)) {
3318 do_low_power = false;
3319 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3320 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3321 struct phy_device *phydev;
3322 u32 phyid, advertising;
3324 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3326 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3328 tp->link_config.orig_speed = phydev->speed;
3329 tp->link_config.orig_duplex = phydev->duplex;
3330 tp->link_config.orig_autoneg = phydev->autoneg;
3331 tp->link_config.orig_advertising = phydev->advertising;
3333 advertising = ADVERTISED_TP |
3334 ADVERTISED_Pause |
3335 ADVERTISED_Autoneg |
3336 ADVERTISED_10baseT_Half;
3338 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3339 if (tg3_flag(tp, WOL_SPEED_100MB))
3340 advertising |=
3341 ADVERTISED_100baseT_Half |
3342 ADVERTISED_100baseT_Full |
3343 ADVERTISED_10baseT_Full;
3344 else
3345 advertising |= ADVERTISED_10baseT_Full;
3348 phydev->advertising = advertising;
3350 phy_start_aneg(phydev);
3352 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3353 if (phyid != PHY_ID_BCMAC131) {
3354 phyid &= PHY_BCM_OUI_MASK;
3355 if (phyid == PHY_BCM_OUI_1 ||
3356 phyid == PHY_BCM_OUI_2 ||
3357 phyid == PHY_BCM_OUI_3)
3358 do_low_power = true;
3361 } else {
3362 do_low_power = true;
3364 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3365 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3366 tp->link_config.orig_speed = tp->link_config.speed;
3367 tp->link_config.orig_duplex = tp->link_config.duplex;
3368 tp->link_config.orig_autoneg = tp->link_config.autoneg;
3371 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3372 tp->link_config.speed = SPEED_10;
3373 tp->link_config.duplex = DUPLEX_HALF;
3374 tp->link_config.autoneg = AUTONEG_ENABLE;
3375 tg3_setup_phy(tp, 0);
3379 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3380 u32 val;
3382 val = tr32(GRC_VCPU_EXT_CTRL);
3383 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3384 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3385 int i;
3386 u32 val;
3388 for (i = 0; i < 200; i++) {
3389 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3390 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3391 break;
3392 msleep(1);
3395 if (tg3_flag(tp, WOL_CAP))
3396 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3397 WOL_DRV_STATE_SHUTDOWN |
3398 WOL_DRV_WOL |
3399 WOL_SET_MAGIC_PKT);
3401 if (device_should_wake) {
3402 u32 mac_mode;
3404 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3405 if (do_low_power &&
3406 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3407 tg3_phy_auxctl_write(tp,
3408 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3409 MII_TG3_AUXCTL_PCTL_WOL_EN |
3410 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3411 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3412 udelay(40);
3415 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3416 mac_mode = MAC_MODE_PORT_MODE_GMII;
3417 else
3418 mac_mode = MAC_MODE_PORT_MODE_MII;
3420 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3421 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3422 ASIC_REV_5700) {
3423 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3424 SPEED_100 : SPEED_10;
3425 if (tg3_5700_link_polarity(tp, speed))
3426 mac_mode |= MAC_MODE_LINK_POLARITY;
3427 else
3428 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3430 } else {
3431 mac_mode = MAC_MODE_PORT_MODE_TBI;
3434 if (!tg3_flag(tp, 5750_PLUS))
3435 tw32(MAC_LED_CTRL, tp->led_ctrl);
3437 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3438 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3439 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3440 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3442 if (tg3_flag(tp, ENABLE_APE))
3443 mac_mode |= MAC_MODE_APE_TX_EN |
3444 MAC_MODE_APE_RX_EN |
3445 MAC_MODE_TDE_ENABLE;
3447 tw32_f(MAC_MODE, mac_mode);
3448 udelay(100);
3450 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3451 udelay(10);
3454 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3455 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3456 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3457 u32 base_val;
3459 base_val = tp->pci_clock_ctrl;
3460 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3461 CLOCK_CTRL_TXCLK_DISABLE);
3463 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3464 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3465 } else if (tg3_flag(tp, 5780_CLASS) ||
3466 tg3_flag(tp, CPMU_PRESENT) ||
3467 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3468 /* do nothing */
3469 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3470 u32 newbits1, newbits2;
3472 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3473 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3474 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3475 CLOCK_CTRL_TXCLK_DISABLE |
3476 CLOCK_CTRL_ALTCLK);
3477 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3478 } else if (tg3_flag(tp, 5705_PLUS)) {
3479 newbits1 = CLOCK_CTRL_625_CORE;
3480 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3481 } else {
3482 newbits1 = CLOCK_CTRL_ALTCLK;
3483 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3486 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3487 40);
3489 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3490 40);
3492 if (!tg3_flag(tp, 5705_PLUS)) {
3493 u32 newbits3;
3495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3496 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3497 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3498 CLOCK_CTRL_TXCLK_DISABLE |
3499 CLOCK_CTRL_44MHZ_CORE);
3500 } else {
3501 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3504 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3505 tp->pci_clock_ctrl | newbits3, 40);
3509 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3510 tg3_power_down_phy(tp, do_low_power);
3512 tg3_frob_aux_power(tp, true);
3514 /* Workaround for unstable PLL clock */
3515 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3516 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3517 u32 val = tr32(0x7d00);
3519 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3520 tw32(0x7d00, val);
3521 if (!tg3_flag(tp, ENABLE_ASF)) {
3522 int err;
3524 err = tg3_nvram_lock(tp);
3525 tg3_halt_cpu(tp, RX_CPU_BASE);
3526 if (!err)
3527 tg3_nvram_unlock(tp);
3531 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3533 return 0;
3536 static void tg3_power_down(struct tg3 *tp)
3538 tg3_power_down_prepare(tp);
3540 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3541 pci_set_power_state(tp->pdev, PCI_D3hot);
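/* Editor's note: tg3_power_down() is the terminal step: once
 * tg3_power_down_prepare() has quiesced the MAC/PHY and armed any WoL
 * state, it enables PCI wake according to the WOL_ENABLE flag and
 * drops the device into D3hot.
 */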
3544 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3546 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3547 case MII_TG3_AUX_STAT_10HALF:
3548 *speed = SPEED_10;
3549 *duplex = DUPLEX_HALF;
3550 break;
3552 case MII_TG3_AUX_STAT_10FULL:
3553 *speed = SPEED_10;
3554 *duplex = DUPLEX_FULL;
3555 break;
3557 case MII_TG3_AUX_STAT_100HALF:
3558 *speed = SPEED_100;
3559 *duplex = DUPLEX_HALF;
3560 break;
3562 case MII_TG3_AUX_STAT_100FULL:
3563 *speed = SPEED_100;
3564 *duplex = DUPLEX_FULL;
3565 break;
3567 case MII_TG3_AUX_STAT_1000HALF:
3568 *speed = SPEED_1000;
3569 *duplex = DUPLEX_HALF;
3570 break;
3572 case MII_TG3_AUX_STAT_1000FULL:
3573 *speed = SPEED_1000;
3574 *duplex = DUPLEX_FULL;
3575 break;
3577 default:
3578 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3579 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3580 SPEED_10;
3581 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3582 DUPLEX_HALF;
3583 break;
3585 *speed = SPEED_INVALID;
3586 *duplex = DUPLEX_INVALID;
3587 break;
3591 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3593 int err = 0;
3594 u32 val, new_adv;
3596 new_adv = ADVERTISE_CSMA;
3597 if (advertise & ADVERTISED_10baseT_Half)
3598 new_adv |= ADVERTISE_10HALF;
3599 if (advertise & ADVERTISED_10baseT_Full)
3600 new_adv |= ADVERTISE_10FULL;
3601 if (advertise & ADVERTISED_100baseT_Half)
3602 new_adv |= ADVERTISE_100HALF;
3603 if (advertise & ADVERTISED_100baseT_Full)
3604 new_adv |= ADVERTISE_100FULL;
3606 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3608 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3609 if (err)
3610 goto done;
3612 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3613 goto done;
3615 new_adv = 0;
3616 if (advertise & ADVERTISED_1000baseT_Half)
3617 new_adv |= ADVERTISE_1000HALF;
3618 if (advertise & ADVERTISED_1000baseT_Full)
3619 new_adv |= ADVERTISE_1000FULL;
3621 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3622 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3623 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3625 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3626 if (err)
3627 goto done;
3629 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3630 goto done;
3632 tw32(TG3_CPMU_EEE_MODE,
3633 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3635 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3636 if (!err) {
3637 u32 err2;
3639 val = 0;
3640 /* Advertise 100BASE-TX EEE ability */
3641 if (advertise & ADVERTISED_100baseT_Full)
3642 val |= MDIO_AN_EEE_ADV_100TX;
3643 /* Advertise 1000BASE-T EEE ability */
3644 if (advertise & ADVERTISED_1000baseT_Full)
3645 val |= MDIO_AN_EEE_ADV_1000T;
3646 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3647 if (err)
3648 val = 0;
3650 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3651 case ASIC_REV_5717:
3652 case ASIC_REV_57765:
3653 case ASIC_REV_5719:
3654 /* If we advertised any EEE modes above... */
3655 if (val)
3656 val = MII_TG3_DSP_TAP26_ALNOKO |
3657 MII_TG3_DSP_TAP26_RMRXSTO |
3658 MII_TG3_DSP_TAP26_OPCSINPT;
3659 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3660 /* Fall through */
3661 case ASIC_REV_5720:
3662 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3663 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3664 MII_TG3_DSP_CH34TP2_HIBW01);
3667 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3668 if (!err)
3669 err = err2;
3672 done:
3673 return err;
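/* Editor's note: the advertisement is spread across three registers --
 * MII_ADVERTISE for 10/100 modes plus the flow-control bits,
 * MII_CTRL1000 for gigabit (skipped on 10/100-only PHYs), and the
 * Clause 45 MDIO_AN_EEE_ADV word, reached via tg3_phy_cl45_write(),
 * for EEE.
 */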
3676 static void tg3_phy_copper_begin(struct tg3 *tp)
3678 u32 new_adv;
3679 int i;
3681 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3682 new_adv = ADVERTISED_10baseT_Half |
3683 ADVERTISED_10baseT_Full;
3684 if (tg3_flag(tp, WOL_SPEED_100MB))
3685 new_adv |= ADVERTISED_100baseT_Half |
3686 ADVERTISED_100baseT_Full;
3688 tg3_phy_autoneg_cfg(tp, new_adv,
3689 FLOW_CTRL_TX | FLOW_CTRL_RX);
3690 } else if (tp->link_config.speed == SPEED_INVALID) {
3691 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3692 tp->link_config.advertising &=
3693 ~(ADVERTISED_1000baseT_Half |
3694 ADVERTISED_1000baseT_Full);
3696 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3697 tp->link_config.flowctrl);
3698 } else {
3699 /* Asking for a specific link mode. */
3700 if (tp->link_config.speed == SPEED_1000) {
3701 if (tp->link_config.duplex == DUPLEX_FULL)
3702 new_adv = ADVERTISED_1000baseT_Full;
3703 else
3704 new_adv = ADVERTISED_1000baseT_Half;
3705 } else if (tp->link_config.speed == SPEED_100) {
3706 if (tp->link_config.duplex == DUPLEX_FULL)
3707 new_adv = ADVERTISED_100baseT_Full;
3708 else
3709 new_adv = ADVERTISED_100baseT_Half;
3710 } else {
3711 if (tp->link_config.duplex == DUPLEX_FULL)
3712 new_adv = ADVERTISED_10baseT_Full;
3713 else
3714 new_adv = ADVERTISED_10baseT_Half;
3717 tg3_phy_autoneg_cfg(tp, new_adv,
3718 tp->link_config.flowctrl);
3721 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3722 tp->link_config.speed != SPEED_INVALID) {
3723 u32 bmcr, orig_bmcr;
3725 tp->link_config.active_speed = tp->link_config.speed;
3726 tp->link_config.active_duplex = tp->link_config.duplex;
3728 bmcr = 0;
3729 switch (tp->link_config.speed) {
3730 default:
3731 case SPEED_10:
3732 break;
3734 case SPEED_100:
3735 bmcr |= BMCR_SPEED100;
3736 break;
3738 case SPEED_1000:
3739 bmcr |= BMCR_SPEED1000;
3740 break;
3743 if (tp->link_config.duplex == DUPLEX_FULL)
3744 bmcr |= BMCR_FULLDPLX;
3746 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3747 (bmcr != orig_bmcr)) {
3748 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3749 for (i = 0; i < 1500; i++) {
3750 u32 tmp;
3752 udelay(10);
3753 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3754 tg3_readphy(tp, MII_BMSR, &tmp))
3755 continue;
3756 if (!(tmp & BMSR_LSTATUS)) {
3757 udelay(40);
3758 break;
3761 tg3_writephy(tp, MII_BMCR, bmcr);
3762 udelay(40);
3764 } else {
3765 tg3_writephy(tp, MII_BMCR,
3766 BMCR_ANENABLE | BMCR_ANRESTART);
3770 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3772 int err;
3774 /* Turn off tap power management. */
3775 /* Set Extended packet length bit */
3776 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3778 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3779 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3780 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3781 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3782 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3784 udelay(40);
3786 return err;
3789 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3791 u32 adv_reg, all_mask = 0;
3793 if (mask & ADVERTISED_10baseT_Half)
3794 all_mask |= ADVERTISE_10HALF;
3795 if (mask & ADVERTISED_10baseT_Full)
3796 all_mask |= ADVERTISE_10FULL;
3797 if (mask & ADVERTISED_100baseT_Half)
3798 all_mask |= ADVERTISE_100HALF;
3799 if (mask & ADVERTISED_100baseT_Full)
3800 all_mask |= ADVERTISE_100FULL;
3802 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3803 return 0;
3805 if ((adv_reg & ADVERTISE_ALL) != all_mask)
3806 return 0;
3808 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3809 u32 tg3_ctrl;
3811 all_mask = 0;
3812 if (mask & ADVERTISED_1000baseT_Half)
3813 all_mask |= ADVERTISE_1000HALF;
3814 if (mask & ADVERTISED_1000baseT_Full)
3815 all_mask |= ADVERTISE_1000FULL;
3817 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3818 return 0;
3820 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3821 if (tg3_ctrl != all_mask)
3822 return 0;
3825 return 1;
3828 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3830 u32 curadv, reqadv;
3832 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3833 return 1;
3835 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3836 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3838 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3839 if (curadv != reqadv)
3840 return 0;
3842 if (tg3_flag(tp, PAUSE_AUTONEG))
3843 tg3_readphy(tp, MII_LPA, rmtadv);
3844 } else {
3845 /* Reprogram the advertisement register, even if it
3846 * does not affect the current link. If the link
3847 * gets renegotiated in the future, we can save an
3848 * additional renegotiation cycle by advertising
3849 * it correctly in the first place.
3850 */
3851 if (curadv != reqadv) {
3852 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3853 ADVERTISE_PAUSE_ASYM);
3854 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3858 return 1;
3861 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3863 int current_link_up;
3864 u32 bmsr, val;
3865 u32 lcl_adv, rmt_adv;
3866 u16 current_speed;
3867 u8 current_duplex;
3868 int i, err;
3870 tw32(MAC_EVENT, 0);
3872 tw32_f(MAC_STATUS,
3873 (MAC_STATUS_SYNC_CHANGED |
3874 MAC_STATUS_CFG_CHANGED |
3875 MAC_STATUS_MI_COMPLETION |
3876 MAC_STATUS_LNKSTATE_CHANGED));
3877 udelay(40);
3879 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3880 tw32_f(MAC_MI_MODE,
3881 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3882 udelay(80);
3885 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3887 /* Some third-party PHYs need to be reset on link going
3888 * down.
3889 */
3890 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3891 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3892 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3893 netif_carrier_ok(tp->dev)) {
3894 tg3_readphy(tp, MII_BMSR, &bmsr);
3895 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3896 !(bmsr & BMSR_LSTATUS))
3897 force_reset = 1;
3899 if (force_reset)
3900 tg3_phy_reset(tp);
3902 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3903 tg3_readphy(tp, MII_BMSR, &bmsr);
3904 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3905 !tg3_flag(tp, INIT_COMPLETE))
3906 bmsr = 0;
3908 if (!(bmsr & BMSR_LSTATUS)) {
3909 err = tg3_init_5401phy_dsp(tp);
3910 if (err)
3911 return err;
3913 tg3_readphy(tp, MII_BMSR, &bmsr);
3914 for (i = 0; i < 1000; i++) {
3915 udelay(10);
3916 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3917 (bmsr & BMSR_LSTATUS)) {
3918 udelay(40);
3919 break;
3923 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3924 TG3_PHY_REV_BCM5401_B0 &&
3925 !(bmsr & BMSR_LSTATUS) &&
3926 tp->link_config.active_speed == SPEED_1000) {
3927 err = tg3_phy_reset(tp);
3928 if (!err)
3929 err = tg3_init_5401phy_dsp(tp);
3930 if (err)
3931 return err;
3934 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3935 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3936 /* 5701 {A0,B0} CRC bug workaround */
3937 tg3_writephy(tp, 0x15, 0x0a75);
3938 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3939 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3940 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3943 /* Clear pending interrupts... */
3944 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3945 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3947 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3948 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3949 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3950 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3952 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3953 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3954 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3955 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3956 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3957 else
3958 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3961 current_link_up = 0;
3962 current_speed = SPEED_INVALID;
3963 current_duplex = DUPLEX_INVALID;
3965 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3966 err = tg3_phy_auxctl_read(tp,
3967 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3968 &val);
3969 if (!err && !(val & (1 << 10))) {
3970 tg3_phy_auxctl_write(tp,
3971 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3972 val | (1 << 10));
3973 goto relink;
3977 bmsr = 0;
3978 for (i = 0; i < 100; i++) {
3979 tg3_readphy(tp, MII_BMSR, &bmsr);
3980 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3981 (bmsr & BMSR_LSTATUS))
3982 break;
3983 udelay(40);
3986 if (bmsr & BMSR_LSTATUS) {
3987 u32 aux_stat, bmcr;
3989 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3990 for (i = 0; i < 2000; i++) {
3991 udelay(10);
3992 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3993 aux_stat)
3994 break;
3997 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3998 &current_speed,
3999 &current_duplex);
4001 bmcr = 0;
4002 for (i = 0; i < 200; i++) {
4003 tg3_readphy(tp, MII_BMCR, &bmcr);
4004 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4005 continue;
4006 if (bmcr && bmcr != 0x7fff)
4007 break;
4008 udelay(10);
4011 lcl_adv = 0;
4012 rmt_adv = 0;
4014 tp->link_config.active_speed = current_speed;
4015 tp->link_config.active_duplex = current_duplex;
4017 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4018 if ((bmcr & BMCR_ANENABLE) &&
4019 tg3_copper_is_advertising_all(tp,
4020 tp->link_config.advertising)) {
4021 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
4022 &rmt_adv))
4023 current_link_up = 1;
4025 } else {
4026 if (!(bmcr & BMCR_ANENABLE) &&
4027 tp->link_config.speed == current_speed &&
4028 tp->link_config.duplex == current_duplex &&
4029 tp->link_config.flowctrl ==
4030 tp->link_config.active_flowctrl) {
4031 current_link_up = 1;
4035 if (current_link_up == 1 &&
4036 tp->link_config.active_duplex == DUPLEX_FULL)
4037 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4040 relink:
4041 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4042 tg3_phy_copper_begin(tp);
4044 tg3_readphy(tp, MII_BMSR, &bmsr);
4045 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4046 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4047 current_link_up = 1;
4050 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4051 if (current_link_up == 1) {
4052 if (tp->link_config.active_speed == SPEED_100 ||
4053 tp->link_config.active_speed == SPEED_10)
4054 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4055 else
4056 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4057 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4058 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4059 else
4060 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4062 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4063 if (tp->link_config.active_duplex == DUPLEX_HALF)
4064 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4066 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4067 if (current_link_up == 1 &&
4068 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4069 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4070 else
4071 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4074 /* ??? Without this setting Netgear GA302T PHY does not
4075 * ??? send/receive packets...
4076 */
4077 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4078 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4079 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4080 tw32_f(MAC_MI_MODE, tp->mi_mode);
4081 udelay(80);
4084 tw32_f(MAC_MODE, tp->mac_mode);
4085 udelay(40);
4087 tg3_phy_eee_adjust(tp, current_link_up);
4089 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4090 /* Polled via timer. */
4091 tw32_f(MAC_EVENT, 0);
4092 } else {
4093 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4095 udelay(40);
4097 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4098 current_link_up == 1 &&
4099 tp->link_config.active_speed == SPEED_1000 &&
4100 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4101 udelay(120);
4102 tw32_f(MAC_STATUS,
4103 (MAC_STATUS_SYNC_CHANGED |
4104 MAC_STATUS_CFG_CHANGED));
4105 udelay(40);
4106 tg3_write_mem(tp,
4107 NIC_SRAM_FIRMWARE_MBOX,
4108 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4111 /* Prevent send BD corruption. */
4112 if (tg3_flag(tp, CLKREQ_BUG)) {
4113 u16 oldlnkctl, newlnkctl;
4115 pci_read_config_word(tp->pdev,
4116 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4117 &oldlnkctl);
4118 if (tp->link_config.active_speed == SPEED_100 ||
4119 tp->link_config.active_speed == SPEED_10)
4120 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4121 else
4122 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4123 if (newlnkctl != oldlnkctl)
4124 pci_write_config_word(tp->pdev,
4125 pci_pcie_cap(tp->pdev) +
4126 PCI_EXP_LNKCTL, newlnkctl);
4129 if (current_link_up != netif_carrier_ok(tp->dev)) {
4130 if (current_link_up)
4131 netif_carrier_on(tp->dev);
4132 else
4133 netif_carrier_off(tp->dev);
4134 tg3_link_report(tp);
4137 return 0;
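/* Below is a minimal sketch (inside #if 0, never built) of the PCI
 * config read-modify-write pattern the CLKREQ_BUG workaround above
 * relies on: read PCI_EXP_LNKCTL, compute the new value, and skip the
 * write when nothing changed to avoid a needless config cycle.
 * demo_update_lnkctl() is a hypothetical name, not part of the driver.
 */
#if 0
static void demo_update_lnkctl(struct pci_dev *pdev, int pcie_cap,
			       u16 clear, u16 set)
{
	u16 oldval, newval;

	pci_read_config_word(pdev, pcie_cap + PCI_EXP_LNKCTL, &oldval);
	newval = (oldval & ~clear) | set;
	if (newval != oldval)
		pci_write_config_word(pdev, pcie_cap + PCI_EXP_LNKCTL,
				      newval);
}
#endif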
4140 struct tg3_fiber_aneginfo {
4141 int state;
4142 #define ANEG_STATE_UNKNOWN 0
4143 #define ANEG_STATE_AN_ENABLE 1
4144 #define ANEG_STATE_RESTART_INIT 2
4145 #define ANEG_STATE_RESTART 3
4146 #define ANEG_STATE_DISABLE_LINK_OK 4
4147 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4148 #define ANEG_STATE_ABILITY_DETECT 6
4149 #define ANEG_STATE_ACK_DETECT_INIT 7
4150 #define ANEG_STATE_ACK_DETECT 8
4151 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4152 #define ANEG_STATE_COMPLETE_ACK 10
4153 #define ANEG_STATE_IDLE_DETECT_INIT 11
4154 #define ANEG_STATE_IDLE_DETECT 12
4155 #define ANEG_STATE_LINK_OK 13
4156 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4157 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4159 u32 flags;
4160 #define MR_AN_ENABLE 0x00000001
4161 #define MR_RESTART_AN 0x00000002
4162 #define MR_AN_COMPLETE 0x00000004
4163 #define MR_PAGE_RX 0x00000008
4164 #define MR_NP_LOADED 0x00000010
4165 #define MR_TOGGLE_TX 0x00000020
4166 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4167 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4168 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4169 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4170 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4171 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4172 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4173 #define MR_TOGGLE_RX 0x00002000
4174 #define MR_NP_RX 0x00004000
4176 #define MR_LINK_OK 0x80000000
4178 unsigned long link_time, cur_time;
4180 u32 ability_match_cfg;
4181 int ability_match_count;
4183 char ability_match, idle_match, ack_match;
4185 u32 txconfig, rxconfig;
4186 #define ANEG_CFG_NP 0x00000080
4187 #define ANEG_CFG_ACK 0x00000040
4188 #define ANEG_CFG_RF2 0x00000020
4189 #define ANEG_CFG_RF1 0x00000010
4190 #define ANEG_CFG_PS2 0x00000001
4191 #define ANEG_CFG_PS1 0x00008000
4192 #define ANEG_CFG_HD 0x00004000
4193 #define ANEG_CFG_FD 0x00002000
4194 #define ANEG_CFG_INVAL 0x00001f06
4197 #define ANEG_OK 0
4198 #define ANEG_DONE 1
4199 #define ANEG_TIMER_ENAB 2
4200 #define ANEG_FAILED -1
4202 #define ANEG_STATE_SETTLE_TIME 10000
4204 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4205 struct tg3_fiber_aneginfo *ap)
4207 u16 flowctrl;
4208 unsigned long delta;
4209 u32 rx_cfg_reg;
4210 int ret;
4212 if (ap->state == ANEG_STATE_UNKNOWN) {
4213 ap->rxconfig = 0;
4214 ap->link_time = 0;
4215 ap->cur_time = 0;
4216 ap->ability_match_cfg = 0;
4217 ap->ability_match_count = 0;
4218 ap->ability_match = 0;
4219 ap->idle_match = 0;
4220 ap->ack_match = 0;
4222 ap->cur_time++;
4224 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4225 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4227 if (rx_cfg_reg != ap->ability_match_cfg) {
4228 ap->ability_match_cfg = rx_cfg_reg;
4229 ap->ability_match = 0;
4230 ap->ability_match_count = 0;
4231 } else {
4232 if (++ap->ability_match_count > 1) {
4233 ap->ability_match = 1;
4234 ap->ability_match_cfg = rx_cfg_reg;
4237 if (rx_cfg_reg & ANEG_CFG_ACK)
4238 ap->ack_match = 1;
4239 else
4240 ap->ack_match = 0;
4242 ap->idle_match = 0;
4243 } else {
4244 ap->idle_match = 1;
4245 ap->ability_match_cfg = 0;
4246 ap->ability_match_count = 0;
4247 ap->ability_match = 0;
4248 ap->ack_match = 0;
4250 rx_cfg_reg = 0;
4253 ap->rxconfig = rx_cfg_reg;
4254 ret = ANEG_OK;
4256 switch (ap->state) {
4257 case ANEG_STATE_UNKNOWN:
4258 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4259 ap->state = ANEG_STATE_AN_ENABLE;
4261 /* fallthru */
4262 case ANEG_STATE_AN_ENABLE:
4263 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4264 if (ap->flags & MR_AN_ENABLE) {
4265 ap->link_time = 0;
4266 ap->cur_time = 0;
4267 ap->ability_match_cfg = 0;
4268 ap->ability_match_count = 0;
4269 ap->ability_match = 0;
4270 ap->idle_match = 0;
4271 ap->ack_match = 0;
4273 ap->state = ANEG_STATE_RESTART_INIT;
4274 } else {
4275 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4277 break;
4279 case ANEG_STATE_RESTART_INIT:
4280 ap->link_time = ap->cur_time;
4281 ap->flags &= ~(MR_NP_LOADED);
4282 ap->txconfig = 0;
4283 tw32(MAC_TX_AUTO_NEG, 0);
4284 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4285 tw32_f(MAC_MODE, tp->mac_mode);
4286 udelay(40);
4288 ret = ANEG_TIMER_ENAB;
4289 ap->state = ANEG_STATE_RESTART;
4291 /* fallthru */
4292 case ANEG_STATE_RESTART:
4293 delta = ap->cur_time - ap->link_time;
4294 if (delta > ANEG_STATE_SETTLE_TIME)
4295 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4296 else
4297 ret = ANEG_TIMER_ENAB;
4298 break;
4300 case ANEG_STATE_DISABLE_LINK_OK:
4301 ret = ANEG_DONE;
4302 break;
4304 case ANEG_STATE_ABILITY_DETECT_INIT:
4305 ap->flags &= ~(MR_TOGGLE_TX);
4306 ap->txconfig = ANEG_CFG_FD;
4307 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4308 if (flowctrl & ADVERTISE_1000XPAUSE)
4309 ap->txconfig |= ANEG_CFG_PS1;
4310 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4311 ap->txconfig |= ANEG_CFG_PS2;
4312 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4313 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4314 tw32_f(MAC_MODE, tp->mac_mode);
4315 udelay(40);
4317 ap->state = ANEG_STATE_ABILITY_DETECT;
4318 break;
4320 case ANEG_STATE_ABILITY_DETECT:
4321 if (ap->ability_match != 0 && ap->rxconfig != 0)
4322 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4323 break;
4325 case ANEG_STATE_ACK_DETECT_INIT:
4326 ap->txconfig |= ANEG_CFG_ACK;
4327 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4328 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4329 tw32_f(MAC_MODE, tp->mac_mode);
4330 udelay(40);
4332 ap->state = ANEG_STATE_ACK_DETECT;
4334 /* fallthru */
4335 case ANEG_STATE_ACK_DETECT:
4336 if (ap->ack_match != 0) {
4337 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4338 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4339 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4340 } else {
4341 ap->state = ANEG_STATE_AN_ENABLE;
4343 } else if (ap->ability_match != 0 &&
4344 ap->rxconfig == 0) {
4345 ap->state = ANEG_STATE_AN_ENABLE;
4347 break;
4349 case ANEG_STATE_COMPLETE_ACK_INIT:
4350 if (ap->rxconfig & ANEG_CFG_INVAL) {
4351 ret = ANEG_FAILED;
4352 break;
4354 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4355 MR_LP_ADV_HALF_DUPLEX |
4356 MR_LP_ADV_SYM_PAUSE |
4357 MR_LP_ADV_ASYM_PAUSE |
4358 MR_LP_ADV_REMOTE_FAULT1 |
4359 MR_LP_ADV_REMOTE_FAULT2 |
4360 MR_LP_ADV_NEXT_PAGE |
4361 MR_TOGGLE_RX |
4362 MR_NP_RX);
4363 if (ap->rxconfig & ANEG_CFG_FD)
4364 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4365 if (ap->rxconfig & ANEG_CFG_HD)
4366 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4367 if (ap->rxconfig & ANEG_CFG_PS1)
4368 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4369 if (ap->rxconfig & ANEG_CFG_PS2)
4370 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4371 if (ap->rxconfig & ANEG_CFG_RF1)
4372 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4373 if (ap->rxconfig & ANEG_CFG_RF2)
4374 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4375 if (ap->rxconfig & ANEG_CFG_NP)
4376 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4378 ap->link_time = ap->cur_time;
4380 ap->flags ^= (MR_TOGGLE_TX);
4381 if (ap->rxconfig & 0x0008)
4382 ap->flags |= MR_TOGGLE_RX;
4383 if (ap->rxconfig & ANEG_CFG_NP)
4384 ap->flags |= MR_NP_RX;
4385 ap->flags |= MR_PAGE_RX;
4387 ap->state = ANEG_STATE_COMPLETE_ACK;
4388 ret = ANEG_TIMER_ENAB;
4389 break;
4391 case ANEG_STATE_COMPLETE_ACK:
4392 if (ap->ability_match != 0 &&
4393 ap->rxconfig == 0) {
4394 ap->state = ANEG_STATE_AN_ENABLE;
4395 break;
4397 delta = ap->cur_time - ap->link_time;
4398 if (delta > ANEG_STATE_SETTLE_TIME) {
4399 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4400 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4401 } else {
4402 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4403 !(ap->flags & MR_NP_RX)) {
4404 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4405 } else {
4406 ret = ANEG_FAILED;
4410 break;
4412 case ANEG_STATE_IDLE_DETECT_INIT:
4413 ap->link_time = ap->cur_time;
4414 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4415 tw32_f(MAC_MODE, tp->mac_mode);
4416 udelay(40);
4418 ap->state = ANEG_STATE_IDLE_DETECT;
4419 ret = ANEG_TIMER_ENAB;
4420 break;
4422 case ANEG_STATE_IDLE_DETECT:
4423 if (ap->ability_match != 0 &&
4424 ap->rxconfig == 0) {
4425 ap->state = ANEG_STATE_AN_ENABLE;
4426 break;
4428 delta = ap->cur_time - ap->link_time;
4429 if (delta > ANEG_STATE_SETTLE_TIME) {
4430 /* XXX another gem from the Broadcom driver :( */
4431 ap->state = ANEG_STATE_LINK_OK;
4433 break;
4435 case ANEG_STATE_LINK_OK:
4436 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4437 ret = ANEG_DONE;
4438 break;
4440 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4441 /* ??? unimplemented */
4442 break;
4444 case ANEG_STATE_NEXT_PAGE_WAIT:
4445 /* ??? unimplemented */
4446 break;
4448 default:
4449 ret = ANEG_FAILED;
4450 break;
4453 return ret;
4456 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4458 int res = 0;
4459 struct tg3_fiber_aneginfo aninfo;
4460 int status = ANEG_FAILED;
4461 unsigned int tick;
4462 u32 tmp;
4464 tw32_f(MAC_TX_AUTO_NEG, 0);
4466 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4467 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4468 udelay(40);
4470 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4471 udelay(40);
4473 memset(&aninfo, 0, sizeof(aninfo));
4474 aninfo.flags |= MR_AN_ENABLE;
4475 aninfo.state = ANEG_STATE_UNKNOWN;
4476 aninfo.cur_time = 0;
4477 tick = 0;
4478 while (++tick < 195000) {
4479 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4480 if (status == ANEG_DONE || status == ANEG_FAILED)
4481 break;
4483 udelay(1);
4486 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4487 tw32_f(MAC_MODE, tp->mac_mode);
4488 udelay(40);
4490 *txflags = aninfo.txconfig;
4491 *rxflags = aninfo.flags;
4493 if (status == ANEG_DONE &&
4494 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4495 MR_LP_ADV_FULL_DUPLEX)))
4496 res = 1;
4498 return res;
4501 static void tg3_init_bcm8002(struct tg3 *tp)
4503 u32 mac_status = tr32(MAC_STATUS);
4504 int i;
4506 /* Reset when initializing for the first time or when we have a link. */
4507 if (tg3_flag(tp, INIT_COMPLETE) &&
4508 !(mac_status & MAC_STATUS_PCS_SYNCED))
4509 return;
4511 /* Set PLL lock range. */
4512 tg3_writephy(tp, 0x16, 0x8007);
4514 /* SW reset */
4515 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4517 /* Wait for reset to complete. */
4518 /* XXX schedule_timeout() ... */
4519 for (i = 0; i < 500; i++)
4520 udelay(10);
4522 /* Config mode; select PMA/Ch 1 regs. */
4523 tg3_writephy(tp, 0x10, 0x8411);
4525 /* Enable auto-lock and comdet, select txclk for tx. */
4526 tg3_writephy(tp, 0x11, 0x0a10);
4528 tg3_writephy(tp, 0x18, 0x00a0);
4529 tg3_writephy(tp, 0x16, 0x41ff);
4531 /* Assert and deassert POR. */
4532 tg3_writephy(tp, 0x13, 0x0400);
4533 udelay(40);
4534 tg3_writephy(tp, 0x13, 0x0000);
4536 tg3_writephy(tp, 0x11, 0x0a50);
4537 udelay(40);
4538 tg3_writephy(tp, 0x11, 0x0a10);
4540 /* Wait for signal to stabilize */
4541 /* XXX schedule_timeout() ... */
4542 for (i = 0; i < 15000; i++)
4543 udelay(10);
4545 /* Deselect the channel register so we can read the PHYID
4546 * later.
4547 */
4548 tg3_writephy(tp, 0x10, 0x8011);
4551 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4553 u16 flowctrl;
4554 u32 sg_dig_ctrl, sg_dig_status;
4555 u32 serdes_cfg, expected_sg_dig_ctrl;
4556 int workaround, port_a;
4557 int current_link_up;
4559 serdes_cfg = 0;
4560 expected_sg_dig_ctrl = 0;
4561 workaround = 0;
4562 port_a = 1;
4563 current_link_up = 0;
4565 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4566 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4567 workaround = 1;
4568 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4569 port_a = 0;
4571 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4572 /* preserve bits 20-23 for voltage regulator */
4573 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4576 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4578 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4579 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4580 if (workaround) {
4581 u32 val = serdes_cfg;
4583 if (port_a)
4584 val |= 0xc010000;
4585 else
4586 val |= 0x4010000;
4587 tw32_f(MAC_SERDES_CFG, val);
4590 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4592 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4593 tg3_setup_flow_control(tp, 0, 0);
4594 current_link_up = 1;
4596 goto out;
4599 /* Want auto-negotiation. */
4600 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4602 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4603 if (flowctrl & ADVERTISE_1000XPAUSE)
4604 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4605 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4606 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4608 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4609 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4610 tp->serdes_counter &&
4611 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4612 MAC_STATUS_RCVD_CFG)) ==
4613 MAC_STATUS_PCS_SYNCED)) {
4614 tp->serdes_counter--;
4615 current_link_up = 1;
4616 goto out;
4618 restart_autoneg:
4619 if (workaround)
4620 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4621 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4622 udelay(5);
4623 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4625 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4626 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4627 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4628 MAC_STATUS_SIGNAL_DET)) {
4629 sg_dig_status = tr32(SG_DIG_STATUS);
4630 mac_status = tr32(MAC_STATUS);
4632 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4633 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4634 u32 local_adv = 0, remote_adv = 0;
4636 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4637 local_adv |= ADVERTISE_1000XPAUSE;
4638 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4639 local_adv |= ADVERTISE_1000XPSE_ASYM;
4641 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4642 remote_adv |= LPA_1000XPAUSE;
4643 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4644 remote_adv |= LPA_1000XPAUSE_ASYM;
4646 tg3_setup_flow_control(tp, local_adv, remote_adv);
4647 current_link_up = 1;
4648 tp->serdes_counter = 0;
4649 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4650 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4651 if (tp->serdes_counter)
4652 tp->serdes_counter--;
4653 else {
4654 if (workaround) {
4655 u32 val = serdes_cfg;
4657 if (port_a)
4658 val |= 0xc010000;
4659 else
4660 val |= 0x4010000;
4662 tw32_f(MAC_SERDES_CFG, val);
4665 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4666 udelay(40);
4668 /* Link parallel detection - link is up */
4669 /* only if we have PCS_SYNC and not */
4670 /* receiving config code words */
4671 mac_status = tr32(MAC_STATUS);
4672 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4673 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4674 tg3_setup_flow_control(tp, 0, 0);
4675 current_link_up = 1;
4676 tp->phy_flags |=
4677 TG3_PHYFLG_PARALLEL_DETECT;
4678 tp->serdes_counter =
4679 SERDES_PARALLEL_DET_TIMEOUT;
4680 } else
4681 goto restart_autoneg;
4684 } else {
4685 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4686 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4689 out:
4690 return current_link_up;
4693 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4695 int current_link_up = 0;
4697 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4698 goto out;
4700 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4701 u32 txflags, rxflags;
4702 int i;
4704 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4705 u32 local_adv = 0, remote_adv = 0;
4707 if (txflags & ANEG_CFG_PS1)
4708 local_adv |= ADVERTISE_1000XPAUSE;
4709 if (txflags & ANEG_CFG_PS2)
4710 local_adv |= ADVERTISE_1000XPSE_ASYM;
4712 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4713 remote_adv |= LPA_1000XPAUSE;
4714 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4715 remote_adv |= LPA_1000XPAUSE_ASYM;
4717 tg3_setup_flow_control(tp, local_adv, remote_adv);
4719 current_link_up = 1;
4721 for (i = 0; i < 30; i++) {
4722 udelay(20);
4723 tw32_f(MAC_STATUS,
4724 (MAC_STATUS_SYNC_CHANGED |
4725 MAC_STATUS_CFG_CHANGED));
4726 udelay(40);
4727 if ((tr32(MAC_STATUS) &
4728 (MAC_STATUS_SYNC_CHANGED |
4729 MAC_STATUS_CFG_CHANGED)) == 0)
4730 break;
4733 mac_status = tr32(MAC_STATUS);
4734 if (current_link_up == 0 &&
4735 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4736 !(mac_status & MAC_STATUS_RCVD_CFG))
4737 current_link_up = 1;
4738 } else {
4739 tg3_setup_flow_control(tp, 0, 0);
4741 /* Forcing 1000FD link up. */
4742 current_link_up = 1;
4744 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4745 udelay(40);
4747 tw32_f(MAC_MODE, tp->mac_mode);
4748 udelay(40);
4751 out:
4752 return current_link_up;
4755 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4757 u32 orig_pause_cfg;
4758 u16 orig_active_speed;
4759 u8 orig_active_duplex;
4760 u32 mac_status;
4761 int current_link_up;
4762 int i;
4764 orig_pause_cfg = tp->link_config.active_flowctrl;
4765 orig_active_speed = tp->link_config.active_speed;
4766 orig_active_duplex = tp->link_config.active_duplex;
4768 if (!tg3_flag(tp, HW_AUTONEG) &&
4769 netif_carrier_ok(tp->dev) &&
4770 tg3_flag(tp, INIT_COMPLETE)) {
4771 mac_status = tr32(MAC_STATUS);
4772 mac_status &= (MAC_STATUS_PCS_SYNCED |
4773 MAC_STATUS_SIGNAL_DET |
4774 MAC_STATUS_CFG_CHANGED |
4775 MAC_STATUS_RCVD_CFG);
4776 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4777 MAC_STATUS_SIGNAL_DET)) {
4778 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4779 MAC_STATUS_CFG_CHANGED));
4780 return 0;
4784 tw32_f(MAC_TX_AUTO_NEG, 0);
4786 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4787 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4788 tw32_f(MAC_MODE, tp->mac_mode);
4789 udelay(40);
4791 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4792 tg3_init_bcm8002(tp);
4794 /* Enable link change event even when serdes polling. */
4795 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4796 udelay(40);
4798 current_link_up = 0;
4799 mac_status = tr32(MAC_STATUS);
4801 if (tg3_flag(tp, HW_AUTONEG))
4802 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4803 else
4804 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4806 tp->napi[0].hw_status->status =
4807 (SD_STATUS_UPDATED |
4808 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4810 for (i = 0; i < 100; i++) {
4811 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4812 MAC_STATUS_CFG_CHANGED));
4813 udelay(5);
4814 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4815 MAC_STATUS_CFG_CHANGED |
4816 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4817 break;
4820 mac_status = tr32(MAC_STATUS);
4821 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4822 current_link_up = 0;
4823 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4824 tp->serdes_counter == 0) {
4825 tw32_f(MAC_MODE, (tp->mac_mode |
4826 MAC_MODE_SEND_CONFIGS));
4827 udelay(1);
4828 tw32_f(MAC_MODE, tp->mac_mode);
4832 if (current_link_up == 1) {
4833 tp->link_config.active_speed = SPEED_1000;
4834 tp->link_config.active_duplex = DUPLEX_FULL;
4835 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4836 LED_CTRL_LNKLED_OVERRIDE |
4837 LED_CTRL_1000MBPS_ON));
4838 } else {
4839 tp->link_config.active_speed = SPEED_INVALID;
4840 tp->link_config.active_duplex = DUPLEX_INVALID;
4841 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4842 LED_CTRL_LNKLED_OVERRIDE |
4843 LED_CTRL_TRAFFIC_OVERRIDE));
4846 if (current_link_up != netif_carrier_ok(tp->dev)) {
4847 if (current_link_up)
4848 netif_carrier_on(tp->dev);
4849 else
4850 netif_carrier_off(tp->dev);
4851 tg3_link_report(tp);
4852 } else {
4853 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4854 if (orig_pause_cfg != now_pause_cfg ||
4855 orig_active_speed != tp->link_config.active_speed ||
4856 orig_active_duplex != tp->link_config.active_duplex)
4857 tg3_link_report(tp);
4860 return 0;
4863 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4865 int current_link_up, err = 0;
4866 u32 bmsr, bmcr;
4867 u16 current_speed;
4868 u8 current_duplex;
4869 u32 local_adv, remote_adv;
4871 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4872 tw32_f(MAC_MODE, tp->mac_mode);
4873 udelay(40);
4875 tw32(MAC_EVENT, 0);
4877 tw32_f(MAC_STATUS,
4878 (MAC_STATUS_SYNC_CHANGED |
4879 MAC_STATUS_CFG_CHANGED |
4880 MAC_STATUS_MI_COMPLETION |
4881 MAC_STATUS_LNKSTATE_CHANGED));
4882 udelay(40);
4884 if (force_reset)
4885 tg3_phy_reset(tp);
4887 current_link_up = 0;
4888 current_speed = SPEED_INVALID;
4889 current_duplex = DUPLEX_INVALID;
4891 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4892 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4893 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4894 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4895 bmsr |= BMSR_LSTATUS;
4896 else
4897 bmsr &= ~BMSR_LSTATUS;
4900 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4902 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4903 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4904 /* do nothing, just check for link up at the end */
4905 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4906 u32 adv, new_adv;
4908 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4909 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4910 ADVERTISE_1000XPAUSE |
4911 ADVERTISE_1000XPSE_ASYM |
4912 ADVERTISE_SLCT);
4914 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4916 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4917 new_adv |= ADVERTISE_1000XHALF;
4918 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4919 new_adv |= ADVERTISE_1000XFULL;
4921 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4922 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4923 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4924 tg3_writephy(tp, MII_BMCR, bmcr);
4926 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4927 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4928 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4930 return err;
4932 } else {
4933 u32 new_bmcr;
4935 bmcr &= ~BMCR_SPEED1000;
4936 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4938 if (tp->link_config.duplex == DUPLEX_FULL)
4939 new_bmcr |= BMCR_FULLDPLX;
4941 if (new_bmcr != bmcr) {
4942 /* BMCR_SPEED1000 is a reserved bit that needs
4943 * to be set on write.
4944 */
4945 new_bmcr |= BMCR_SPEED1000;
4947 /* Force a linkdown */
4948 if (netif_carrier_ok(tp->dev)) {
4949 u32 adv;
4951 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4952 adv &= ~(ADVERTISE_1000XFULL |
4953 ADVERTISE_1000XHALF |
4954 ADVERTISE_SLCT);
4955 tg3_writephy(tp, MII_ADVERTISE, adv);
4956 tg3_writephy(tp, MII_BMCR, bmcr |
4957 BMCR_ANRESTART |
4958 BMCR_ANENABLE);
4959 udelay(10);
4960 netif_carrier_off(tp->dev);
4962 tg3_writephy(tp, MII_BMCR, new_bmcr);
4963 bmcr = new_bmcr;
4964 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4965 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4966 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4967 ASIC_REV_5714) {
4968 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4969 bmsr |= BMSR_LSTATUS;
4970 else
4971 bmsr &= ~BMSR_LSTATUS;
4973 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4977 if (bmsr & BMSR_LSTATUS) {
4978 current_speed = SPEED_1000;
4979 current_link_up = 1;
4980 if (bmcr & BMCR_FULLDPLX)
4981 current_duplex = DUPLEX_FULL;
4982 else
4983 current_duplex = DUPLEX_HALF;
4985 local_adv = 0;
4986 remote_adv = 0;
4988 if (bmcr & BMCR_ANENABLE) {
4989 u32 common;
4991 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4992 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4993 common = local_adv & remote_adv;
4994 if (common & (ADVERTISE_1000XHALF |
4995 ADVERTISE_1000XFULL)) {
4996 if (common & ADVERTISE_1000XFULL)
4997 current_duplex = DUPLEX_FULL;
4998 else
4999 current_duplex = DUPLEX_HALF;
5000 } else if (!tg3_flag(tp, 5780_CLASS)) {
5001 /* Link is up via parallel detect */
5002 } else {
5003 current_link_up = 0;
5008 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5009 tg3_setup_flow_control(tp, local_adv, remote_adv);
5011 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5012 if (tp->link_config.active_duplex == DUPLEX_HALF)
5013 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5015 tw32_f(MAC_MODE, tp->mac_mode);
5016 udelay(40);
5018 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5020 tp->link_config.active_speed = current_speed;
5021 tp->link_config.active_duplex = current_duplex;
5023 if (current_link_up != netif_carrier_ok(tp->dev)) {
5024 if (current_link_up)
5025 netif_carrier_on(tp->dev);
5026 else {
5027 netif_carrier_off(tp->dev);
5028 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5030 tg3_link_report(tp);
5032 return err;
5035 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5037 if (tp->serdes_counter) {
5038 /* Give autoneg time to complete. */
5039 tp->serdes_counter--;
5040 return;
5043 if (!netif_carrier_ok(tp->dev) &&
5044 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5045 u32 bmcr;
5047 tg3_readphy(tp, MII_BMCR, &bmcr);
5048 if (bmcr & BMCR_ANENABLE) {
5049 u32 phy1, phy2;
5051 /* Select shadow register 0x1f */
5052 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5053 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5055 /* Select expansion interrupt status register */
5056 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5057 MII_TG3_DSP_EXP1_INT_STAT);
5058 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5059 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5061 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5062 /* We have signal detect and not receiving
5063 * config code words, link is up by parallel
5064 * detection.
5065 */
5067 bmcr &= ~BMCR_ANENABLE;
5068 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5069 tg3_writephy(tp, MII_BMCR, bmcr);
5070 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5073 } else if (netif_carrier_ok(tp->dev) &&
5074 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5075 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5076 u32 phy2;
5078 /* Select expansion interrupt status register */
5079 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5080 MII_TG3_DSP_EXP1_INT_STAT);
5081 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5082 if (phy2 & 0x20) {
5083 u32 bmcr;
5085 /* Config code words received, turn on autoneg. */
5086 tg3_readphy(tp, MII_BMCR, &bmcr);
5087 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5089 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
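/* Below is a minimal sketch (inside #if 0, never built) of the indirect
 * PHY access pattern used in tg3_serdes_parallel_detect() above: write
 * the target register into the DSP address register, then read the data
 * port.  The double read mirrors the code above and is assumed to
 * discard a stale latched value.  demo_read_dsp_exp1() is a
 * hypothetical name, not part of the driver.
 */
#if 0
static int demo_read_dsp_exp1(struct tg3 *tp, u32 *val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, MII_TG3_DSP_EXP1_INT_STAT);
	tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);	/* possibly stale */
	return tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
}
#endif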
5095 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5097 u32 val;
5098 int err;
5100 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5101 err = tg3_setup_fiber_phy(tp, force_reset);
5102 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5103 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5104 else
5105 err = tg3_setup_copper_phy(tp, force_reset);
5107 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5108 u32 scale;
5110 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5111 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5112 scale = 65;
5113 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5114 scale = 6;
5115 else
5116 scale = 12;
5118 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5119 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5120 tw32(GRC_MISC_CFG, val);
5123 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5124 (6 << TX_LENGTHS_IPG_SHIFT);
5125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5126 val |= tr32(MAC_TX_LENGTHS) &
5127 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5128 TX_LENGTHS_CNT_DWN_VAL_MSK);
5130 if (tp->link_config.active_speed == SPEED_1000 &&
5131 tp->link_config.active_duplex == DUPLEX_HALF)
5132 tw32(MAC_TX_LENGTHS, val |
5133 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5134 else
5135 tw32(MAC_TX_LENGTHS, val |
5136 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5138 if (!tg3_flag(tp, 5705_PLUS)) {
5139 if (netif_carrier_ok(tp->dev)) {
5140 tw32(HOSTCC_STAT_COAL_TICKS,
5141 tp->coal.stats_block_coalesce_usecs);
5142 } else {
5143 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5147 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5148 val = tr32(PCIE_PWR_MGMT_THRESH);
5149 if (!netif_carrier_ok(tp->dev))
5150 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5151 tp->pwrmgmt_thresh;
5152 else
5153 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5154 tw32(PCIE_PWR_MGMT_THRESH, val);
5157 return err;
5160 static inline int tg3_irq_sync(struct tg3 *tp)
5162 return tp->irq_sync;
5165 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5167 int i;
5169 dst = (u32 *)((u8 *)dst + off);
5170 for (i = 0; i < len; i += sizeof(u32))
5171 *dst++ = tr32(off + i);
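/* A usage sketch (inside #if 0, never built): tg3_rd32_loop() biases
 * 'dst' by 'off' before storing, so the register at byte offset
 * 'off + i' always lands at dst[(off + i) / 4].  That lets the dump
 * loop in tg3_dump_state() print register addresses straight from the
 * array index.  demo_capture_mac_regs() is a hypothetical name; regs
 * must cover TG3_REG_BLK_SIZE bytes, as in tg3_dump_state().
 */
#if 0
static void demo_capture_mac_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x10);
	/* regs[MAC_MODE / sizeof(u32)] now holds tr32(MAC_MODE) */
}
#endif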
5174 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5176 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5177 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5178 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5179 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5180 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5181 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5182 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5183 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5184 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5185 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5186 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5187 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5188 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5189 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5190 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5191 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5192 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5193 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5194 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5196 if (tg3_flag(tp, SUPPORT_MSIX))
5197 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5199 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5200 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5201 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5202 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5203 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5204 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5205 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5206 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5208 if (!tg3_flag(tp, 5705_PLUS)) {
5209 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5210 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5211 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5214 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5215 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5216 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5217 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5218 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5220 if (tg3_flag(tp, NVRAM))
5221 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5224 static void tg3_dump_state(struct tg3 *tp)
5226 int i;
5227 u32 *regs;
5229 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5230 if (!regs) {
5231 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5232 return;
5235 if (tg3_flag(tp, PCI_EXPRESS)) {
5236 /* Read up to but not including private PCI registers */
5237 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5238 regs[i / sizeof(u32)] = tr32(i);
5239 } else
5240 tg3_dump_legacy_regs(tp, regs);
5242 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5243 if (!regs[i + 0] && !regs[i + 1] &&
5244 !regs[i + 2] && !regs[i + 3])
5245 continue;
5247 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5248 i * 4,
5249 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5252 kfree(regs);
5254 for (i = 0; i < tp->irq_cnt; i++) {
5255 struct tg3_napi *tnapi = &tp->napi[i];
5257 /* SW status block */
5258 netdev_err(tp->dev,
5259 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5261 tnapi->hw_status->status,
5262 tnapi->hw_status->status_tag,
5263 tnapi->hw_status->rx_jumbo_consumer,
5264 tnapi->hw_status->rx_consumer,
5265 tnapi->hw_status->rx_mini_consumer,
5266 tnapi->hw_status->idx[0].rx_producer,
5267 tnapi->hw_status->idx[0].tx_consumer);
5269 netdev_err(tp->dev,
5270 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5272 tnapi->last_tag, tnapi->last_irq_tag,
5273 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5274 tnapi->rx_rcb_ptr,
5275 tnapi->prodring.rx_std_prod_idx,
5276 tnapi->prodring.rx_std_cons_idx,
5277 tnapi->prodring.rx_jmb_prod_idx,
5278 tnapi->prodring.rx_jmb_cons_idx);
5282 /* This is called whenever we suspect that the system chipset is re-
5283 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5284 * is bogus tx completions. We try to recover by setting the
5285 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5286 * in the workqueue.
5287 */
5288 static void tg3_tx_recover(struct tg3 *tp)
5290 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5291 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5293 netdev_warn(tp->dev,
5294 "The system may be re-ordering memory-mapped I/O "
5295 "cycles to the network device, attempting to recover. "
5296 "Please report the problem to the driver maintainer "
5297 "and include system chipset information.\n");
5299 spin_lock(&tp->lock);
5300 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5301 spin_unlock(&tp->lock);
5304 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5306 /* Tell compiler to fetch tx indices from memory. */
5307 barrier();
5308 return tnapi->tx_pending -
5309 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
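/* A worked example (inside #if 0, never built) of the ring arithmetic
 * above, assuming a 512-entry ring.  Because the ring size is a power
 * of two, masking the index difference stays correct after tx_prod
 * wraps past tx_cons.  demo_tx_avail() is a hypothetical name.
 */
#if 0
static u32 demo_tx_avail(u32 pending, u32 prod, u32 cons)
{
	return pending - ((prod - cons) & 511);
}
/* demo_tx_avail(512, 5, 500) == 512 - 17: the wrapped difference
 * (5 - 500) & 511 == 17 descriptors are still in flight.
 */
#endif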
5312 /* Tigon3 never reports partial packet sends. So we do not
5313 * need special logic to handle SKBs that have not had all
5314 * of their frags sent yet, like SunGEM does.
5315 */
5316 static void tg3_tx(struct tg3_napi *tnapi)
5318 struct tg3 *tp = tnapi->tp;
5319 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5320 u32 sw_idx = tnapi->tx_cons;
5321 struct netdev_queue *txq;
5322 int index = tnapi - tp->napi;
5324 if (tg3_flag(tp, ENABLE_TSS))
5325 index--;
5327 txq = netdev_get_tx_queue(tp->dev, index);
5329 while (sw_idx != hw_idx) {
5330 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5331 struct sk_buff *skb = ri->skb;
5332 int i, tx_bug = 0;
5334 if (unlikely(skb == NULL)) {
5335 tg3_tx_recover(tp);
5336 return;
5339 pci_unmap_single(tp->pdev,
5340 dma_unmap_addr(ri, mapping),
5341 skb_headlen(skb),
5342 PCI_DMA_TODEVICE);
5344 ri->skb = NULL;
5346 while (ri->fragmented) {
5347 ri->fragmented = false;
5348 sw_idx = NEXT_TX(sw_idx);
5349 ri = &tnapi->tx_buffers[sw_idx];
5352 sw_idx = NEXT_TX(sw_idx);
5354 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5355 ri = &tnapi->tx_buffers[sw_idx];
5356 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5357 tx_bug = 1;
5359 pci_unmap_page(tp->pdev,
5360 dma_unmap_addr(ri, mapping),
5361 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5362 PCI_DMA_TODEVICE);
5364 while (ri->fragmented) {
5365 ri->fragmented = false;
5366 sw_idx = NEXT_TX(sw_idx);
5367 ri = &tnapi->tx_buffers[sw_idx];
5370 sw_idx = NEXT_TX(sw_idx);
5373 dev_kfree_skb(skb);
5375 if (unlikely(tx_bug)) {
5376 tg3_tx_recover(tp);
5377 return;
5381 tnapi->tx_cons = sw_idx;
5383 /* Need to make the tx_cons update visible to tg3_start_xmit()
5384 * before checking for netif_queue_stopped(). Without the
5385 * memory barrier, there is a small possibility that tg3_start_xmit()
5386 * will miss it and cause the queue to be stopped forever.
5387 */
5388 smp_mb();
5390 if (unlikely(netif_tx_queue_stopped(txq) &&
5391 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5392 __netif_tx_lock(txq, smp_processor_id());
5393 if (netif_tx_queue_stopped(txq) &&
5394 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5395 netif_tx_wake_queue(txq);
5396 __netif_tx_unlock(txq);
5400 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5402 if (!ri->skb)
5403 return;
5405 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5406 map_sz, PCI_DMA_FROMDEVICE);
5407 dev_kfree_skb_any(ri->skb);
5408 ri->skb = NULL;
5411 /* Returns size of skb allocated or < 0 on error.
5413 * We only need to fill in the address because the other members
5414 * of the RX descriptor are invariant; see tg3_init_rings.
5416 * Note the purposeful asymmetry of cpu vs. chip accesses. For
5417 * posting buffers we only dirty the first cache line of the RX
5418 * descriptor (containing the address). Whereas for the RX status
5419 * buffers the cpu only reads the last cacheline of the RX descriptor
5420 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5421 */
5422 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5423 u32 opaque_key, u32 dest_idx_unmasked)
5425 struct tg3_rx_buffer_desc *desc;
5426 struct ring_info *map;
5427 struct sk_buff *skb;
5428 dma_addr_t mapping;
5429 int skb_size, dest_idx;
5431 switch (opaque_key) {
5432 case RXD_OPAQUE_RING_STD:
5433 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5434 desc = &tpr->rx_std[dest_idx];
5435 map = &tpr->rx_std_buffers[dest_idx];
5436 skb_size = tp->rx_pkt_map_sz;
5437 break;
5439 case RXD_OPAQUE_RING_JUMBO:
5440 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5441 desc = &tpr->rx_jmb[dest_idx].std;
5442 map = &tpr->rx_jmb_buffers[dest_idx];
5443 skb_size = TG3_RX_JMB_MAP_SZ;
5444 break;
5446 default:
5447 return -EINVAL;
5450 /* Do not overwrite any of the map or rp information
5451 * until we are sure we can commit to a new buffer.
5453 * Callers depend upon this behavior and assume that
5454 * we leave everything unchanged if we fail.
5455 */
5456 skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
5457 if (skb == NULL)
5458 return -ENOMEM;
5460 skb_reserve(skb, TG3_RX_OFFSET(tp));
5462 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
5463 PCI_DMA_FROMDEVICE);
5464 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5465 dev_kfree_skb(skb);
5466 return -EIO;
5469 map->skb = skb;
5470 dma_unmap_addr_set(map, mapping, mapping);
5472 desc->addr_hi = ((u64)mapping >> 32);
5473 desc->addr_lo = ((u64)mapping & 0xffffffff);
5475 return skb_size;
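/* A minimal sketch (inside #if 0, never built) of the address split
 * used just above when committing the DMA mapping to the descriptor:
 * the 64-bit bus address is stored as two 32-bit halves.
 * demo_set_bd_addr() is a hypothetical name, not part of the driver.
 */
#if 0
static void demo_set_bd_addr(struct tg3_rx_buffer_desc *desc, u64 mapping)
{
	desc->addr_hi = (u32)(mapping >> 32);		/* upper 32 bits */
	desc->addr_lo = (u32)(mapping & 0xffffffff);	/* lower 32 bits */
}
#endif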
5478 /* We only need to move over in the address because the other
5479 * members of the RX descriptor are invariant. See notes above
5480 * tg3_alloc_rx_skb for full details.
5481 */
5482 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5483 struct tg3_rx_prodring_set *dpr,
5484 u32 opaque_key, int src_idx,
5485 u32 dest_idx_unmasked)
5487 struct tg3 *tp = tnapi->tp;
5488 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5489 struct ring_info *src_map, *dest_map;
5490 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5491 int dest_idx;
5493 switch (opaque_key) {
5494 case RXD_OPAQUE_RING_STD:
5495 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5496 dest_desc = &dpr->rx_std[dest_idx];
5497 dest_map = &dpr->rx_std_buffers[dest_idx];
5498 src_desc = &spr->rx_std[src_idx];
5499 src_map = &spr->rx_std_buffers[src_idx];
5500 break;
5502 case RXD_OPAQUE_RING_JUMBO:
5503 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5504 dest_desc = &dpr->rx_jmb[dest_idx].std;
5505 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5506 src_desc = &spr->rx_jmb[src_idx].std;
5507 src_map = &spr->rx_jmb_buffers[src_idx];
5508 break;
5510 default:
5511 return;
5514 dest_map->skb = src_map->skb;
5515 dma_unmap_addr_set(dest_map, mapping,
5516 dma_unmap_addr(src_map, mapping));
5517 dest_desc->addr_hi = src_desc->addr_hi;
5518 dest_desc->addr_lo = src_desc->addr_lo;
5520 /* Ensure that the update to the skb happens after the physical
5521 * addresses have been transferred to the new BD location.
5522 */
5523 smp_wmb();
5525 src_map->skb = NULL;
5528 /* The RX ring scheme is composed of multiple rings which post fresh
5529 * buffers to the chip, and one special ring the chip uses to report
5530 * status back to the host.
5532 * The special ring reports the status of received packets to the
5533 * host. The chip does not write into the original descriptor the
5534 * RX buffer was obtained from. The chip simply takes the original
5535 * descriptor as provided by the host, updates the status and length
5536 * field, then writes this into the next status ring entry.
5538 * Each ring the host uses to post buffers to the chip is described
5539 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
5540 * it is first placed into the on-chip ram. When the packet's length
5541 * is known, it walks down the TG3_BDINFO entries to select the ring.
5542 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5543 * which is within the range of the new packet's length is chosen.
5545 * The "separate ring for rx status" scheme may sound queer, but it makes
5546 * sense from a cache coherency perspective. If only the host writes
5547 * to the buffer post rings, and only the chip writes to the rx status
5548 * rings, then cache lines never move beyond shared-modified state.
5549 * If both the host and chip were to write into the same ring, cache line
5550 * eviction could occur since both entities want it in an exclusive state.
5551 */
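/* Below is a minimal sketch (inside #if 0, never built) of the consumer
 * side of the scheme just described: walk the status ring from the
 * software index up to the hardware-reported producer index, using the
 * opaque cookie to find the post ring each buffer came from.  All
 * demo_* names are hypothetical, not part of the driver.
 */
#if 0
struct demo_status_entry {
	u32 opaque;	/* post ring id + index the buffer came from */
	u32 len;	/* filled in by the chip */
};

static void demo_consume_status_ring(struct demo_status_entry *ring,
				     u32 *sw_idx, u32 hw_idx, u32 ring_mask)
{
	while (*sw_idx != hw_idx) {
		struct demo_status_entry *e = &ring[*sw_idx];

		/* hand e->len bytes from the buffer named by e->opaque
		 * to the stack, then recycle or repost that buffer
		 */
		*sw_idx = (*sw_idx + 1) & ring_mask;
	}
}
#endif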
5552 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5554 struct tg3 *tp = tnapi->tp;
5555 u32 work_mask, rx_std_posted = 0;
5556 u32 std_prod_idx, jmb_prod_idx;
5557 u32 sw_idx = tnapi->rx_rcb_ptr;
5558 u16 hw_idx;
5559 int received;
5560 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5562 hw_idx = *(tnapi->rx_rcb_prod_idx);
5563 /*
5564 * We need to order the read of hw_idx and the read of
5565 * the opaque cookie.
5566 */
5567 rmb();
5568 work_mask = 0;
5569 received = 0;
5570 std_prod_idx = tpr->rx_std_prod_idx;
5571 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5572 while (sw_idx != hw_idx && budget > 0) {
5573 struct ring_info *ri;
5574 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5575 unsigned int len;
5576 struct sk_buff *skb;
5577 dma_addr_t dma_addr;
5578 u32 opaque_key, desc_idx, *post_ptr;
5580 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5581 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5582 if (opaque_key == RXD_OPAQUE_RING_STD) {
5583 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5584 dma_addr = dma_unmap_addr(ri, mapping);
5585 skb = ri->skb;
5586 post_ptr = &std_prod_idx;
5587 rx_std_posted++;
5588 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5589 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5590 dma_addr = dma_unmap_addr(ri, mapping);
5591 skb = ri->skb;
5592 post_ptr = &jmb_prod_idx;
5593 } else
5594 goto next_pkt_nopost;
5596 work_mask |= opaque_key;
5598 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5599 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5600 drop_it:
5601 tg3_recycle_rx(tnapi, tpr, opaque_key,
5602 desc_idx, *post_ptr);
5603 drop_it_no_recycle:
5604 /* Other statistics kept track of by card. */
5605 tp->rx_dropped++;
5606 goto next_pkt;
5609 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5610 ETH_FCS_LEN;
5612 if (len > TG3_RX_COPY_THRESH(tp)) {
5613 int skb_size;
5615 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5616 *post_ptr);
5617 if (skb_size < 0)
5618 goto drop_it;
5620 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5621 PCI_DMA_FROMDEVICE);
5623 /* Ensure that the update to the skb happens
5624 * after the usage of the old DMA mapping.
5625 */
5626 smp_wmb();
5628 ri->skb = NULL;
5630 skb_put(skb, len);
5631 } else {
5632 struct sk_buff *copy_skb;
5634 tg3_recycle_rx(tnapi, tpr, opaque_key,
5635 desc_idx, *post_ptr);
5637 copy_skb = netdev_alloc_skb(tp->dev, len +
5638 TG3_RAW_IP_ALIGN);
5639 if (copy_skb == NULL)
5640 goto drop_it_no_recycle;
5642 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5643 skb_put(copy_skb, len);
5644 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5645 skb_copy_from_linear_data(skb, copy_skb->data, len);
5646 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5648 /* We'll reuse the original ring buffer. */
5649 skb = copy_skb;
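/* The 0xffff test below is how the hardware reports a good checksum:
 * the ones-complement sum it computes over the TCP/UDP data comes out
 * as all ones for a valid packet, so the stack can be told to skip its
 * own verification (the ones-complement detail is an inference, not
 * spelled out in this file).
 */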
5652 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5653 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5654 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5655 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5656 skb->ip_summed = CHECKSUM_UNNECESSARY;
5657 else
5658 skb_checksum_none_assert(skb);
5660 skb->protocol = eth_type_trans(skb, tp->dev);
5662 if (len > (tp->dev->mtu + ETH_HLEN) &&
5663 skb->protocol != htons(ETH_P_8021Q)) {
5664 dev_kfree_skb(skb);
5665 goto drop_it_no_recycle;
5668 if (desc->type_flags & RXD_FLAG_VLAN &&
5669 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5670 __vlan_hwaccel_put_tag(skb,
5671 desc->err_vlan & RXD_VLAN_MASK);
5673 napi_gro_receive(&tnapi->napi, skb);
5675 received++;
5676 budget--;
5678 next_pkt:
5679 (*post_ptr)++;
5681 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5682 tpr->rx_std_prod_idx = std_prod_idx &
5683 tp->rx_std_ring_mask;
5684 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5685 tpr->rx_std_prod_idx);
5686 work_mask &= ~RXD_OPAQUE_RING_STD;
5687 rx_std_posted = 0;
5689 next_pkt_nopost:
5690 sw_idx++;
5691 sw_idx &= tp->rx_ret_ring_mask;
5693 /* Refresh hw_idx to see if there is new work */
5694 if (sw_idx == hw_idx) {
5695 hw_idx = *(tnapi->rx_rcb_prod_idx);
5696 rmb();
5700 /* ACK the status ring. */
5701 tnapi->rx_rcb_ptr = sw_idx;
5702 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5704 /* Refill RX ring(s). */
5705 if (!tg3_flag(tp, ENABLE_RSS)) {
5706 if (work_mask & RXD_OPAQUE_RING_STD) {
5707 tpr->rx_std_prod_idx = std_prod_idx &
5708 tp->rx_std_ring_mask;
5709 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5710 tpr->rx_std_prod_idx);
5712 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5713 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5714 tp->rx_jmb_ring_mask;
5715 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5716 tpr->rx_jmb_prod_idx);
5718 mmiowb();
5719 } else if (work_mask) {
5720 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5721 * updated before the producer indices can be updated.
5722 */
5723 smp_wmb();
5725 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5726 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5728 if (tnapi != &tp->napi[1])
5729 napi_schedule(&tp->napi[1].napi);
5732 return received;
5735 static void tg3_poll_link(struct tg3 *tp)
5737 /* handle link change and other phy events */
5738 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5739 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5741 if (sblk->status & SD_STATUS_LINK_CHG) {
5742 sblk->status = SD_STATUS_UPDATED |
5743 (sblk->status & ~SD_STATUS_LINK_CHG);
5744 spin_lock(&tp->lock);
5745 if (tg3_flag(tp, USE_PHYLIB)) {
5746 tw32_f(MAC_STATUS,
5747 (MAC_STATUS_SYNC_CHANGED |
5748 MAC_STATUS_CFG_CHANGED |
5749 MAC_STATUS_MI_COMPLETION |
5750 MAC_STATUS_LNKSTATE_CHANGED));
5751 udelay(40);
5752 } else
5753 tg3_setup_phy(tp, 0);
5754 spin_unlock(&tp->lock);
5759 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5760 struct tg3_rx_prodring_set *dpr,
5761 struct tg3_rx_prodring_set *spr)
5763 u32 si, di, cpycnt, src_prod_idx;
5764 int i, err = 0;
5766 while (1) {
5767 src_prod_idx = spr->rx_std_prod_idx;
5769 /* Make sure updates to the rx_std_buffers[] entries and the
5770 * standard producer index are seen in the correct order.
5771 */
5772 smp_rmb();
5774 if (spr->rx_std_cons_idx == src_prod_idx)
5775 break;
5777 if (spr->rx_std_cons_idx < src_prod_idx)
5778 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5779 else
5780 cpycnt = tp->rx_std_ring_mask + 1 -
5781 spr->rx_std_cons_idx;
5783 cpycnt = min(cpycnt,
5784 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5786 si = spr->rx_std_cons_idx;
5787 di = dpr->rx_std_prod_idx;
5789 for (i = di; i < di + cpycnt; i++) {
5790 if (dpr->rx_std_buffers[i].skb) {
5791 cpycnt = i - di;
5792 err = -ENOSPC;
5793 break;
5797 if (!cpycnt)
5798 break;
5800 /* Ensure that updates to the rx_std_buffers ring and the
5801 * shadowed hardware producer ring from tg3_recycle_rx() are
5802 * ordered correctly WRT the skb check above.
5803 */
5804 smp_rmb();
5806 memcpy(&dpr->rx_std_buffers[di],
5807 &spr->rx_std_buffers[si],
5808 cpycnt * sizeof(struct ring_info));
5810 for (i = 0; i < cpycnt; i++, di++, si++) {
5811 struct tg3_rx_buffer_desc *sbd, *dbd;
5812 sbd = &spr->rx_std[si];
5813 dbd = &dpr->rx_std[di];
5814 dbd->addr_hi = sbd->addr_hi;
5815 dbd->addr_lo = sbd->addr_lo;
5818 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5819 tp->rx_std_ring_mask;
5820 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5821 tp->rx_std_ring_mask;
5824 while (1) {
5825 src_prod_idx = spr->rx_jmb_prod_idx;
5827 /* Make sure updates to the rx_jmb_buffers[] entries and
5828 * the jumbo producer index are seen in the correct order.
5829 */
5830 smp_rmb();
5832 if (spr->rx_jmb_cons_idx == src_prod_idx)
5833 break;
5835 if (spr->rx_jmb_cons_idx < src_prod_idx)
5836 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5837 else
5838 cpycnt = tp->rx_jmb_ring_mask + 1 -
5839 spr->rx_jmb_cons_idx;
5841 cpycnt = min(cpycnt,
5842 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5844 si = spr->rx_jmb_cons_idx;
5845 di = dpr->rx_jmb_prod_idx;
5847 for (i = di; i < di + cpycnt; i++) {
5848 if (dpr->rx_jmb_buffers[i].skb) {
5849 cpycnt = i - di;
5850 err = -ENOSPC;
5851 break;
5855 if (!cpycnt)
5856 break;
5858 /* Ensure that updates to the rx_jmb_buffers ring and the
5859 * shadowed hardware producer ring from tg3_recycle_rx() are
5860 * ordered correctly WRT the skb check above.
5861 */
5862 smp_rmb();
5864 memcpy(&dpr->rx_jmb_buffers[di],
5865 &spr->rx_jmb_buffers[si],
5866 cpycnt * sizeof(struct ring_info));
5868 for (i = 0; i < cpycnt; i++, di++, si++) {
5869 struct tg3_rx_buffer_desc *sbd, *dbd;
5870 sbd = &spr->rx_jmb[si].std;
5871 dbd = &dpr->rx_jmb[di].std;
5872 dbd->addr_hi = sbd->addr_hi;
5873 dbd->addr_lo = sbd->addr_lo;
5876 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5877 tp->rx_jmb_ring_mask;
5878 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5879 tp->rx_jmb_ring_mask;
5882 return err;
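/* A worked example (inside #if 0, never built) of the contiguous-run
 * computation used twice above.  On an 8-entry ring (mask 7) with
 * cons = 6 and prod = 2, four buffers are pending but only 8 - 6 = 2
 * can be memcpy()'d before the source index wraps; the enclosing while
 * loop comes around for the rest.  demo_cpycnt() is a hypothetical
 * name, not part of the driver.
 */
#if 0
static u32 demo_cpycnt(u32 cons_idx, u32 prod_idx, u32 ring_mask)
{
	if (cons_idx < prod_idx)
		return prod_idx - cons_idx;
	return ring_mask + 1 - cons_idx;	/* run up to the wrap */
}
#endif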
5885 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5887 struct tg3 *tp = tnapi->tp;
5889 /* run TX completion thread */
5890 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5891 tg3_tx(tnapi);
5892 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5893 return work_done;
5896 /* run RX thread, within the bounds set by NAPI.
5897 * All RX "locking" is done by ensuring outside
5898 * code synchronizes with tg3->napi.poll()
5899 */
5900 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5901 work_done += tg3_rx(tnapi, budget - work_done);
5903 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5904 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5905 int i, err = 0;
5906 u32 std_prod_idx = dpr->rx_std_prod_idx;
5907 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5909 for (i = 1; i < tp->irq_cnt; i++)
5910 err |= tg3_rx_prodring_xfer(tp, dpr,
5911 &tp->napi[i].prodring);
5913 wmb();
5915 if (std_prod_idx != dpr->rx_std_prod_idx)
5916 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5917 dpr->rx_std_prod_idx);
5919 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5920 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5921 dpr->rx_jmb_prod_idx);
5923 mmiowb();
5925 if (err)
5926 tw32_f(HOSTCC_MODE, tp->coal_now);
5929 return work_done;
5932 static inline void tg3_reset_task_schedule(struct tg3 *tp)
5934 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
5935 schedule_work(&tp->reset_task);
5938 static inline void tg3_reset_task_cancel(struct tg3 *tp)
5940 cancel_work_sync(&tp->reset_task);
5941 tg3_flag_clear(tp, RESET_TASK_PENDING);
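/* A minimal sketch (inside #if 0, never built) of the property
 * tg3_reset_task_schedule() gets from test_and_set_bit(): testing and
 * setting RESET_TASK_PENDING is one atomic step, so when the timer and
 * an error path race, exactly one caller schedules the work.
 * demo_racing_paths() is a hypothetical name, not part of the driver.
 */
#if 0
static void demo_racing_paths(struct tg3 *tp)
{
	tg3_reset_task_schedule(tp);	/* bit was clear: schedules work */
	tg3_reset_task_schedule(tp);	/* bit already set: no-op */
}
#endif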
5944 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5946 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5947 struct tg3 *tp = tnapi->tp;
5948 int work_done = 0;
5949 struct tg3_hw_status *sblk = tnapi->hw_status;
5951 while (1) {
5952 work_done = tg3_poll_work(tnapi, work_done, budget);
5954 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5955 goto tx_recovery;
5957 if (unlikely(work_done >= budget))
5958 break;
5960 /* tp->last_tag is used in tg3_int_reenable() below
5961 * to tell the hw how much work has been processed,
5962 * so we must read it before checking for more work.
5963 */
5964 tnapi->last_tag = sblk->status_tag;
5965 tnapi->last_irq_tag = tnapi->last_tag;
5966 rmb();
5968 /* check for RX/TX work to do */
5969 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5970 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5971 napi_complete(napi);
5972 /* Reenable interrupts. */
5973 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5974 mmiowb();
5975 break;
5979 return work_done;
5981 tx_recovery:
5982 /* work_done is guaranteed to be less than budget. */
5983 napi_complete(napi);
5984 tg3_reset_task_schedule(tp);
5985 return work_done;
5988 static void tg3_process_error(struct tg3 *tp)
5990 u32 val;
5991 bool real_error = false;
5993 if (tg3_flag(tp, ERROR_PROCESSED))
5994 return;
5996 /* Check Flow Attention register */
5997 val = tr32(HOSTCC_FLOW_ATTN);
5998 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5999 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6000 real_error = true;
6003 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6004 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6005 real_error = true;
6008 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6009 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6010 real_error = true;
6013 if (!real_error)
6014 return;
6016 tg3_dump_state(tp);
6018 tg3_flag_set(tp, ERROR_PROCESSED);
6019 tg3_reset_task_schedule(tp);
6022 static int tg3_poll(struct napi_struct *napi, int budget)
6024 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6025 struct tg3 *tp = tnapi->tp;
6026 int work_done = 0;
6027 struct tg3_hw_status *sblk = tnapi->hw_status;
6029 while (1) {
6030 if (sblk->status & SD_STATUS_ERROR)
6031 tg3_process_error(tp);
6033 tg3_poll_link(tp);
6035 work_done = tg3_poll_work(tnapi, work_done, budget);
6037 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6038 goto tx_recovery;
6040 if (unlikely(work_done >= budget))
6041 break;
6043 if (tg3_flag(tp, TAGGED_STATUS)) {
6044 /* tp->last_tag is used in tg3_int_reenable() below
6045 * to tell the hw how much work has been processed,
6046 * so we must read it before checking for more work.
6047 */
6048 tnapi->last_tag = sblk->status_tag;
6049 tnapi->last_irq_tag = tnapi->last_tag;
6050 rmb();
6051 } else
6052 sblk->status &= ~SD_STATUS_UPDATED;
6054 if (likely(!tg3_has_work(tnapi))) {
6055 napi_complete(napi);
6056 tg3_int_reenable(tnapi);
6057 break;
6061 return work_done;
6063 tx_recovery:
6064 /* work_done is guaranteed to be less than budget. */
6065 napi_complete(napi);
6066 tg3_reset_task_schedule(tp);
6067 return work_done;
6070 static void tg3_napi_disable(struct tg3 *tp)
6072 int i;
6074 for (i = tp->irq_cnt - 1; i >= 0; i--)
6075 napi_disable(&tp->napi[i].napi);
6078 static void tg3_napi_enable(struct tg3 *tp)
6080 int i;
6082 for (i = 0; i < tp->irq_cnt; i++)
6083 napi_enable(&tp->napi[i].napi);
6086 static void tg3_napi_init(struct tg3 *tp)
6088 int i;
6090 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6091 for (i = 1; i < tp->irq_cnt; i++)
6092 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6095 static void tg3_napi_fini(struct tg3 *tp)
6097 int i;
6099 for (i = 0; i < tp->irq_cnt; i++)
6100 netif_napi_del(&tp->napi[i].napi);
6103 static inline void tg3_netif_stop(struct tg3 *tp)
6105 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6106 tg3_napi_disable(tp);
6107 netif_tx_disable(tp->dev);
6110 static inline void tg3_netif_start(struct tg3 *tp)
6112 /* NOTE: unconditional netif_tx_wake_all_queues is only
6113 * appropriate so long as all callers are assured to
6114 * have free tx slots (such as after tg3_init_hw)
6115 */
6116 netif_tx_wake_all_queues(tp->dev);
6118 tg3_napi_enable(tp);
6119 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6120 tg3_enable_ints(tp);
6123 static void tg3_irq_quiesce(struct tg3 *tp)
6125 int i;
6127 BUG_ON(tp->irq_sync);
6129 tp->irq_sync = 1;
6130 smp_mb();
6132 for (i = 0; i < tp->irq_cnt; i++)
6133 synchronize_irq(tp->napi[i].irq_vec);
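/* A minimal sketch (inside #if 0, never built) of the handler side of
 * the quiesce handshake above: once irq_sync is set and
 * synchronize_irq() has returned, any later handler invocation sees
 * tg3_irq_sync() != 0 and backs off without touching NAPI.
 * demo_isr() is a hypothetical name, not part of the driver.
 */
#if 0
static irqreturn_t demo_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;

	if (tg3_irq_sync(tnapi->tp))	/* device being quiesced */
		return IRQ_HANDLED;
	napi_schedule(&tnapi->napi);
	return IRQ_HANDLED;
}
#endif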
6136 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6137 * If irq_sync is non-zero, in-flight IRQ handlers must be waited
6138 * on as well. Most of the time this is not necessary, except when
6139 * shutting down the device.
6140 */
6141 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6143 spin_lock_bh(&tp->lock);
6144 if (irq_sync)
6145 tg3_irq_quiesce(tp);
6148 static inline void tg3_full_unlock(struct tg3 *tp)
6150 spin_unlock_bh(&tp->lock);
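/* A minimal sketch (inside #if 0, never built) of how the two helpers
 * above pair up.  Passing irq_sync = 1 also waits out in-flight
 * interrupt handlers, which the shutdown paths need.
 * demo_reconfigure() is a hypothetical name, not part of the driver.
 */
#if 0
static void demo_reconfigure(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);	/* quiesce IRQ handlers as well */
	/* ... reprogram the hardware while everything is stopped ... */
	tg3_full_unlock(tp);
}
#endif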
6153 /* One-shot MSI handler - Chip automatically disables interrupt
6154 * after sending MSI so driver doesn't have to do it.
6155 */
6156 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6158 struct tg3_napi *tnapi = dev_id;
6159 struct tg3 *tp = tnapi->tp;
6161 prefetch(tnapi->hw_status);
6162 if (tnapi->rx_rcb)
6163 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6165 if (likely(!tg3_irq_sync(tp)))
6166 napi_schedule(&tnapi->napi);
6168 return IRQ_HANDLED;
6171 /* MSI ISR - No need to check for interrupt sharing and no need to
6172 * flush status block and interrupt mailbox. PCI ordering rules
6173 * guarantee that MSI will arrive after the status block.
6174 */
6175 static irqreturn_t tg3_msi(int irq, void *dev_id)
6177 struct tg3_napi *tnapi = dev_id;
6178 struct tg3 *tp = tnapi->tp;
6180 prefetch(tnapi->hw_status);
6181 if (tnapi->rx_rcb)
6182 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6183 /*
6184 * Writing any value to intr-mbox-0 clears PCI INTA# and
6185 * chip-internal interrupt pending events.
6186 * Writing non-zero to intr-mbox-0 additionally tells the
6187 * NIC to stop sending us irqs, engaging "in-intr-handler"
6188 * event coalescing.
6189 */
6190 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6191 if (likely(!tg3_irq_sync(tp)))
6192 napi_schedule(&tnapi->napi);
6194 return IRQ_RETVAL(1);
6197 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6199 struct tg3_napi *tnapi = dev_id;
6200 struct tg3 *tp = tnapi->tp;
6201 struct tg3_hw_status *sblk = tnapi->hw_status;
6202 unsigned int handled = 1;
6204 /* In INTx mode, it is possible for the interrupt to arrive at
6205 * the CPU before the status block write that was posted prior to
6206 * the interrupt has landed. Reading the PCI State register will
6207 * confirm whether the interrupt is ours and will flush the status block.
6209 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6210 if (tg3_flag(tp, CHIP_RESETTING) ||
6211 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6212 handled = 0;
6213 goto out;
6218 * Writing any value to intr-mbox-0 clears PCI INTA# and
6219 * chip-internal interrupt pending events.
6220 * Writing non-zero to intr-mbox-0 additionally tells the
6221 * NIC to stop sending us irqs, engaging "in-intr-handler"
6222 * event coalescing.
6224 * Flush the mailbox to de-assert the IRQ immediately to prevent
6225 * spurious interrupts. The flush impacts performance but
6226 * excessive spurious interrupts can be worse in some cases.
6228 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6229 if (tg3_irq_sync(tp))
6230 goto out;
6231 sblk->status &= ~SD_STATUS_UPDATED;
6232 if (likely(tg3_has_work(tnapi))) {
6233 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6234 napi_schedule(&tnapi->napi);
6235 } else {
6236 /* No work; shared interrupt perhaps? Re-enable
6237 * interrupts, and flush that PCI write
6239 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6240 0x00000000);
6242 out:
6243 return IRQ_RETVAL(handled);
6246 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6248 struct tg3_napi *tnapi = dev_id;
6249 struct tg3 *tp = tnapi->tp;
6250 struct tg3_hw_status *sblk = tnapi->hw_status;
6251 unsigned int handled = 1;
6253 /* In INTx mode, it is possible for the interrupt to arrive at
6254 * the CPU before the status block write that was posted prior to
6255 * the interrupt has landed. Reading the PCI State register will
6256 * confirm whether the interrupt is ours and will flush the status block.
6258 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6259 if (tg3_flag(tp, CHIP_RESETTING) ||
6260 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6261 handled = 0;
6262 goto out;
6267 * writing any value to intr-mbox-0 clears PCI INTA# and
6268 * chip-internal interrupt pending events.
6269 * writing non-zero to intr-mbox-0 additionally tells the
6270 * NIC to stop sending us irqs, engaging "in-intr-handler"
6271 * event coalescing.
6273 * Flush the mailbox to de-assert the IRQ immediately to prevent
6274 * spurious interrupts. The flush impacts performance but
6275 * excessive spurious interrupts can be worse in some cases.
6277 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6280 * In a shared interrupt configuration, sometimes other devices'
6281 * interrupts will scream. We record the current status tag here
6282 * so that the above check can report that the screaming interrupts
6283 * are unhandled. Eventually they will be silenced.
6285 tnapi->last_irq_tag = sblk->status_tag;
6287 if (tg3_irq_sync(tp))
6288 goto out;
6290 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6292 napi_schedule(&tnapi->napi);
6294 out:
6295 return IRQ_RETVAL(handled);
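/* Editor's sketch (hypothetical helper name): the tag comparison that
 * drives the handler above. An unchanged status tag means the NIC has
 * produced nothing new since our last ack, so a screaming shared line
 * can be reported as unhandled.
 */
static inline bool tg3_example_tagged_irq_has_work(struct tg3_napi *tnapi)
{
	return tnapi->hw_status->status_tag != tnapi->last_irq_tag;
}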
6298 /* ISR for interrupt test */
6299 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6301 struct tg3_napi *tnapi = dev_id;
6302 struct tg3 *tp = tnapi->tp;
6303 struct tg3_hw_status *sblk = tnapi->hw_status;
6305 if ((sblk->status & SD_STATUS_UPDATED) ||
6306 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6307 tg3_disable_ints(tp);
6308 return IRQ_RETVAL(1);
6310 return IRQ_RETVAL(0);
6313 static int tg3_init_hw(struct tg3 *, int);
6314 static int tg3_halt(struct tg3 *, int, int);
6316 /* Restart hardware after configuration changes, self-test, etc.
6317 * Invoked with tp->lock held.
6319 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
6320 __releases(tp->lock)
6321 __acquires(tp->lock)
6323 int err;
6325 err = tg3_init_hw(tp, reset_phy);
6326 if (err) {
6327 netdev_err(tp->dev,
6328 "Failed to re-initialize device, aborting\n");
6329 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6330 tg3_full_unlock(tp);
6331 del_timer_sync(&tp->timer);
6332 tp->irq_sync = 0;
6333 tg3_napi_enable(tp);
6334 dev_close(tp->dev);
6335 tg3_full_lock(tp, 0);
6337 return err;
6340 #ifdef CONFIG_NET_POLL_CONTROLLER
6341 static void tg3_poll_controller(struct net_device *dev)
6343 int i;
6344 struct tg3 *tp = netdev_priv(dev);
6346 for (i = 0; i < tp->irq_cnt; i++)
6347 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6349 #endif
6351 static void tg3_reset_task(struct work_struct *work)
6353 struct tg3 *tp = container_of(work, struct tg3, reset_task);
6354 int err;
6356 tg3_full_lock(tp, 0);
6358 if (!netif_running(tp->dev)) {
6359 tg3_flag_clear(tp, RESET_TASK_PENDING);
6360 tg3_full_unlock(tp);
6361 return;
6364 tg3_full_unlock(tp);
6366 tg3_phy_stop(tp);
6368 tg3_netif_stop(tp);
6370 tg3_full_lock(tp, 1);
6372 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6373 tp->write32_tx_mbox = tg3_write32_tx_mbox;
6374 tp->write32_rx_mbox = tg3_write_flush_reg32;
6375 tg3_flag_set(tp, MBOX_WRITE_REORDER);
6376 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6379 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
6380 err = tg3_init_hw(tp, 1);
6381 if (err)
6382 goto out;
6384 tg3_netif_start(tp);
6386 out:
6387 tg3_full_unlock(tp);
6389 if (!err)
6390 tg3_phy_start(tp);
6392 tg3_flag_clear(tp, RESET_TASK_PENDING);
6395 static void tg3_tx_timeout(struct net_device *dev)
6397 struct tg3 *tp = netdev_priv(dev);
6399 if (netif_msg_tx_err(tp)) {
6400 netdev_err(dev, "transmit timed out, resetting\n");
6401 tg3_dump_state(tp);
6404 tg3_reset_task_schedule(tp);
6407 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6408 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6410 u32 base = (u32) mapping & 0xffffffff;
6412 return (base > 0xffffdcc0) && (base + len + 8 < base);
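/* Editor's worked example: mapping 0xfffff000 with len 8192 gives
 * base + len + 8 = 0x100001008, which truncates below base in 32 bits,
 * so the test reports a 4GB-boundary crossing.
 */
static inline int tg3_example_4g_crossing(void)
{
	return tg3_4g_overflow_test(0xfffff000, 8192);	/* returns 1 */
}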
6415 /* Test for DMA addresses > 40-bit */
6416 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6417 int len)
6419 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6420 if (tg3_flag(tp, 40BIT_DMA_BUG))
6421 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6422 return 0;
6423 #else
6424 return 0;
6425 #endif
6428 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6429 dma_addr_t mapping, u32 len, u32 flags,
6430 u32 mss, u32 vlan)
6432 txbd->addr_hi = ((u64) mapping >> 32);
6433 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6434 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6435 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6438 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6439 dma_addr_t map, u32 len, u32 flags,
6440 u32 mss, u32 vlan)
6442 struct tg3 *tp = tnapi->tp;
6443 bool hwbug = false;
6445 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6446 hwbug = true;
6448 if (tg3_4g_overflow_test(map, len))
6449 hwbug = true;
6451 if (tg3_40bit_overflow_test(tp, map, len))
6452 hwbug = true;
6454 if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6455 u32 prvidx = *entry;
6456 u32 tmp_flag = flags & ~TXD_FLAG_END;
6457 while (len > TG3_TX_BD_DMA_MAX && *budget) {
6458 u32 frag_len = TG3_TX_BD_DMA_MAX;
6459 len -= TG3_TX_BD_DMA_MAX;
6461 /* Avoid the 8-byte DMA problem */
6462 if (len <= 8) {
6463 len += TG3_TX_BD_DMA_MAX / 2;
6464 frag_len = TG3_TX_BD_DMA_MAX / 2;
6467 tnapi->tx_buffers[*entry].fragmented = true;
6469 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6470 frag_len, tmp_flag, mss, vlan);
6471 *budget -= 1;
6472 prvidx = *entry;
6473 *entry = NEXT_TX(*entry);
6475 map += frag_len;
6478 if (len) {
6479 if (*budget) {
6480 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6481 len, flags, mss, vlan);
6482 *budget -= 1;
6483 *entry = NEXT_TX(*entry);
6484 } else {
6485 hwbug = true;
6486 tnapi->tx_buffers[prvidx].fragmented = false;
6489 } else {
6490 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6491 len, flags, mss, vlan);
6492 *entry = NEXT_TX(*entry);
6495 return hwbug;
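/* Editor's worked example (assuming TG3_TX_BD_DMA_MAX is 4096): a
 * 4100-byte fragment would naively split into 4096 + 4, but a trailing
 * 4-byte BD trips the short-DMA bug, so the loop above rebalances and
 * emits 2048 + 2052 instead.
 */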
6498 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6500 int i;
6501 struct sk_buff *skb;
6502 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6504 skb = txb->skb;
6505 txb->skb = NULL;
6507 pci_unmap_single(tnapi->tp->pdev,
6508 dma_unmap_addr(txb, mapping),
6509 skb_headlen(skb),
6510 PCI_DMA_TODEVICE);
6512 while (txb->fragmented) {
6513 txb->fragmented = false;
6514 entry = NEXT_TX(entry);
6515 txb = &tnapi->tx_buffers[entry];
6518 for (i = 0; i <= last; i++) {
6519 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6521 entry = NEXT_TX(entry);
6522 txb = &tnapi->tx_buffers[entry];
6524 pci_unmap_page(tnapi->tp->pdev,
6525 dma_unmap_addr(txb, mapping),
6526 skb_frag_size(frag), PCI_DMA_TODEVICE);
6528 while (txb->fragmented) {
6529 txb->fragmented = false;
6530 entry = NEXT_TX(entry);
6531 txb = &tnapi->tx_buffers[entry];
6536 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6537 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6538 struct sk_buff **pskb,
6539 u32 *entry, u32 *budget,
6540 u32 base_flags, u32 mss, u32 vlan)
6542 struct tg3 *tp = tnapi->tp;
6543 struct sk_buff *new_skb, *skb = *pskb;
6544 dma_addr_t new_addr = 0;
6545 int ret = 0;
6547 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6548 new_skb = skb_copy(skb, GFP_ATOMIC);
6549 else {
6550 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6552 new_skb = skb_copy_expand(skb,
6553 skb_headroom(skb) + more_headroom,
6554 skb_tailroom(skb), GFP_ATOMIC);
6557 if (!new_skb) {
6558 ret = -1;
6559 } else {
6560 /* New SKB is guaranteed to be linear. */
6561 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6562 PCI_DMA_TODEVICE);
6563 /* Make sure the mapping succeeded */
6564 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6565 dev_kfree_skb(new_skb);
6566 ret = -1;
6567 } else {
6568 u32 save_entry = *entry;
6570 base_flags |= TXD_FLAG_END;
6572 tnapi->tx_buffers[*entry].skb = new_skb;
6573 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6574 mapping, new_addr);
6576 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6577 new_skb->len, base_flags,
6578 mss, vlan)) {
6579 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6580 dev_kfree_skb(new_skb);
6581 ret = -1;
6586 dev_kfree_skb(skb);
6587 *pskb = new_skb;
6588 return ret;
6591 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6593 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6594 * TSO header is greater than 80 bytes.
6596 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6598 struct sk_buff *segs, *nskb;
6599 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6601 /* Estimate the number of fragments in the worst case */
6602 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6603 netif_stop_queue(tp->dev);
6605 /* netif_tx_stop_queue() must be done before checking
6606 * tx index in tg3_tx_avail() below, because in
6607 * tg3_tx(), we update tx index before checking for
6608 * netif_tx_queue_stopped().
6610 smp_mb();
6611 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6612 return NETDEV_TX_BUSY;
6614 netif_wake_queue(tp->dev);
6617 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6618 if (IS_ERR(segs))
6619 goto tg3_tso_bug_end;
6621 do {
6622 nskb = segs;
6623 segs = segs->next;
6624 nskb->next = NULL;
6625 tg3_start_xmit(nskb, tp->dev);
6626 } while (segs);
6628 tg3_tso_bug_end:
6629 dev_kfree_skb(skb);
6631 return NETDEV_TX_OK;
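/* Editor's sketch (hypothetical helper): the segment-walk idiom used in
 * tg3_tso_bug() above. skb_gso_segment() returns a singly linked list of
 * self-contained skbs; each must be detached before being handed back to
 * the xmit path.
 */
static inline void tg3_example_xmit_segs(struct tg3 *tp, struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *nskb = segs;

		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	}
}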
6634 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6635 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6637 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6639 struct tg3 *tp = netdev_priv(dev);
6640 u32 len, entry, base_flags, mss, vlan = 0;
6641 u32 budget;
6642 int i = -1, would_hit_hwbug;
6643 dma_addr_t mapping;
6644 struct tg3_napi *tnapi;
6645 struct netdev_queue *txq;
6646 unsigned int last;
6648 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6649 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6650 if (tg3_flag(tp, ENABLE_TSS))
6651 tnapi++;
6653 budget = tg3_tx_avail(tnapi);
6655 /* We are running in a BH-disabled context holding netif_tx_lock,
6656 * and TX reclaim runs via tp->napi.poll inside a software
6657 * interrupt. Furthermore, IRQ processing runs lockless so we have
6658 * no IRQ context deadlocks to worry about either. Rejoice!
6660 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6661 if (!netif_tx_queue_stopped(txq)) {
6662 netif_tx_stop_queue(txq);
6664 /* This is a hard error, log it. */
6665 netdev_err(dev,
6666 "BUG! Tx Ring full when queue awake!\n");
6668 return NETDEV_TX_BUSY;
6671 entry = tnapi->tx_prod;
6672 base_flags = 0;
6673 if (skb->ip_summed == CHECKSUM_PARTIAL)
6674 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6676 mss = skb_shinfo(skb)->gso_size;
6677 if (mss) {
6678 struct iphdr *iph;
6679 u32 tcp_opt_len, hdr_len;
6681 if (skb_header_cloned(skb) &&
6682 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6683 goto drop;
6685 iph = ip_hdr(skb);
6686 tcp_opt_len = tcp_optlen(skb);
6688 if (skb_is_gso_v6(skb)) {
6689 hdr_len = skb_headlen(skb) - ETH_HLEN;
6690 } else {
6691 u32 ip_tcp_len;
6693 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6694 hdr_len = ip_tcp_len + tcp_opt_len;
6696 iph->check = 0;
6697 iph->tot_len = htons(mss + hdr_len);
6700 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6701 tg3_flag(tp, TSO_BUG))
6702 return tg3_tso_bug(tp, skb);
6704 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6705 TXD_FLAG_CPU_POST_DMA);
6707 if (tg3_flag(tp, HW_TSO_1) ||
6708 tg3_flag(tp, HW_TSO_2) ||
6709 tg3_flag(tp, HW_TSO_3)) {
6710 tcp_hdr(skb)->check = 0;
6711 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6712 } else
6713 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6714 iph->daddr, 0,
6715 IPPROTO_TCP,
6718 if (tg3_flag(tp, HW_TSO_3)) {
6719 mss |= (hdr_len & 0xc) << 12;
6720 if (hdr_len & 0x10)
6721 base_flags |= 0x00000010;
6722 base_flags |= (hdr_len & 0x3e0) << 5;
6723 } else if (tg3_flag(tp, HW_TSO_2))
6724 mss |= hdr_len << 9;
6725 else if (tg3_flag(tp, HW_TSO_1) ||
6726 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6727 if (tcp_opt_len || iph->ihl > 5) {
6728 int tsflags;
6730 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6731 mss |= (tsflags << 11);
6733 } else {
6734 if (tcp_opt_len || iph->ihl > 5) {
6735 int tsflags;
6737 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6738 base_flags |= tsflags << 12;
6743 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6744 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6745 base_flags |= TXD_FLAG_JMB_PKT;
6747 if (vlan_tx_tag_present(skb)) {
6748 base_flags |= TXD_FLAG_VLAN;
6749 vlan = vlan_tx_tag_get(skb);
6752 len = skb_headlen(skb);
6754 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6755 if (pci_dma_mapping_error(tp->pdev, mapping))
6756 goto drop;
6759 tnapi->tx_buffers[entry].skb = skb;
6760 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6762 would_hit_hwbug = 0;
6764 if (tg3_flag(tp, 5701_DMA_BUG))
6765 would_hit_hwbug = 1;
6767 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6768 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6769 mss, vlan)) {
6770 would_hit_hwbug = 1;
6771 /* Now loop through additional data fragments, and queue them. */
6772 } else if (skb_shinfo(skb)->nr_frags > 0) {
6773 u32 tmp_mss = mss;
6775 if (!tg3_flag(tp, HW_TSO_1) &&
6776 !tg3_flag(tp, HW_TSO_2) &&
6777 !tg3_flag(tp, HW_TSO_3))
6778 tmp_mss = 0;
6780 last = skb_shinfo(skb)->nr_frags - 1;
6781 for (i = 0; i <= last; i++) {
6782 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6784 len = skb_frag_size(frag);
6785 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6786 len, DMA_TO_DEVICE);
6788 tnapi->tx_buffers[entry].skb = NULL;
6789 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6790 mapping);
6791 if (dma_mapping_error(&tp->pdev->dev, mapping))
6792 goto dma_error;
6794 if (!budget ||
6795 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6796 len, base_flags |
6797 ((i == last) ? TXD_FLAG_END : 0),
6798 tmp_mss, vlan)) {
6799 would_hit_hwbug = 1;
6800 break;
6805 if (would_hit_hwbug) {
6806 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6808 /* If the workaround fails due to memory/mapping
6809 * failure, silently drop this packet.
6811 entry = tnapi->tx_prod;
6812 budget = tg3_tx_avail(tnapi);
6813 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6814 base_flags, mss, vlan))
6815 goto drop_nofree;
6818 skb_tx_timestamp(skb);
6820 /* Packets are ready; update the Tx producer index locally and on the card. */
6821 tw32_tx_mbox(tnapi->prodmbox, entry);
6823 tnapi->tx_prod = entry;
6824 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6825 netif_tx_stop_queue(txq);
6827 /* netif_tx_stop_queue() must be done before checking
6828 * tx index in tg3_tx_avail() below, because in
6829 * tg3_tx(), we update tx index before checking for
6830 * netif_tx_queue_stopped().
6832 smp_mb();
6833 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6834 netif_tx_wake_queue(txq);
6837 mmiowb();
6838 return NETDEV_TX_OK;
6840 dma_error:
6841 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6842 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6843 drop:
6844 dev_kfree_skb(skb);
6845 drop_nofree:
6846 tp->tx_dropped++;
6847 return NETDEV_TX_OK;
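/* Editor's sketch (hypothetical helper): the stop/re-check/wake protocol
 * used twice in the xmit path above. The barrier orders the queue stop
 * against the re-read of the tx index, pairing with tg3_tx(), which
 * updates the index before testing netif_tx_queue_stopped().
 */
static inline void tg3_example_stop_queue(struct tg3_napi *tnapi,
					  struct netdev_queue *txq)
{
	netif_tx_stop_queue(txq);
	smp_mb();	/* pairs with the update/test ordering in tg3_tx() */
	if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
		netif_tx_wake_queue(txq);
}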
6850 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6852 if (enable) {
6853 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6854 MAC_MODE_PORT_MODE_MASK);
6856 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6858 if (!tg3_flag(tp, 5705_PLUS))
6859 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6861 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6862 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6863 else
6864 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6865 } else {
6866 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6868 if (tg3_flag(tp, 5705_PLUS) ||
6869 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6870 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6871 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6874 tw32(MAC_MODE, tp->mac_mode);
6875 udelay(40);
6878 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6880 u32 val, bmcr, mac_mode, ptest = 0;
6882 tg3_phy_toggle_apd(tp, false);
6883 tg3_phy_toggle_automdix(tp, 0);
6885 if (extlpbk && tg3_phy_set_extloopbk(tp))
6886 return -EIO;
6888 bmcr = BMCR_FULLDPLX;
6889 switch (speed) {
6890 case SPEED_10:
6891 break;
6892 case SPEED_100:
6893 bmcr |= BMCR_SPEED100;
6894 break;
6895 case SPEED_1000:
6896 default:
6897 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6898 speed = SPEED_100;
6899 bmcr |= BMCR_SPEED100;
6900 } else {
6901 speed = SPEED_1000;
6902 bmcr |= BMCR_SPEED1000;
6906 if (extlpbk) {
6907 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6908 tg3_readphy(tp, MII_CTRL1000, &val);
6909 val |= CTL1000_AS_MASTER |
6910 CTL1000_ENABLE_MASTER;
6911 tg3_writephy(tp, MII_CTRL1000, val);
6912 } else {
6913 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6914 MII_TG3_FET_PTEST_TRIM_2;
6915 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6917 } else
6918 bmcr |= BMCR_LOOPBACK;
6920 tg3_writephy(tp, MII_BMCR, bmcr);
6922 /* The write needs to be flushed for the FETs */
6923 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6924 tg3_readphy(tp, MII_BMCR, &bmcr);
6926 udelay(40);
6928 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6929 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6930 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6931 MII_TG3_FET_PTEST_FRC_TX_LINK |
6932 MII_TG3_FET_PTEST_FRC_TX_LOCK);
6934 /* The write needs to be flushed for the AC131 */
6935 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6938 /* Reset to prevent losing 1st rx packet intermittently */
6939 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6940 tg3_flag(tp, 5780_CLASS)) {
6941 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6942 udelay(10);
6943 tw32_f(MAC_RX_MODE, tp->rx_mode);
6946 mac_mode = tp->mac_mode &
6947 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6948 if (speed == SPEED_1000)
6949 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6950 else
6951 mac_mode |= MAC_MODE_PORT_MODE_MII;
6953 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6954 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6956 if (masked_phy_id == TG3_PHY_ID_BCM5401)
6957 mac_mode &= ~MAC_MODE_LINK_POLARITY;
6958 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6959 mac_mode |= MAC_MODE_LINK_POLARITY;
6961 tg3_writephy(tp, MII_TG3_EXT_CTRL,
6962 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6965 tw32(MAC_MODE, mac_mode);
6966 udelay(40);
6968 return 0;
6971 static void tg3_set_loopback(struct net_device *dev, u32 features)
6973 struct tg3 *tp = netdev_priv(dev);
6975 if (features & NETIF_F_LOOPBACK) {
6976 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6977 return;
6979 spin_lock_bh(&tp->lock);
6980 tg3_mac_loopback(tp, true);
6981 netif_carrier_on(tp->dev);
6982 spin_unlock_bh(&tp->lock);
6983 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6984 } else {
6985 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6986 return;
6988 spin_lock_bh(&tp->lock);
6989 tg3_mac_loopback(tp, false);
6990 /* Force link status check */
6991 tg3_setup_phy(tp, 1);
6992 spin_unlock_bh(&tp->lock);
6993 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6997 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6999 struct tg3 *tp = netdev_priv(dev);
7001 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7002 features &= ~NETIF_F_ALL_TSO;
7004 return features;
7007 static int tg3_set_features(struct net_device *dev, u32 features)
7009 u32 changed = dev->features ^ features;
7011 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7012 tg3_set_loopback(dev, features);
7014 return 0;
7017 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
7018 int new_mtu)
7020 dev->mtu = new_mtu;
7022 if (new_mtu > ETH_DATA_LEN) {
7023 if (tg3_flag(tp, 5780_CLASS)) {
7024 netdev_update_features(dev);
7025 tg3_flag_clear(tp, TSO_CAPABLE);
7026 } else {
7027 tg3_flag_set(tp, JUMBO_RING_ENABLE);
7029 } else {
7030 if (tg3_flag(tp, 5780_CLASS)) {
7031 tg3_flag_set(tp, TSO_CAPABLE);
7032 netdev_update_features(dev);
7034 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
7038 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
7040 struct tg3 *tp = netdev_priv(dev);
7041 int err;
7043 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
7044 return -EINVAL;
7046 if (!netif_running(dev)) {
7047 /* We'll just catch it later when the
7048 * device is brought up.
7050 tg3_set_mtu(dev, tp, new_mtu);
7051 return 0;
7054 tg3_phy_stop(tp);
7056 tg3_netif_stop(tp);
7058 tg3_full_lock(tp, 1);
7060 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7062 tg3_set_mtu(dev, tp, new_mtu);
7064 err = tg3_restart_hw(tp, 0);
7066 if (!err)
7067 tg3_netif_start(tp);
7069 tg3_full_unlock(tp);
7071 if (!err)
7072 tg3_phy_start(tp);
7074 return err;
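/* Editor's note: the sequence above (phy_stop -> netif_stop -> full_lock
 * with irq_sync -> halt -> set_mtu -> restart_hw -> netif_start ->
 * phy_start) is the canonical "reconfigure while up" shape in this
 * driver; tg3_reset_task() follows the same outline.
 */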
7077 static void tg3_rx_prodring_free(struct tg3 *tp,
7078 struct tg3_rx_prodring_set *tpr)
7080 int i;
7082 if (tpr != &tp->napi[0].prodring) {
7083 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7084 i = (i + 1) & tp->rx_std_ring_mask)
7085 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7086 tp->rx_pkt_map_sz);
7088 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7089 for (i = tpr->rx_jmb_cons_idx;
7090 i != tpr->rx_jmb_prod_idx;
7091 i = (i + 1) & tp->rx_jmb_ring_mask) {
7092 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7093 TG3_RX_JMB_MAP_SZ);
7097 return;
7100 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7101 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7102 tp->rx_pkt_map_sz);
7104 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7105 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7106 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7107 TG3_RX_JMB_MAP_SZ);
7111 /* Initialize rx rings for packet processing.
7113 * The chip has been shut down and the driver detached from
7114 * the networking, so no interrupts or new tx packets will
7115 * end up in the driver. tp->{tx,}lock are held and thus
7116 * we may not sleep.
7118 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7119 struct tg3_rx_prodring_set *tpr)
7121 u32 i, rx_pkt_dma_sz;
7123 tpr->rx_std_cons_idx = 0;
7124 tpr->rx_std_prod_idx = 0;
7125 tpr->rx_jmb_cons_idx = 0;
7126 tpr->rx_jmb_prod_idx = 0;
7128 if (tpr != &tp->napi[0].prodring) {
7129 memset(&tpr->rx_std_buffers[0], 0,
7130 TG3_RX_STD_BUFF_RING_SIZE(tp));
7131 if (tpr->rx_jmb_buffers)
7132 memset(&tpr->rx_jmb_buffers[0], 0,
7133 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7134 goto done;
7137 /* Zero out all descriptors. */
7138 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7140 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7141 if (tg3_flag(tp, 5780_CLASS) &&
7142 tp->dev->mtu > ETH_DATA_LEN)
7143 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7144 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7146 /* Initialize invariants of the rings; we only set this
7147 * stuff once. This works because the card does not
7148 * write into the rx buffer posting rings.
7150 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7151 struct tg3_rx_buffer_desc *rxd;
7153 rxd = &tpr->rx_std[i];
7154 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7155 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7156 rxd->opaque = (RXD_OPAQUE_RING_STD |
7157 (i << RXD_OPAQUE_INDEX_SHIFT));
7160 /* Now allocate fresh SKBs for each rx ring. */
7161 for (i = 0; i < tp->rx_pending; i++) {
7162 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7163 netdev_warn(tp->dev,
7164 "Using a smaller RX standard ring. Only "
7165 "%d out of %d buffers were allocated "
7166 "successfully\n", i, tp->rx_pending);
7167 if (i == 0)
7168 goto initfail;
7169 tp->rx_pending = i;
7170 break;
7174 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7175 goto done;
7177 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7179 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7180 goto done;
7182 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7183 struct tg3_rx_buffer_desc *rxd;
7185 rxd = &tpr->rx_jmb[i].std;
7186 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7187 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7188 RXD_FLAG_JUMBO;
7189 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7190 (i << RXD_OPAQUE_INDEX_SHIFT));
7193 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7194 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7195 netdev_warn(tp->dev,
7196 "Using a smaller RX jumbo ring. Only %d "
7197 "out of %d buffers were allocated "
7198 "successfully\n", i, tp->rx_jumbo_pending);
7199 if (i == 0)
7200 goto initfail;
7201 tp->rx_jumbo_pending = i;
7202 break;
7206 done:
7207 return 0;
7209 initfail:
7210 tg3_rx_prodring_free(tp, tpr);
7211 return -ENOMEM;
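/* Editor's sketch (hypothetical helper): the opaque cookie format set up
 * above. The chip echoes this value back in rx completions, letting the
 * rx path recover the ring and buffer index of the posted SKB.
 */
static inline u32 tg3_example_std_opaque(u32 idx)
{
	return RXD_OPAQUE_RING_STD | (idx << RXD_OPAQUE_INDEX_SHIFT);
}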
7214 static void tg3_rx_prodring_fini(struct tg3 *tp,
7215 struct tg3_rx_prodring_set *tpr)
7217 kfree(tpr->rx_std_buffers);
7218 tpr->rx_std_buffers = NULL;
7219 kfree(tpr->rx_jmb_buffers);
7220 tpr->rx_jmb_buffers = NULL;
7221 if (tpr->rx_std) {
7222 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7223 tpr->rx_std, tpr->rx_std_mapping);
7224 tpr->rx_std = NULL;
7226 if (tpr->rx_jmb) {
7227 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7228 tpr->rx_jmb, tpr->rx_jmb_mapping);
7229 tpr->rx_jmb = NULL;
7233 static int tg3_rx_prodring_init(struct tg3 *tp,
7234 struct tg3_rx_prodring_set *tpr)
7236 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7237 GFP_KERNEL);
7238 if (!tpr->rx_std_buffers)
7239 return -ENOMEM;
7241 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7242 TG3_RX_STD_RING_BYTES(tp),
7243 &tpr->rx_std_mapping,
7244 GFP_KERNEL);
7245 if (!tpr->rx_std)
7246 goto err_out;
7248 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7249 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7250 GFP_KERNEL);
7251 if (!tpr->rx_jmb_buffers)
7252 goto err_out;
7254 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7255 TG3_RX_JMB_RING_BYTES(tp),
7256 &tpr->rx_jmb_mapping,
7257 GFP_KERNEL);
7258 if (!tpr->rx_jmb)
7259 goto err_out;
7262 return 0;
7264 err_out:
7265 tg3_rx_prodring_fini(tp, tpr);
7266 return -ENOMEM;
7269 /* Free up pending packets in all rx/tx rings.
7271 * The chip has been shut down and the driver detached from
7272 * the networking, so no interrupts or new tx packets will
7273 * end up in the driver. tp->{tx,}lock is not held and we are not
7274 * in an interrupt context and thus may sleep.
7276 static void tg3_free_rings(struct tg3 *tp)
7278 int i, j;
7280 for (j = 0; j < tp->irq_cnt; j++) {
7281 struct tg3_napi *tnapi = &tp->napi[j];
7283 tg3_rx_prodring_free(tp, &tnapi->prodring);
7285 if (!tnapi->tx_buffers)
7286 continue;
7288 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7289 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7291 if (!skb)
7292 continue;
7294 tg3_tx_skb_unmap(tnapi, i,
7295 skb_shinfo(skb)->nr_frags - 1);
7297 dev_kfree_skb_any(skb);
7302 /* Initialize tx/rx rings for packet processing.
7304 * The chip has been shut down and the driver detached from
7305 * the networking, so no interrupts or new tx packets will
7306 * end up in the driver. tp->{tx,}lock are held and thus
7307 * we may not sleep.
7309 static int tg3_init_rings(struct tg3 *tp)
7311 int i;
7313 /* Free up all the SKBs. */
7314 tg3_free_rings(tp);
7316 for (i = 0; i < tp->irq_cnt; i++) {
7317 struct tg3_napi *tnapi = &tp->napi[i];
7319 tnapi->last_tag = 0;
7320 tnapi->last_irq_tag = 0;
7321 tnapi->hw_status->status = 0;
7322 tnapi->hw_status->status_tag = 0;
7323 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7325 tnapi->tx_prod = 0;
7326 tnapi->tx_cons = 0;
7327 if (tnapi->tx_ring)
7328 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7330 tnapi->rx_rcb_ptr = 0;
7331 if (tnapi->rx_rcb)
7332 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7334 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7335 tg3_free_rings(tp);
7336 return -ENOMEM;
7340 return 0;
7344 * Must not be invoked with interrupt sources disabled and
7345 * the hardware shut down.
7347 static void tg3_free_consistent(struct tg3 *tp)
7349 int i;
7351 for (i = 0; i < tp->irq_cnt; i++) {
7352 struct tg3_napi *tnapi = &tp->napi[i];
7354 if (tnapi->tx_ring) {
7355 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7356 tnapi->tx_ring, tnapi->tx_desc_mapping);
7357 tnapi->tx_ring = NULL;
7360 kfree(tnapi->tx_buffers);
7361 tnapi->tx_buffers = NULL;
7363 if (tnapi->rx_rcb) {
7364 dma_free_coherent(&tp->pdev->dev,
7365 TG3_RX_RCB_RING_BYTES(tp),
7366 tnapi->rx_rcb,
7367 tnapi->rx_rcb_mapping);
7368 tnapi->rx_rcb = NULL;
7371 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7373 if (tnapi->hw_status) {
7374 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7375 tnapi->hw_status,
7376 tnapi->status_mapping);
7377 tnapi->hw_status = NULL;
7381 if (tp->hw_stats) {
7382 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7383 tp->hw_stats, tp->stats_mapping);
7384 tp->hw_stats = NULL;
7389 * Must not be invoked with interrupt sources disabled and
7390 * the hardware shut down. Can sleep.
7392 static int tg3_alloc_consistent(struct tg3 *tp)
7394 int i;
7396 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7397 sizeof(struct tg3_hw_stats),
7398 &tp->stats_mapping,
7399 GFP_KERNEL);
7400 if (!tp->hw_stats)
7401 goto err_out;
7403 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7405 for (i = 0; i < tp->irq_cnt; i++) {
7406 struct tg3_napi *tnapi = &tp->napi[i];
7407 struct tg3_hw_status *sblk;
7409 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7410 TG3_HW_STATUS_SIZE,
7411 &tnapi->status_mapping,
7412 GFP_KERNEL);
7413 if (!tnapi->hw_status)
7414 goto err_out;
7416 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7417 sblk = tnapi->hw_status;
7419 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7420 goto err_out;
7422 /* If multivector TSS is enabled, vector 0 does not handle
7423 * tx interrupts. Don't allocate any resources for it.
7425 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7426 (i && tg3_flag(tp, ENABLE_TSS))) {
7427 tnapi->tx_buffers = kzalloc(
7428 sizeof(struct tg3_tx_ring_info) *
7429 TG3_TX_RING_SIZE, GFP_KERNEL);
7430 if (!tnapi->tx_buffers)
7431 goto err_out;
7433 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7434 TG3_TX_RING_BYTES,
7435 &tnapi->tx_desc_mapping,
7436 GFP_KERNEL);
7437 if (!tnapi->tx_ring)
7438 goto err_out;
7442 * When RSS is enabled, the status block format changes
7443 * slightly. The "rx_jumbo_consumer", "reserved",
7444 * and "rx_mini_consumer" members get mapped to the
7445 * other three rx return ring producer indexes.
7447 switch (i) {
7448 default:
7449 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7450 break;
7451 case 2:
7452 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7453 break;
7454 case 3:
7455 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7456 break;
7457 case 4:
7458 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7459 break;
7463 * If multivector RSS is enabled, vector 0 does not handle
7464 * rx or tx interrupts. Don't allocate any resources for it.
7466 if (!i && tg3_flag(tp, ENABLE_RSS))
7467 continue;
7469 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7470 TG3_RX_RCB_RING_BYTES(tp),
7471 &tnapi->rx_rcb_mapping,
7472 GFP_KERNEL);
7473 if (!tnapi->rx_rcb)
7474 goto err_out;
7476 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7479 return 0;
7481 err_out:
7482 tg3_free_consistent(tp);
7483 return -ENOMEM;
7486 #define MAX_WAIT_CNT 1000
7488 /* To stop a block, clear the enable bit and poll till it
7489 * clears. tp->lock is held.
7491 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7493 unsigned int i;
7494 u32 val;
7496 if (tg3_flag(tp, 5705_PLUS)) {
7497 switch (ofs) {
7498 case RCVLSC_MODE:
7499 case DMAC_MODE:
7500 case MBFREE_MODE:
7501 case BUFMGR_MODE:
7502 case MEMARB_MODE:
7503 /* We can't enable/disable these bits of the
7504 * 5705/5750; just say success.
7506 return 0;
7508 default:
7509 break;
7513 val = tr32(ofs);
7514 val &= ~enable_bit;
7515 tw32_f(ofs, val);
7517 for (i = 0; i < MAX_WAIT_CNT; i++) {
7518 udelay(100);
7519 val = tr32(ofs);
7520 if ((val & enable_bit) == 0)
7521 break;
7524 if (i == MAX_WAIT_CNT && !silent) {
7525 dev_err(&tp->pdev->dev,
7526 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7527 ofs, enable_bit);
7528 return -ENODEV;
7531 return 0;
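/* Editor's note: MAX_WAIT_CNT iterations of udelay(100) give each block
 * up to 100ms to quiesce. tg3_abort_hw() below chains these calls and
 * ORs the results, so one stubborn block does not hide failures in the
 * others.
 */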
7534 /* tp->lock is held. */
7535 static int tg3_abort_hw(struct tg3 *tp, int silent)
7537 int i, err;
7539 tg3_disable_ints(tp);
7541 tp->rx_mode &= ~RX_MODE_ENABLE;
7542 tw32_f(MAC_RX_MODE, tp->rx_mode);
7543 udelay(10);
7545 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7546 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7547 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7548 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7549 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7550 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7552 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7553 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7554 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7555 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7556 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7557 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7558 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7560 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7561 tw32_f(MAC_MODE, tp->mac_mode);
7562 udelay(40);
7564 tp->tx_mode &= ~TX_MODE_ENABLE;
7565 tw32_f(MAC_TX_MODE, tp->tx_mode);
7567 for (i = 0; i < MAX_WAIT_CNT; i++) {
7568 udelay(100);
7569 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7570 break;
7572 if (i >= MAX_WAIT_CNT) {
7573 dev_err(&tp->pdev->dev,
7574 "%s timed out, TX_MODE_ENABLE will not clear "
7575 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7576 err |= -ENODEV;
7579 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7580 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7581 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7583 tw32(FTQ_RESET, 0xffffffff);
7584 tw32(FTQ_RESET, 0x00000000);
7586 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7587 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7589 for (i = 0; i < tp->irq_cnt; i++) {
7590 struct tg3_napi *tnapi = &tp->napi[i];
7591 if (tnapi->hw_status)
7592 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7594 if (tp->hw_stats)
7595 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7597 return err;
7600 /* Save PCI command register before chip reset */
7601 static void tg3_save_pci_state(struct tg3 *tp)
7603 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7606 /* Restore PCI state after chip reset */
7607 static void tg3_restore_pci_state(struct tg3 *tp)
7609 u32 val;
7611 /* Re-enable indirect register accesses. */
7612 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7613 tp->misc_host_ctrl);
7615 /* Set MAX PCI retry to zero. */
7616 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7617 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7618 tg3_flag(tp, PCIX_MODE))
7619 val |= PCISTATE_RETRY_SAME_DMA;
7620 /* Allow reads and writes to the APE register and memory space. */
7621 if (tg3_flag(tp, ENABLE_APE))
7622 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7623 PCISTATE_ALLOW_APE_SHMEM_WR |
7624 PCISTATE_ALLOW_APE_PSPACE_WR;
7625 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7627 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7629 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7630 if (tg3_flag(tp, PCI_EXPRESS))
7631 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7632 else {
7633 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7634 tp->pci_cacheline_sz);
7635 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7636 tp->pci_lat_timer);
7640 /* Make sure PCI-X relaxed ordering bit is clear. */
7641 if (tg3_flag(tp, PCIX_MODE)) {
7642 u16 pcix_cmd;
7644 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7645 &pcix_cmd);
7646 pcix_cmd &= ~PCI_X_CMD_ERO;
7647 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7648 pcix_cmd);
7651 if (tg3_flag(tp, 5780_CLASS)) {
7653 /* Chip reset on 5780 will reset MSI enable bit,
7654 * so need to restore it.
7656 if (tg3_flag(tp, USING_MSI)) {
7657 u16 ctrl;
7659 pci_read_config_word(tp->pdev,
7660 tp->msi_cap + PCI_MSI_FLAGS,
7661 &ctrl);
7662 pci_write_config_word(tp->pdev,
7663 tp->msi_cap + PCI_MSI_FLAGS,
7664 ctrl | PCI_MSI_FLAGS_ENABLE);
7665 val = tr32(MSGINT_MODE);
7666 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7671 /* tp->lock is held. */
7672 static int tg3_chip_reset(struct tg3 *tp)
7674 u32 val;
7675 void (*write_op)(struct tg3 *, u32, u32);
7676 int i, err;
7678 tg3_nvram_lock(tp);
7680 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7682 /* No matching tg3_nvram_unlock() after this because
7683 * chip reset below will undo the nvram lock.
7685 tp->nvram_lock_cnt = 0;
7687 /* GRC_MISC_CFG core clock reset will clear the memory
7688 * enable bit in PCI register 4 and the MSI enable bit
7689 * on some chips, so we save relevant registers here.
7691 tg3_save_pci_state(tp);
7693 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7694 tg3_flag(tp, 5755_PLUS))
7695 tw32(GRC_FASTBOOT_PC, 0);
7698 * We must avoid the readl() that normally takes place.
7699 * It locks machines, causes machine checks, and other
7700 * fun things. So, temporarily disable the 5701
7701 * hardware workaround, while we do the reset.
7703 write_op = tp->write32;
7704 if (write_op == tg3_write_flush_reg32)
7705 tp->write32 = tg3_write32;
7707 /* Prevent the irq handler from reading or writing PCI registers
7708 * during chip reset when the memory enable bit in the PCI command
7709 * register may be cleared. The chip does not generate interrupts
7710 * at this time, but the irq handler may still be called due to irq
7711 * sharing or irqpoll.
7713 tg3_flag_set(tp, CHIP_RESETTING);
7714 for (i = 0; i < tp->irq_cnt; i++) {
7715 struct tg3_napi *tnapi = &tp->napi[i];
7716 if (tnapi->hw_status) {
7717 tnapi->hw_status->status = 0;
7718 tnapi->hw_status->status_tag = 0;
7720 tnapi->last_tag = 0;
7721 tnapi->last_irq_tag = 0;
7723 smp_mb();
7725 for (i = 0; i < tp->irq_cnt; i++)
7726 synchronize_irq(tp->napi[i].irq_vec);
7728 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7729 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7730 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7733 /* do the reset */
7734 val = GRC_MISC_CFG_CORECLK_RESET;
7736 if (tg3_flag(tp, PCI_EXPRESS)) {
7737 /* Force PCIe 1.0a mode */
7738 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7739 !tg3_flag(tp, 57765_PLUS) &&
7740 tr32(TG3_PCIE_PHY_TSTCTL) ==
7741 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7742 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7744 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7745 tw32(GRC_MISC_CFG, (1 << 29));
7746 val |= (1 << 29);
7750 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7751 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7752 tw32(GRC_VCPU_EXT_CTRL,
7753 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7756 /* Manage gphy power for all CPMU-absent PCIe devices. */
7757 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7758 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7760 tw32(GRC_MISC_CFG, val);
7762 /* restore 5701 hardware bug workaround write method */
7763 tp->write32 = write_op;
7765 /* Unfortunately, we have to delay before the PCI read back.
7766 * Some 575X chips will not even respond to a PCI cfg access
7767 * when the reset command is given to the chip.
7769 * How do these hardware designers expect things to work
7770 * properly if the PCI write is posted for a long period
7771 * of time? It is always necessary to have some method by
7772 * which a register read back can occur to push the write
7773 * out which does the reset.
7775 * For most tg3 variants the trick below was working.
7776 * Ho hum...
7778 udelay(120);
7780 /* Flush PCI posted writes. The normal MMIO registers
7781 * are inaccessible at this time so this is the only
7782 * way to make this reliably (actually, this is no longer
7783 * the case, see above). I tried to use indirect
7784 * register read/write but this upset some 5701 variants.
7786 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7788 udelay(120);
7790 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7791 u16 val16;
7793 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7794 int i;
7795 u32 cfg_val;
7797 /* Wait for link training to complete. */
7798 for (i = 0; i < 5000; i++)
7799 udelay(100);
7801 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7802 pci_write_config_dword(tp->pdev, 0xc4,
7803 cfg_val | (1 << 15));
7806 /* Clear the "no snoop" and "relaxed ordering" bits. */
7807 pci_read_config_word(tp->pdev,
7808 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7809 &val16);
7810 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7811 PCI_EXP_DEVCTL_NOSNOOP_EN);
7813 * Older PCIe devices only support the 128-byte
7814 * MPS setting. Enforce the restriction.
7816 if (!tg3_flag(tp, CPMU_PRESENT))
7817 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7818 pci_write_config_word(tp->pdev,
7819 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7820 val16);
7822 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7824 /* Clear error status */
7825 pci_write_config_word(tp->pdev,
7826 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7827 PCI_EXP_DEVSTA_CED |
7828 PCI_EXP_DEVSTA_NFED |
7829 PCI_EXP_DEVSTA_FED |
7830 PCI_EXP_DEVSTA_URD);
7833 tg3_restore_pci_state(tp);
7835 tg3_flag_clear(tp, CHIP_RESETTING);
7836 tg3_flag_clear(tp, ERROR_PROCESSED);
7838 val = 0;
7839 if (tg3_flag(tp, 5780_CLASS))
7840 val = tr32(MEMARB_MODE);
7841 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7843 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7844 tg3_stop_fw(tp);
7845 tw32(0x5000, 0x400);
7848 tw32(GRC_MODE, tp->grc_mode);
7850 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7851 val = tr32(0xc4);
7853 tw32(0xc4, val | (1 << 15));
7856 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7857 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7858 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7859 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7860 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7861 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7864 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7865 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7866 val = tp->mac_mode;
7867 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7868 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7869 val = tp->mac_mode;
7870 } else
7871 val = 0;
7873 tw32_f(MAC_MODE, val);
7874 udelay(40);
7876 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7878 err = tg3_poll_fw(tp);
7879 if (err)
7880 return err;
7882 tg3_mdio_start(tp);
7884 if (tg3_flag(tp, PCI_EXPRESS) &&
7885 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7886 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7887 !tg3_flag(tp, 57765_PLUS)) {
7888 val = tr32(0x7c00);
7890 tw32(0x7c00, val | (1 << 25));
7893 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7894 val = tr32(TG3_CPMU_CLCK_ORIDE);
7895 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7898 /* Reprobe ASF enable state. */
7899 tg3_flag_clear(tp, ENABLE_ASF);
7900 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7901 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7902 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7903 u32 nic_cfg;
7905 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7906 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7907 tg3_flag_set(tp, ENABLE_ASF);
7908 tp->last_event_jiffies = jiffies;
7909 if (tg3_flag(tp, 5750_PLUS))
7910 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7914 return 0;
7917 /* tp->lock is held. */
7918 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7920 int err;
7922 tg3_stop_fw(tp);
7924 tg3_write_sig_pre_reset(tp, kind);
7926 tg3_abort_hw(tp, silent);
7927 err = tg3_chip_reset(tp);
7929 __tg3_set_mac_addr(tp, 0);
7931 tg3_write_sig_legacy(tp, kind);
7932 tg3_write_sig_post_reset(tp, kind);
7934 if (err)
7935 return err;
7937 return 0;
7940 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7942 struct tg3 *tp = netdev_priv(dev);
7943 struct sockaddr *addr = p;
7944 int err = 0, skip_mac_1 = 0;
7946 if (!is_valid_ether_addr(addr->sa_data))
7947 return -EINVAL;
7949 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7951 if (!netif_running(dev))
7952 return 0;
7954 if (tg3_flag(tp, ENABLE_ASF)) {
7955 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7957 addr0_high = tr32(MAC_ADDR_0_HIGH);
7958 addr0_low = tr32(MAC_ADDR_0_LOW);
7959 addr1_high = tr32(MAC_ADDR_1_HIGH);
7960 addr1_low = tr32(MAC_ADDR_1_LOW);
7962 /* Skip MAC addr 1 if ASF is using it. */
7963 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7964 !(addr1_high == 0 && addr1_low == 0))
7965 skip_mac_1 = 1;
7967 spin_lock_bh(&tp->lock);
7968 __tg3_set_mac_addr(tp, skip_mac_1);
7969 spin_unlock_bh(&tp->lock);
7971 return err;
7974 /* tp->lock is held. */
7975 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7976 dma_addr_t mapping, u32 maxlen_flags,
7977 u32 nic_addr)
7979 tg3_write_mem(tp,
7980 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7981 ((u64) mapping >> 32));
7982 tg3_write_mem(tp,
7983 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7984 ((u64) mapping & 0xffffffff));
7985 tg3_write_mem(tp,
7986 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7987 maxlen_flags);
7989 if (!tg3_flag(tp, 5705_PLUS))
7990 tg3_write_mem(tp,
7991 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7992 nic_addr);
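/* Editor's note: each TG3_BDINFO_SIZE block of NIC SRAM written above
 * holds the 64-bit host DMA address of a ring, a maxlen/flags word, and
 * a NIC-local ring address; per the guard above, the last field is only
 * written on pre-5705 parts.
 */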
7995 static void __tg3_set_rx_mode(struct net_device *);
7996 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7998 int i;
8000 if (!tg3_flag(tp, ENABLE_TSS)) {
8001 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8002 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8003 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8004 } else {
8005 tw32(HOSTCC_TXCOL_TICKS, 0);
8006 tw32(HOSTCC_TXMAX_FRAMES, 0);
8007 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8010 if (!tg3_flag(tp, ENABLE_RSS)) {
8011 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8012 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8013 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8014 } else {
8015 tw32(HOSTCC_RXCOL_TICKS, 0);
8016 tw32(HOSTCC_RXMAX_FRAMES, 0);
8017 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8020 if (!tg3_flag(tp, 5705_PLUS)) {
8021 u32 val = ec->stats_block_coalesce_usecs;
8023 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8024 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8026 if (!netif_carrier_ok(tp->dev))
8027 val = 0;
8029 tw32(HOSTCC_STAT_COAL_TICKS, val);
8032 for (i = 0; i < tp->irq_cnt - 1; i++) {
8033 u32 reg;
8035 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8036 tw32(reg, ec->rx_coalesce_usecs);
8037 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8038 tw32(reg, ec->rx_max_coalesced_frames);
8039 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8040 tw32(reg, ec->rx_max_coalesced_frames_irq);
8042 if (tg3_flag(tp, ENABLE_TSS)) {
8043 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8044 tw32(reg, ec->tx_coalesce_usecs);
8045 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8046 tw32(reg, ec->tx_max_coalesced_frames);
8047 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8048 tw32(reg, ec->tx_max_coalesced_frames_irq);
8052 for (; i < tp->irq_max - 1; i++) {
8053 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8054 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8055 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8057 if (tg3_flag(tp, ENABLE_TSS)) {
8058 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8059 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8060 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
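/* Editor's sketch (hypothetical helper): the 0x18-byte register stride
 * used in the two loops above. Vector n (n >= 1) owns the block at the
 * corresponding VEC1 address plus (n - 1) * 0x18.
 */
static inline u32 tg3_example_rxcol_ticks_reg(int vec)
{
	return HOSTCC_RXCOL_TICKS_VEC1 + (vec - 1) * 0x18;
}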
8065 /* tp->lock is held. */
8066 static void tg3_rings_reset(struct tg3 *tp)
8068 int i;
8069 u32 stblk, txrcb, rxrcb, limit;
8070 struct tg3_napi *tnapi = &tp->napi[0];
8072 /* Disable all transmit rings but the first. */
8073 if (!tg3_flag(tp, 5705_PLUS))
8074 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8075 else if (tg3_flag(tp, 5717_PLUS))
8076 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8077 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8078 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8079 else
8080 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8082 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8083 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8084 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8085 BDINFO_FLAGS_DISABLED);
8088 /* Disable all receive return rings but the first. */
8089 if (tg3_flag(tp, 5717_PLUS))
8090 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8091 else if (!tg3_flag(tp, 5705_PLUS))
8092 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8093 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8094 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8095 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8096 else
8097 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8099 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8100 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8101 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8102 BDINFO_FLAGS_DISABLED);
8104 /* Disable interrupts */
8105 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8106 tp->napi[0].chk_msi_cnt = 0;
8107 tp->napi[0].last_rx_cons = 0;
8108 tp->napi[0].last_tx_cons = 0;
8110 /* Zero mailbox registers. */
8111 if (tg3_flag(tp, SUPPORT_MSIX)) {
8112 for (i = 1; i < tp->irq_max; i++) {
8113 tp->napi[i].tx_prod = 0;
8114 tp->napi[i].tx_cons = 0;
8115 if (tg3_flag(tp, ENABLE_TSS))
8116 tw32_mailbox(tp->napi[i].prodmbox, 0);
8117 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8118 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8119 tp->napi[i].chk_msi_cnt = 0;
8120 tp->napi[i].last_rx_cons = 0;
8121 tp->napi[i].last_tx_cons = 0;
8123 if (!tg3_flag(tp, ENABLE_TSS))
8124 tw32_mailbox(tp->napi[0].prodmbox, 0);
8125 } else {
8126 tp->napi[0].tx_prod = 0;
8127 tp->napi[0].tx_cons = 0;
8128 tw32_mailbox(tp->napi[0].prodmbox, 0);
8129 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8132 /* Make sure the NIC-based send BD rings are disabled. */
8133 if (!tg3_flag(tp, 5705_PLUS)) {
8134 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8135 for (i = 0; i < 16; i++)
8136 tw32_tx_mbox(mbox + i * 8, 0);
8139 txrcb = NIC_SRAM_SEND_RCB;
8140 rxrcb = NIC_SRAM_RCV_RET_RCB;
8142 /* Clear status block in ram. */
8143 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8145 /* Set status block DMA address */
8146 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8147 ((u64) tnapi->status_mapping >> 32));
8148 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8149 ((u64) tnapi->status_mapping & 0xffffffff));
8151 if (tnapi->tx_ring) {
8152 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8153 (TG3_TX_RING_SIZE <<
8154 BDINFO_FLAGS_MAXLEN_SHIFT),
8155 NIC_SRAM_TX_BUFFER_DESC);
8156 txrcb += TG3_BDINFO_SIZE;
8159 if (tnapi->rx_rcb) {
8160 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8161 (tp->rx_ret_ring_mask + 1) <<
8162 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8163 rxrcb += TG3_BDINFO_SIZE;
8166 stblk = HOSTCC_STATBLCK_RING1;
8168 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8169 u64 mapping = (u64)tnapi->status_mapping;
8170 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8171 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8173 /* Clear status block in ram. */
8174 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8176 if (tnapi->tx_ring) {
8177 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8178 (TG3_TX_RING_SIZE <<
8179 BDINFO_FLAGS_MAXLEN_SHIFT),
8180 NIC_SRAM_TX_BUFFER_DESC);
8181 txrcb += TG3_BDINFO_SIZE;
8184 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8185 ((tp->rx_ret_ring_mask + 1) <<
8186 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8188 stblk += 8;
8189 rxrcb += TG3_BDINFO_SIZE;
8193 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8195 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8197 if (!tg3_flag(tp, 5750_PLUS) ||
8198 tg3_flag(tp, 5780_CLASS) ||
8199 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8200 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8201 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8202 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8203 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8204 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8205 else
8206 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8208 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8209 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8211 val = min(nic_rep_thresh, host_rep_thresh);
8212 tw32(RCVBDI_STD_THRESH, val);
8214 if (tg3_flag(tp, 57765_PLUS))
8215 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8217 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8218 return;
8220 if (!tg3_flag(tp, 5705_PLUS))
8221 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8222 else
8223 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8225 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8227 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8228 tw32(RCVBDI_JUMBO_THRESH, val);
8230 if (tg3_flag(tp, 57765_PLUS))
8231 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
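/* Editor's worked example: with rx_pending = 200 the host replenish
 * threshold is max(200 / 8, 1) = 25, and the register gets
 * min(nic_rep_thresh, 25), where nic_rep_thresh is itself capped at
 * half the BD cache and at rx_std_max_post.
 */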
8234 /* tp->lock is held. */
8235 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8237 u32 val, rdmac_mode;
8238 int i, err, limit;
8239 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8241 tg3_disable_ints(tp);
8243 tg3_stop_fw(tp);
8245 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8247 if (tg3_flag(tp, INIT_COMPLETE))
8248 tg3_abort_hw(tp, 1);
8250 /* Enable MAC control of LPI */
8251 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8252 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8253 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8254 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8256 tw32_f(TG3_CPMU_EEE_CTRL,
8257 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8259 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8260 TG3_CPMU_EEEMD_LPI_IN_TX |
8261 TG3_CPMU_EEEMD_LPI_IN_RX |
8262 TG3_CPMU_EEEMD_EEE_ENABLE;
8264 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8265 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8267 if (tg3_flag(tp, ENABLE_APE))
8268 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8270 tw32_f(TG3_CPMU_EEE_MODE, val);
8272 tw32_f(TG3_CPMU_EEE_DBTMR1,
8273 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8274 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8276 tw32_f(TG3_CPMU_EEE_DBTMR2,
8277 TG3_CPMU_DBTMR2_APE_TX_2047US |
8278 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8281 if (reset_phy)
8282 tg3_phy_reset(tp);
8284 err = tg3_chip_reset(tp);
8285 if (err)
8286 return err;
8288 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8290 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8291 val = tr32(TG3_CPMU_CTRL);
8292 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8293 tw32(TG3_CPMU_CTRL, val);
8295 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8296 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8297 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8298 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8300 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8301 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8302 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8303 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8305 val = tr32(TG3_CPMU_HST_ACC);
8306 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8307 val |= CPMU_HST_ACC_MACCLK_6_25;
8308 tw32(TG3_CPMU_HST_ACC, val);
8311 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8312 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8313 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8314 PCIE_PWR_MGMT_L1_THRESH_4MS;
8315 tw32(PCIE_PWR_MGMT_THRESH, val);
8317 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8318 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8320 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8322 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8323 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8326 if (tg3_flag(tp, L1PLLPD_EN)) {
8327 u32 grc_mode = tr32(GRC_MODE);
8329 /* Access the lower 1K of PL PCIE block registers. */
8330 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8331 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8333 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8334 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8335 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8337 tw32(GRC_MODE, grc_mode);
8340 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8341 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8342 u32 grc_mode = tr32(GRC_MODE);
8344 /* Access the lower 1K of PL PCIE block registers. */
8345 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8346 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8348 val = tr32(TG3_PCIE_TLDLPL_PORT +
8349 TG3_PCIE_PL_LO_PHYCTL5);
8350 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8351 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8353 tw32(GRC_MODE, grc_mode);
8356 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8357 u32 grc_mode = tr32(GRC_MODE);
8359 /* Access the lower 1K of DL PCIE block registers. */
8360 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8361 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8363 val = tr32(TG3_PCIE_TLDLPL_PORT +
8364 TG3_PCIE_DL_LO_FTSMAX);
8365 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8366 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8367 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8369 tw32(GRC_MODE, grc_mode);
8372 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8373 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8374 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8375 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8378 /* This works around an issue with Athlon chipsets on
8379 * B3 tigon3 silicon. This bit has no effect on any
8380 * other revision. But do not set this on PCI Express
8381 * chips and don't even touch the clocks if the CPMU is present. */
8383 if (!tg3_flag(tp, CPMU_PRESENT)) {
8384 if (!tg3_flag(tp, PCI_EXPRESS))
8385 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8386 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8389 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8390 tg3_flag(tp, PCIX_MODE)) {
8391 val = tr32(TG3PCI_PCISTATE);
8392 val |= PCISTATE_RETRY_SAME_DMA;
8393 tw32(TG3PCI_PCISTATE, val);
8396 if (tg3_flag(tp, ENABLE_APE)) {
8397 /* Allow reads and writes to the
8398 * APE register and memory space. */
8400 val = tr32(TG3PCI_PCISTATE);
8401 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8402 PCISTATE_ALLOW_APE_SHMEM_WR |
8403 PCISTATE_ALLOW_APE_PSPACE_WR;
8404 tw32(TG3PCI_PCISTATE, val);
8407 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8408 /* Enable some hw fixes. */
8409 val = tr32(TG3PCI_MSI_DATA);
8410 val |= (1 << 26) | (1 << 28) | (1 << 29);
8411 tw32(TG3PCI_MSI_DATA, val);
8414 /* Descriptor ring init may make accesses to the
8415 * NIC SRAM area to set up the TX descriptors, so we
8416 * can only do this after the hardware has been
8417 * successfully reset. */
8419 err = tg3_init_rings(tp);
8420 if (err)
8421 return err;
8423 if (tg3_flag(tp, 57765_PLUS)) {
8424 val = tr32(TG3PCI_DMA_RW_CTRL) &
8425 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8426 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8427 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8428 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8429 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8430 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8431 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8432 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8433 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8434 /* This value is determined during the probe time DMA
8435 * engine test, tg3_test_dma. */
8437 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8440 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8441 GRC_MODE_4X_NIC_SEND_RINGS |
8442 GRC_MODE_NO_TX_PHDR_CSUM |
8443 GRC_MODE_NO_RX_PHDR_CSUM);
8444 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8446 /* Pseudo-header checksum is done by hardware logic and not
8447 * the offload processors, so make the chip do the pseudo-
8448 * header checksums on receive. For transmit it is more
8449 * convenient to do the pseudo-header checksum in software
8450 * as Linux does that on transmit for us in all cases. */
8452 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8454 tw32(GRC_MODE,
8455 tp->grc_mode |
8456 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8458 /* Set up the timer prescaler register. The clock is always 66 MHz. */
8459 val = tr32(GRC_MISC_CFG);
8460 val &= ~0xff;
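/* A prescaler value of 65 divides the 66 MHz clock by (65 + 1),
 * presumably yielding a 1 MHz (i.e. 1 usec) timer tick. */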
8461 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8462 tw32(GRC_MISC_CFG, val);
8464 /* Initialize MBUF/DESC pool. */
8465 if (tg3_flag(tp, 5750_PLUS)) {
8466 /* Do nothing. */
8467 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8468 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8469 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8470 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8471 else
8472 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8473 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8474 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8475 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8476 int fw_len;
8478 fw_len = tp->fw_len;
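/* Round the firmware length up to the next 128-byte boundary so the
 * mbuf pool carved out right after it starts aligned. */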
8479 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8480 tw32(BUFMGR_MB_POOL_ADDR,
8481 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8482 tw32(BUFMGR_MB_POOL_SIZE,
8483 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8486 if (tp->dev->mtu <= ETH_DATA_LEN) {
8487 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8488 tp->bufmgr_config.mbuf_read_dma_low_water);
8489 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8490 tp->bufmgr_config.mbuf_mac_rx_low_water);
8491 tw32(BUFMGR_MB_HIGH_WATER,
8492 tp->bufmgr_config.mbuf_high_water);
8493 } else {
8494 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8495 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8496 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8497 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8498 tw32(BUFMGR_MB_HIGH_WATER,
8499 tp->bufmgr_config.mbuf_high_water_jumbo);
8501 tw32(BUFMGR_DMA_LOW_WATER,
8502 tp->bufmgr_config.dma_low_water);
8503 tw32(BUFMGR_DMA_HIGH_WATER,
8504 tp->bufmgr_config.dma_high_water);
8506 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8508 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8509 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8510 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8511 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8512 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8513 tw32(BUFMGR_MODE, val);
8514 for (i = 0; i < 2000; i++) {
8515 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8516 break;
8517 udelay(10);
8519 if (i >= 2000) {
8520 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8521 return -ENODEV;
8524 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8525 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8527 tg3_setup_rxbd_thresholds(tp);
8529 /* Initialize TG3_BDINFO's at:
8530 * RCVDBDI_STD_BD: standard eth size rx ring
8531 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8532 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8534 * like so:
8535 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8536 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8537 * ring attribute flags
8538 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8540 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8541 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8543 * The size of each ring is fixed in the firmware, but the location is
8544 * configurable. */
8546 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8547 ((u64) tpr->rx_std_mapping >> 32));
8548 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8549 ((u64) tpr->rx_std_mapping & 0xffffffff));
8550 if (!tg3_flag(tp, 5717_PLUS))
8551 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8552 NIC_SRAM_RX_BUFFER_DESC);
8554 /* Disable the mini ring */
8555 if (!tg3_flag(tp, 5705_PLUS))
8556 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8557 BDINFO_FLAGS_DISABLED);
8559 /* Program the jumbo buffer descriptor ring control
8560 * blocks on those devices that have them. */
8562 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8563 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8565 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8566 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8567 ((u64) tpr->rx_jmb_mapping >> 32));
8568 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8569 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8570 val = TG3_RX_JMB_RING_SIZE(tp) <<
8571 BDINFO_FLAGS_MAXLEN_SHIFT;
8572 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8573 val | BDINFO_FLAGS_USE_EXT_RECV);
8574 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8575 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8576 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8577 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8578 } else {
8579 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8580 BDINFO_FLAGS_DISABLED);
8583 if (tg3_flag(tp, 57765_PLUS)) {
8584 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8585 val = TG3_RX_STD_MAX_SIZE_5700;
8586 else
8587 val = TG3_RX_STD_MAX_SIZE_5717;
8588 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8589 val |= (TG3_RX_STD_DMA_SZ << 2);
8590 } else
8591 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8592 } else
8593 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8595 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8597 tpr->rx_std_prod_idx = tp->rx_pending;
8598 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8600 tpr->rx_jmb_prod_idx =
8601 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8602 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8604 tg3_rings_reset(tp);
8606 /* Initialize MAC address and backoff seed. */
8607 __tg3_set_mac_addr(tp, 0);
8609 /* MTU + ethernet header + FCS + optional VLAN tag */
8610 tw32(MAC_RX_MTU_SIZE,
8611 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8613 /* The slot time is changed by tg3_setup_phy if we
8614 * run at gigabit with half duplex. */
8616 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8617 (6 << TX_LENGTHS_IPG_SHIFT) |
8618 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8620 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8621 val |= tr32(MAC_TX_LENGTHS) &
8622 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8623 TX_LENGTHS_CNT_DWN_VAL_MSK);
8625 tw32(MAC_TX_LENGTHS, val);
8627 /* Receive rules. */
8628 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8629 tw32(RCVLPC_CONFIG, 0x0181);
8631 /* Calculate the RDMAC_MODE setting early; we need it to determine
8632 * the RCVLPC_STATE_ENABLE mask. */
8634 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8635 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8636 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8637 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8638 RDMAC_MODE_LNGREAD_ENAB);
8640 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8641 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8643 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8644 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8645 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8646 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8647 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8648 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8650 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8651 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8652 if (tg3_flag(tp, TSO_CAPABLE) &&
8653 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8654 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8655 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8656 !tg3_flag(tp, IS_5788)) {
8657 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8661 if (tg3_flag(tp, PCI_EXPRESS))
8662 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8664 if (tg3_flag(tp, HW_TSO_1) ||
8665 tg3_flag(tp, HW_TSO_2) ||
8666 tg3_flag(tp, HW_TSO_3))
8667 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8669 if (tg3_flag(tp, 57765_PLUS) ||
8670 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8671 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8672 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8674 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8675 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8677 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8678 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8679 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8680 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8681 tg3_flag(tp, 57765_PLUS)) {
8682 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8683 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8684 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8685 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8686 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8687 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8688 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8689 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8690 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8692 tw32(TG3_RDMA_RSRVCTRL_REG,
8693 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8696 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8697 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8698 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8699 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8700 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8701 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8704 /* Receive/send statistics. */
8705 if (tg3_flag(tp, 5750_PLUS)) {
8706 val = tr32(RCVLPC_STATS_ENABLE);
8707 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8708 tw32(RCVLPC_STATS_ENABLE, val);
8709 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8710 tg3_flag(tp, TSO_CAPABLE)) {
8711 val = tr32(RCVLPC_STATS_ENABLE);
8712 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8713 tw32(RCVLPC_STATS_ENABLE, val);
8714 } else {
8715 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8717 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8718 tw32(SNDDATAI_STATSENAB, 0xffffff);
8719 tw32(SNDDATAI_STATSCTRL,
8720 (SNDDATAI_SCTRL_ENABLE |
8721 SNDDATAI_SCTRL_FASTUPD));
8723 /* Setup host coalescing engine. */
8724 tw32(HOSTCC_MODE, 0);
8725 for (i = 0; i < 2000; i++) {
8726 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8727 break;
8728 udelay(10);
8731 __tg3_set_coalesce(tp, &tp->coal);
8733 if (!tg3_flag(tp, 5705_PLUS)) {
8734 /* Status/statistics block address. See tg3_timer,
8735 * the tg3_periodic_fetch_stats call there, and
8736 * tg3_get_stats to see how this works for 5705/5750 chips. */
8738 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8739 ((u64) tp->stats_mapping >> 32));
8740 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8741 ((u64) tp->stats_mapping & 0xffffffff));
8742 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8744 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8746 /* Clear statistics and status block memory areas */
8747 for (i = NIC_SRAM_STATS_BLK;
8748 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8749 i += sizeof(u32)) {
8750 tg3_write_mem(tp, i, 0);
8751 udelay(40);
8755 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8757 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8758 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8759 if (!tg3_flag(tp, 5705_PLUS))
8760 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8762 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8763 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8764 /* reset to prevent losing 1st rx packet intermittently */
8765 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8766 udelay(10);
8769 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8770 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8771 MAC_MODE_FHDE_ENABLE;
8772 if (tg3_flag(tp, ENABLE_APE))
8773 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8774 if (!tg3_flag(tp, 5705_PLUS) &&
8775 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8776 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8777 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8778 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8779 udelay(40);
8781 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8782 * If TG3_FLAG_IS_NIC is zero, we should read the
8783 * register to preserve the GPIO settings for LOMs. The GPIOs,
8784 * whether used as inputs or outputs, are set by boot code after
8785 * reset. */
8787 if (!tg3_flag(tp, IS_NIC)) {
8788 u32 gpio_mask;
8790 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8791 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8792 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8794 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8795 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8796 GRC_LCLCTRL_GPIO_OUTPUT3;
8798 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8799 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8801 tp->grc_local_ctrl &= ~gpio_mask;
8802 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8804 /* GPIO1 must be driven high for eeprom write protect */
8805 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8806 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8807 GRC_LCLCTRL_GPIO_OUTPUT1);
8809 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8810 udelay(100);
8812 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8813 val = tr32(MSGINT_MODE);
8814 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8815 if (!tg3_flag(tp, 1SHOT_MSI))
8816 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8817 tw32(MSGINT_MODE, val);
8820 if (!tg3_flag(tp, 5705_PLUS)) {
8821 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8822 udelay(40);
8825 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8826 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8827 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8828 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8829 WDMAC_MODE_LNGREAD_ENAB);
8831 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8832 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8833 if (tg3_flag(tp, TSO_CAPABLE) &&
8834 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8835 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8836 /* nothing */
8837 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8838 !tg3_flag(tp, IS_5788)) {
8839 val |= WDMAC_MODE_RX_ACCEL;
8843 /* Enable host coalescing bug fix */
8844 if (tg3_flag(tp, 5755_PLUS))
8845 val |= WDMAC_MODE_STATUS_TAG_FIX;
8847 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8848 val |= WDMAC_MODE_BURST_ALL_DATA;
8850 tw32_f(WDMAC_MODE, val);
8851 udelay(40);
8853 if (tg3_flag(tp, PCIX_MODE)) {
8854 u16 pcix_cmd;
8856 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8857 &pcix_cmd);
8858 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8859 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8860 pcix_cmd |= PCI_X_CMD_READ_2K;
8861 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8862 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8863 pcix_cmd |= PCI_X_CMD_READ_2K;
8865 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8866 pcix_cmd);
8869 tw32_f(RDMAC_MODE, rdmac_mode);
8870 udelay(40);
8872 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8873 if (!tg3_flag(tp, 5705_PLUS))
8874 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8876 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8877 tw32(SNDDATAC_MODE,
8878 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8879 else
8880 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8882 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8883 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8884 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8885 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8886 val |= RCVDBDI_MODE_LRG_RING_SZ;
8887 tw32(RCVDBDI_MODE, val);
8888 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8889 if (tg3_flag(tp, HW_TSO_1) ||
8890 tg3_flag(tp, HW_TSO_2) ||
8891 tg3_flag(tp, HW_TSO_3))
8892 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8893 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8894 if (tg3_flag(tp, ENABLE_TSS))
8895 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8896 tw32(SNDBDI_MODE, val);
8897 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8899 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8900 err = tg3_load_5701_a0_firmware_fix(tp);
8901 if (err)
8902 return err;
8905 if (tg3_flag(tp, TSO_CAPABLE)) {
8906 err = tg3_load_tso_firmware(tp);
8907 if (err)
8908 return err;
8911 tp->tx_mode = TX_MODE_ENABLE;
8913 if (tg3_flag(tp, 5755_PLUS) ||
8914 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8915 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8917 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8918 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8919 tp->tx_mode &= ~val;
8920 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8923 tw32_f(MAC_TX_MODE, tp->tx_mode);
8924 udelay(100);
8926 if (tg3_flag(tp, ENABLE_RSS)) {
8927 int i = 0;
8928 u32 reg = MAC_RSS_INDIR_TBL_0;
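/* Each 32-bit indirection table register packs eight 4-bit rx ring
 * indices. With only two vectors there is a single rx ring, so the
 * table is zeroed; otherwise entries cycle through i % (irq_cnt - 1),
 * spreading hashed flows round-robin across the rx rings (vector 0
 * handles link events only). */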
8930 if (tp->irq_cnt == 2) {
8931 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8932 tw32(reg, 0x0);
8933 reg += 4;
8935 } else {
8936 u32 val;
8938 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8939 val = i % (tp->irq_cnt - 1);
8940 i++;
8941 for (; i % 8; i++) {
8942 val <<= 4;
8943 val |= (i % (tp->irq_cnt - 1));
8945 tw32(reg, val);
8946 reg += 4;
8950 /* Setup the "secret" hash key. */
8951 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8952 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8953 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8954 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8955 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8956 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8957 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8958 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8959 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8960 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
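/* The ten writes above load the full 40-byte RSS hash key. It is a
 * fixed, compiled-in key rather than a per-boot random one, so
 * flow-to-ring placement is reproducible across reboots. */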
8963 tp->rx_mode = RX_MODE_ENABLE;
8964 if (tg3_flag(tp, 5755_PLUS))
8965 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8967 if (tg3_flag(tp, ENABLE_RSS))
8968 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8969 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8970 RX_MODE_RSS_IPV6_HASH_EN |
8971 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8972 RX_MODE_RSS_IPV4_HASH_EN |
8973 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8975 tw32_f(MAC_RX_MODE, tp->rx_mode);
8976 udelay(10);
8978 tw32(MAC_LED_CTRL, tp->led_ctrl);
8980 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8981 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8982 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8983 udelay(10);
8985 tw32_f(MAC_RX_MODE, tp->rx_mode);
8986 udelay(10);
8988 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8989 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8990 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8991 /* Set drive transmission level to 1.2V */
8992 /* only if the signal pre-emphasis bit is not set */
8993 val = tr32(MAC_SERDES_CFG);
8994 val &= 0xfffff000;
8995 val |= 0x880;
8996 tw32(MAC_SERDES_CFG, val);
8998 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8999 tw32(MAC_SERDES_CFG, 0x616000);
9002 /* Prevent chip from dropping frames when flow control
9003 * is enabled. */
9005 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9006 val = 1;
9007 else
9008 val = 2;
9009 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9011 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9012 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9013 /* Use hardware link auto-negotiation */
9014 tg3_flag_set(tp, HW_AUTONEG);
9017 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9018 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9019 u32 tmp;
9021 tmp = tr32(SERDES_RX_CTRL);
9022 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9023 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9024 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9025 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9028 if (!tg3_flag(tp, USE_PHYLIB)) {
9029 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9030 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9031 tp->link_config.speed = tp->link_config.orig_speed;
9032 tp->link_config.duplex = tp->link_config.orig_duplex;
9033 tp->link_config.autoneg = tp->link_config.orig_autoneg;
9036 err = tg3_setup_phy(tp, 0);
9037 if (err)
9038 return err;
9040 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9041 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9042 u32 tmp;
9044 /* Clear CRC stats. */
9045 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9046 tg3_writephy(tp, MII_TG3_TEST1,
9047 tmp | MII_TG3_TEST1_CRC_EN);
9048 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9053 __tg3_set_rx_mode(tp->dev);
9055 /* Initialize receive rules. */
9056 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9057 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9058 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9059 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9061 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9062 limit = 8;
9063 else
9064 limit = 16;
9065 if (tg3_flag(tp, ENABLE_ASF))
9066 limit -= 4;
9067 switch (limit) {
9068 case 16:
9069 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9070 case 15:
9071 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9072 case 14:
9073 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9074 case 13:
9075 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9076 case 12:
9077 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9078 case 11:
9079 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9080 case 10:
9081 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9082 case 9:
9083 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9084 case 8:
9085 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9086 case 7:
9087 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9088 case 6:
9089 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9090 case 5:
9091 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9092 case 4:
9093 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9094 case 3:
9095 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9096 case 2:
9097 case 1:
9099 default:
9100 break;
9103 if (tg3_flag(tp, ENABLE_APE))
9104 /* Write our heartbeat update interval to APE. */
9105 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9106 APE_HOST_HEARTBEAT_INT_DISABLE);
9108 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9110 return 0;
9113 /* Called at device open time to get the chip ready for
9114 * packet processing. Invoked with tp->lock held. */
9116 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9118 tg3_switch_clocks(tp);
9120 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9122 return tg3_reset_hw(tp, reset_phy);
9125 #define TG3_STAT_ADD32(PSTAT, REG) \
9126 do { u32 __val = tr32(REG); \
9127 (PSTAT)->low += __val; \
9128 if ((PSTAT)->low < __val) \
9129 (PSTAT)->high += 1; \
9130 } while (0)
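/* The hardware counters are only 32 bits wide; the macro folds each
 * reading into a 64-bit software counter. Low-word wraparound is caught
 * by the unsigned compare after the add: e.g. low = 0xfffffff0 plus
 * __val = 0x20 leaves low = 0x10, which is < 0x20, so a carry is added
 * to the high word. */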
9132 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9134 struct tg3_hw_stats *sp = tp->hw_stats;
9136 if (!netif_carrier_ok(tp->dev))
9137 return;
9139 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9140 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9141 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9142 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9143 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9144 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9145 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9146 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9147 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9148 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9149 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9150 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9151 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9153 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9154 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9155 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9156 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9157 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9158 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9159 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9160 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9161 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9162 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9163 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9164 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9165 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9166 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9168 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9169 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9170 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9171 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9172 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9173 } else {
9174 u32 val = tr32(HOSTCC_FLOW_ATTN);
9175 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9176 if (val) {
9177 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9178 sp->rx_discards.low += val;
9179 if (sp->rx_discards.low < val)
9180 sp->rx_discards.high += 1;
9182 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9184 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
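/* Workaround for interrupts that 5717/57765-class chips can apparently
 * lose: if a vector reports pending work but its rx/tx consumer pointers
 * have not advanced since the previous timer tick, assume the MSI was
 * dropped and call the handler directly. chk_msi_cnt grants one tick of
 * grace before firing. */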
9187 static void tg3_chk_missed_msi(struct tg3 *tp)
9189 u32 i;
9191 for (i = 0; i < tp->irq_cnt; i++) {
9192 struct tg3_napi *tnapi = &tp->napi[i];
9194 if (tg3_has_work(tnapi)) {
9195 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9196 tnapi->last_tx_cons == tnapi->tx_cons) {
9197 if (tnapi->chk_msi_cnt < 1) {
9198 tnapi->chk_msi_cnt++;
9199 return;
9201 tg3_msi(0, tnapi);
9204 tnapi->chk_msi_cnt = 0;
9205 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9206 tnapi->last_tx_cons = tnapi->tx_cons;
9210 static void tg3_timer(unsigned long __opaque)
9212 struct tg3 *tp = (struct tg3 *) __opaque;
9214 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9215 goto restart_timer;
9217 spin_lock(&tp->lock);
9219 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9221 tg3_chk_missed_msi(tp);
9223 if (!tg3_flag(tp, TAGGED_STATUS)) {
9224 /* All of this garbage is needed because, when using non-tagged
9225 * IRQ status, the mailbox/status_block protocol the chip
9226 * uses with the CPU is race prone. */
9228 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9229 tw32(GRC_LOCAL_CTRL,
9230 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9231 } else {
9232 tw32(HOSTCC_MODE, tp->coalesce_mode |
9233 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9236 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9237 spin_unlock(&tp->lock);
9238 tg3_reset_task_schedule(tp);
9239 goto restart_timer;
9243 /* This part only runs once per second. */
9244 if (!--tp->timer_counter) {
9245 if (tg3_flag(tp, 5705_PLUS))
9246 tg3_periodic_fetch_stats(tp);
9248 if (tp->setlpicnt && !--tp->setlpicnt)
9249 tg3_phy_eee_enable(tp);
9251 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9252 u32 mac_stat;
9253 int phy_event;
9255 mac_stat = tr32(MAC_STATUS);
9257 phy_event = 0;
9258 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9259 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9260 phy_event = 1;
9261 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9262 phy_event = 1;
9264 if (phy_event)
9265 tg3_setup_phy(tp, 0);
9266 } else if (tg3_flag(tp, POLL_SERDES)) {
9267 u32 mac_stat = tr32(MAC_STATUS);
9268 int need_setup = 0;
9270 if (netif_carrier_ok(tp->dev) &&
9271 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9272 need_setup = 1;
9274 if (!netif_carrier_ok(tp->dev) &&
9275 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9276 MAC_STATUS_SIGNAL_DET))) {
9277 need_setup = 1;
9279 if (need_setup) {
9280 if (!tp->serdes_counter) {
9281 tw32_f(MAC_MODE,
9282 (tp->mac_mode &
9283 ~MAC_MODE_PORT_MODE_MASK));
9284 udelay(40);
9285 tw32_f(MAC_MODE, tp->mac_mode);
9286 udelay(40);
9288 tg3_setup_phy(tp, 0);
9290 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9291 tg3_flag(tp, 5780_CLASS)) {
9292 tg3_serdes_parallel_detect(tp);
9295 tp->timer_counter = tp->timer_multiplier;
9298 /* Heartbeat is only sent once every 2 seconds.
9300 * The heartbeat is to tell the ASF firmware that the host
9301 * driver is still alive. In the event that the OS crashes,
9302 * ASF needs to reset the hardware to free up the FIFO space
9303 * that may be filled with rx packets destined for the host.
9304 * If the FIFO is full, ASF will no longer function properly.
9306 * Unintended resets have been reported on real-time kernels,
9307 * where the timer doesn't run on time. Netpoll will also have
9308 * the same problem.
9310 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9311 * to check the ring condition when the heartbeat is expiring
9312 * before doing the reset. This will prevent most unintended
9313 * resets. */
9315 if (!--tp->asf_counter) {
9316 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9317 tg3_wait_for_event_ack(tp);
9319 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9320 FWCMD_NICDRV_ALIVE3);
9321 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9322 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9323 TG3_FW_UPDATE_TIMEOUT_SEC);
9325 tg3_generate_fw_event(tp);
9327 tp->asf_counter = tp->asf_multiplier;
9330 spin_unlock(&tp->lock);
9332 restart_timer:
9333 tp->timer.expires = jiffies + tp->timer_offset;
9334 add_timer(&tp->timer);
9337 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9339 irq_handler_t fn;
9340 unsigned long flags;
9341 char *name;
9342 struct tg3_napi *tnapi = &tp->napi[irq_num];
9344 if (tp->irq_cnt == 1)
9345 name = tp->dev->name;
9346 else {
9347 name = &tnapi->irq_lbl[0];
9348 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9349 name[IFNAMSIZ-1] = 0;
9352 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9353 fn = tg3_msi;
9354 if (tg3_flag(tp, 1SHOT_MSI))
9355 fn = tg3_msi_1shot;
9356 flags = 0;
9357 } else {
9358 fn = tg3_interrupt;
9359 if (tg3_flag(tp, TAGGED_STATUS))
9360 fn = tg3_interrupt_tagged;
9361 flags = IRQF_SHARED;
9364 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9367 static int tg3_test_interrupt(struct tg3 *tp)
9369 struct tg3_napi *tnapi = &tp->napi[0];
9370 struct net_device *dev = tp->dev;
9371 int err, i, intr_ok = 0;
9372 u32 val;
9374 if (!netif_running(dev))
9375 return -ENODEV;
9377 tg3_disable_ints(tp);
9379 free_irq(tnapi->irq_vec, tnapi);
9382 /* Turn off MSI one shot mode. Otherwise this test has no
9383 * observable way to know whether the interrupt was delivered. */
9385 if (tg3_flag(tp, 57765_PLUS)) {
9386 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9387 tw32(MSGINT_MODE, val);
9390 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9391 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9392 if (err)
9393 return err;
9395 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9396 tg3_enable_ints(tp);
9398 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9399 tnapi->coal_now);
9401 for (i = 0; i < 5; i++) {
9402 u32 int_mbox, misc_host_ctrl;
9404 int_mbox = tr32_mailbox(tnapi->int_mbox);
9405 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9407 if ((int_mbox != 0) ||
9408 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9409 intr_ok = 1;
9410 break;
9413 if (tg3_flag(tp, 57765_PLUS) &&
9414 tnapi->hw_status->status_tag != tnapi->last_tag)
9415 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9417 msleep(10);
9420 tg3_disable_ints(tp);
9422 free_irq(tnapi->irq_vec, tnapi);
9424 err = tg3_request_irq(tp, 0);
9426 if (err)
9427 return err;
9429 if (intr_ok) {
9430 /* Reenable MSI one shot mode. */
9431 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9432 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9433 tw32(MSGINT_MODE, val);
9435 return 0;
9438 return -EIO;
9441 /* Returns 0 if the MSI test succeeds, or if the MSI test fails
9442 * but INTx mode is successfully restored. */
9444 static int tg3_test_msi(struct tg3 *tp)
9446 int err;
9447 u16 pci_cmd;
9449 if (!tg3_flag(tp, USING_MSI))
9450 return 0;
9452 /* Turn off SERR reporting in case MSI terminates with Master
9453 * Abort. */
9455 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9456 pci_write_config_word(tp->pdev, PCI_COMMAND,
9457 pci_cmd & ~PCI_COMMAND_SERR);
9459 err = tg3_test_interrupt(tp);
9461 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9463 if (!err)
9464 return 0;
9466 /* other failures */
9467 if (err != -EIO)
9468 return err;
9470 /* MSI test failed, go back to INTx mode */
9471 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9472 "to INTx mode. Please report this failure to the PCI "
9473 "maintainer and include system chipset information\n");
9475 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9477 pci_disable_msi(tp->pdev);
9479 tg3_flag_clear(tp, USING_MSI);
9480 tp->napi[0].irq_vec = tp->pdev->irq;
9482 err = tg3_request_irq(tp, 0);
9483 if (err)
9484 return err;
9486 /* Need to reset the chip because the MSI cycle may have terminated
9487 * with Master Abort. */
9489 tg3_full_lock(tp, 1);
9491 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9492 err = tg3_init_hw(tp, 1);
9494 tg3_full_unlock(tp);
9496 if (err)
9497 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9499 return err;
9502 static int tg3_request_firmware(struct tg3 *tp)
9504 const __be32 *fw_data;
9506 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9507 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9508 tp->fw_needed);
9509 return -ENOENT;
9512 fw_data = (void *)tp->fw->data;
9514 /* Firmware blob starts with version numbers, followed by
9515 * start address and _full_ length including BSS sections
9516 * (which must be longer than the actual data, of course). */
9519 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9520 if (tp->fw_len < (tp->fw->size - 12)) {
9521 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9522 tp->fw_len, tp->fw_needed);
9523 release_firmware(tp->fw);
9524 tp->fw = NULL;
9525 return -EINVAL;
9528 /* We no longer need firmware; we have it. */
9529 tp->fw_needed = NULL;
9530 return 0;
9533 static bool tg3_enable_msix(struct tg3 *tp)
9535 int i, rc, cpus = num_online_cpus();
9536 struct msix_entry msix_ent[tp->irq_max];
9538 if (cpus == 1)
9539 /* Just fallback to the simpler MSI mode. */
9540 return false;
9543 /* We want as many rx rings enabled as there are cpus.
9544 * The first MSIX vector only deals with link interrupts, etc,
9545 * so we add one to the number of vectors we are requesting. */
9547 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9549 for (i = 0; i < tp->irq_max; i++) {
9550 msix_ent[i].entry = i;
9551 msix_ent[i].vector = 0;
9554 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9555 if (rc < 0) {
9556 return false;
9557 } else if (rc != 0) {
9558 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9559 return false;
9560 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9561 tp->irq_cnt, rc);
9562 tp->irq_cnt = rc;
9565 for (i = 0; i < tp->irq_max; i++)
9566 tp->napi[i].irq_vec = msix_ent[i].vector;
9568 netif_set_real_num_tx_queues(tp->dev, 1);
9569 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9570 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9571 pci_disable_msix(tp->pdev);
9572 return false;
9575 if (tp->irq_cnt > 1) {
9576 tg3_flag_set(tp, ENABLE_RSS);
9578 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9579 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9580 tg3_flag_set(tp, ENABLE_TSS);
9581 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9585 return true;
9588 static void tg3_ints_init(struct tg3 *tp)
9590 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9591 !tg3_flag(tp, TAGGED_STATUS)) {
9592 /* All MSI supporting chips should support tagged
9593 * status. Assert that this is the case. */
9595 netdev_warn(tp->dev,
9596 "MSI without TAGGED_STATUS? Not using MSI\n");
9597 goto defcfg;
9600 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9601 tg3_flag_set(tp, USING_MSIX);
9602 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9603 tg3_flag_set(tp, USING_MSI);
9605 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9606 u32 msi_mode = tr32(MSGINT_MODE);
9607 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9608 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9609 if (!tg3_flag(tp, 1SHOT_MSI))
9610 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9611 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9613 defcfg:
9614 if (!tg3_flag(tp, USING_MSIX)) {
9615 tp->irq_cnt = 1;
9616 tp->napi[0].irq_vec = tp->pdev->irq;
9617 netif_set_real_num_tx_queues(tp->dev, 1);
9618 netif_set_real_num_rx_queues(tp->dev, 1);
9622 static void tg3_ints_fini(struct tg3 *tp)
9624 if (tg3_flag(tp, USING_MSIX))
9625 pci_disable_msix(tp->pdev);
9626 else if (tg3_flag(tp, USING_MSI))
9627 pci_disable_msi(tp->pdev);
9628 tg3_flag_clear(tp, USING_MSI);
9629 tg3_flag_clear(tp, USING_MSIX);
9630 tg3_flag_clear(tp, ENABLE_RSS);
9631 tg3_flag_clear(tp, ENABLE_TSS);
9634 static int tg3_open(struct net_device *dev)
9636 struct tg3 *tp = netdev_priv(dev);
9637 int i, err;
9639 if (tp->fw_needed) {
9640 err = tg3_request_firmware(tp);
9641 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9642 if (err)
9643 return err;
9644 } else if (err) {
9645 netdev_warn(tp->dev, "TSO capability disabled\n");
9646 tg3_flag_clear(tp, TSO_CAPABLE);
9647 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9648 netdev_notice(tp->dev, "TSO capability restored\n");
9649 tg3_flag_set(tp, TSO_CAPABLE);
9653 netif_carrier_off(tp->dev);
9655 err = tg3_power_up(tp);
9656 if (err)
9657 return err;
9659 tg3_full_lock(tp, 0);
9661 tg3_disable_ints(tp);
9662 tg3_flag_clear(tp, INIT_COMPLETE);
9664 tg3_full_unlock(tp);
9667 /* Setup interrupts first so we know how
9668 * many NAPI resources to allocate. */
9670 tg3_ints_init(tp);
9672 /* The placement of this call is tied
9673 * to the setup and use of Host TX descriptors. */
9675 err = tg3_alloc_consistent(tp);
9676 if (err)
9677 goto err_out1;
9679 tg3_napi_init(tp);
9681 tg3_napi_enable(tp);
9683 for (i = 0; i < tp->irq_cnt; i++) {
9684 struct tg3_napi *tnapi = &tp->napi[i];
9685 err = tg3_request_irq(tp, i);
9686 if (err) {
9687 for (i--; i >= 0; i--) {
9688 tnapi = &tp->napi[i];
9689 free_irq(tnapi->irq_vec, tnapi);
9691 goto err_out2;
9695 tg3_full_lock(tp, 0);
9697 err = tg3_init_hw(tp, 1);
9698 if (err) {
9699 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9700 tg3_free_rings(tp);
9701 } else {
9702 if (tg3_flag(tp, TAGGED_STATUS) &&
9703 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9704 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9705 tp->timer_offset = HZ;
9706 else
9707 tp->timer_offset = HZ / 10;
9709 BUG_ON(tp->timer_offset > HZ);
9710 tp->timer_counter = tp->timer_multiplier =
9711 (HZ / tp->timer_offset);
9712 tp->asf_counter = tp->asf_multiplier =
9713 ((HZ / tp->timer_offset) * 2);
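/* With timer_offset = HZ/10 the timer fires ten times a second;
 * timer_counter counts those ticks so the once-per-second work in
 * tg3_timer runs on every tenth pass, and asf_counter stretches that
 * to the two-second heartbeat interval. */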
9715 init_timer(&tp->timer);
9716 tp->timer.expires = jiffies + tp->timer_offset;
9717 tp->timer.data = (unsigned long) tp;
9718 tp->timer.function = tg3_timer;
9721 tg3_full_unlock(tp);
9723 if (err)
9724 goto err_out3;
9726 if (tg3_flag(tp, USING_MSI)) {
9727 err = tg3_test_msi(tp);
9729 if (err) {
9730 tg3_full_lock(tp, 0);
9731 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9732 tg3_free_rings(tp);
9733 tg3_full_unlock(tp);
9735 goto err_out2;
9738 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9739 u32 val = tr32(PCIE_TRANSACTION_CFG);
9741 tw32(PCIE_TRANSACTION_CFG,
9742 val | PCIE_TRANS_CFG_1SHOT_MSI);
9746 tg3_phy_start(tp);
9748 tg3_full_lock(tp, 0);
9750 add_timer(&tp->timer);
9751 tg3_flag_set(tp, INIT_COMPLETE);
9752 tg3_enable_ints(tp);
9754 tg3_full_unlock(tp);
9756 netif_tx_start_all_queues(dev);
9759 /* Reset the loopback feature if it was turned on while the device
9760 * was down; make sure that it is installed properly now. */
9762 if (dev->features & NETIF_F_LOOPBACK)
9763 tg3_set_loopback(dev, dev->features);
9765 return 0;
9767 err_out3:
9768 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9769 struct tg3_napi *tnapi = &tp->napi[i];
9770 free_irq(tnapi->irq_vec, tnapi);
9773 err_out2:
9774 tg3_napi_disable(tp);
9775 tg3_napi_fini(tp);
9776 tg3_free_consistent(tp);
9778 err_out1:
9779 tg3_ints_fini(tp);
9780 tg3_frob_aux_power(tp, false);
9781 pci_set_power_state(tp->pdev, PCI_D3hot);
9782 return err;
9785 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9786 struct rtnl_link_stats64 *);
9787 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9789 static int tg3_close(struct net_device *dev)
9791 int i;
9792 struct tg3 *tp = netdev_priv(dev);
9794 tg3_napi_disable(tp);
9795 tg3_reset_task_cancel(tp);
9797 netif_tx_stop_all_queues(dev);
9799 del_timer_sync(&tp->timer);
9801 tg3_phy_stop(tp);
9803 tg3_full_lock(tp, 1);
9805 tg3_disable_ints(tp);
9807 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9808 tg3_free_rings(tp);
9809 tg3_flag_clear(tp, INIT_COMPLETE);
9811 tg3_full_unlock(tp);
9813 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9814 struct tg3_napi *tnapi = &tp->napi[i];
9815 free_irq(tnapi->irq_vec, tnapi);
9818 tg3_ints_fini(tp);
9820 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9822 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9823 sizeof(tp->estats_prev));
9825 tg3_napi_fini(tp);
9827 tg3_free_consistent(tp);
9829 tg3_power_down(tp);
9831 netif_carrier_off(tp->dev);
9833 return 0;
9836 static inline u64 get_stat64(tg3_stat64_t *val)
9838 return ((u64)val->high << 32) | ((u64)val->low);
9841 static u64 calc_crc_errors(struct tg3 *tp)
9843 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9845 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9846 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9847 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9848 u32 val;
9850 spin_lock_bh(&tp->lock);
9851 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9852 tg3_writephy(tp, MII_TG3_TEST1,
9853 val | MII_TG3_TEST1_CRC_EN);
9854 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9855 } else
9856 val = 0;
9857 spin_unlock_bh(&tp->lock);
9859 tp->phy_crc_errors += val;
9861 return tp->phy_crc_errors;
9864 return get_stat64(&hw_stats->rx_fcs_errors);
9867 #define ESTAT_ADD(member) \
9868 estats->member = old_estats->member + \
9869 get_stat64(&hw_stats->member)
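/* The chip's statistics block is cleared by every chip reset, so each
 * ethtool counter is kept monotonic by adding the pre-reset snapshot
 * (tp->estats_prev) to the live hardware value. */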
9871 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9873 struct tg3_ethtool_stats *estats = &tp->estats;
9874 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9875 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9877 if (!hw_stats)
9878 return old_estats;
9880 ESTAT_ADD(rx_octets);
9881 ESTAT_ADD(rx_fragments);
9882 ESTAT_ADD(rx_ucast_packets);
9883 ESTAT_ADD(rx_mcast_packets);
9884 ESTAT_ADD(rx_bcast_packets);
9885 ESTAT_ADD(rx_fcs_errors);
9886 ESTAT_ADD(rx_align_errors);
9887 ESTAT_ADD(rx_xon_pause_rcvd);
9888 ESTAT_ADD(rx_xoff_pause_rcvd);
9889 ESTAT_ADD(rx_mac_ctrl_rcvd);
9890 ESTAT_ADD(rx_xoff_entered);
9891 ESTAT_ADD(rx_frame_too_long_errors);
9892 ESTAT_ADD(rx_jabbers);
9893 ESTAT_ADD(rx_undersize_packets);
9894 ESTAT_ADD(rx_in_length_errors);
9895 ESTAT_ADD(rx_out_length_errors);
9896 ESTAT_ADD(rx_64_or_less_octet_packets);
9897 ESTAT_ADD(rx_65_to_127_octet_packets);
9898 ESTAT_ADD(rx_128_to_255_octet_packets);
9899 ESTAT_ADD(rx_256_to_511_octet_packets);
9900 ESTAT_ADD(rx_512_to_1023_octet_packets);
9901 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9902 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9903 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9904 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9905 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9907 ESTAT_ADD(tx_octets);
9908 ESTAT_ADD(tx_collisions);
9909 ESTAT_ADD(tx_xon_sent);
9910 ESTAT_ADD(tx_xoff_sent);
9911 ESTAT_ADD(tx_flow_control);
9912 ESTAT_ADD(tx_mac_errors);
9913 ESTAT_ADD(tx_single_collisions);
9914 ESTAT_ADD(tx_mult_collisions);
9915 ESTAT_ADD(tx_deferred);
9916 ESTAT_ADD(tx_excessive_collisions);
9917 ESTAT_ADD(tx_late_collisions);
9918 ESTAT_ADD(tx_collide_2times);
9919 ESTAT_ADD(tx_collide_3times);
9920 ESTAT_ADD(tx_collide_4times);
9921 ESTAT_ADD(tx_collide_5times);
9922 ESTAT_ADD(tx_collide_6times);
9923 ESTAT_ADD(tx_collide_7times);
9924 ESTAT_ADD(tx_collide_8times);
9925 ESTAT_ADD(tx_collide_9times);
9926 ESTAT_ADD(tx_collide_10times);
9927 ESTAT_ADD(tx_collide_11times);
9928 ESTAT_ADD(tx_collide_12times);
9929 ESTAT_ADD(tx_collide_13times);
9930 ESTAT_ADD(tx_collide_14times);
9931 ESTAT_ADD(tx_collide_15times);
9932 ESTAT_ADD(tx_ucast_packets);
9933 ESTAT_ADD(tx_mcast_packets);
9934 ESTAT_ADD(tx_bcast_packets);
9935 ESTAT_ADD(tx_carrier_sense_errors);
9936 ESTAT_ADD(tx_discards);
9937 ESTAT_ADD(tx_errors);
9939 ESTAT_ADD(dma_writeq_full);
9940 ESTAT_ADD(dma_write_prioq_full);
9941 ESTAT_ADD(rxbds_empty);
9942 ESTAT_ADD(rx_discards);
9943 ESTAT_ADD(rx_errors);
9944 ESTAT_ADD(rx_threshold_hit);
9946 ESTAT_ADD(dma_readq_full);
9947 ESTAT_ADD(dma_read_prioq_full);
9948 ESTAT_ADD(tx_comp_queue_full);
9950 ESTAT_ADD(ring_set_send_prod_index);
9951 ESTAT_ADD(ring_status_update);
9952 ESTAT_ADD(nic_irqs);
9953 ESTAT_ADD(nic_avoided_irqs);
9954 ESTAT_ADD(nic_tx_threshold_hit);
9956 ESTAT_ADD(mbuf_lwm_thresh_hit);
9958 return estats;
9961 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9962 struct rtnl_link_stats64 *stats)
9964 struct tg3 *tp = netdev_priv(dev);
9965 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9966 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9968 if (!hw_stats)
9969 return old_stats;
9971 stats->rx_packets = old_stats->rx_packets +
9972 get_stat64(&hw_stats->rx_ucast_packets) +
9973 get_stat64(&hw_stats->rx_mcast_packets) +
9974 get_stat64(&hw_stats->rx_bcast_packets);
9976 stats->tx_packets = old_stats->tx_packets +
9977 get_stat64(&hw_stats->tx_ucast_packets) +
9978 get_stat64(&hw_stats->tx_mcast_packets) +
9979 get_stat64(&hw_stats->tx_bcast_packets);
9981 stats->rx_bytes = old_stats->rx_bytes +
9982 get_stat64(&hw_stats->rx_octets);
9983 stats->tx_bytes = old_stats->tx_bytes +
9984 get_stat64(&hw_stats->tx_octets);
9986 stats->rx_errors = old_stats->rx_errors +
9987 get_stat64(&hw_stats->rx_errors);
9988 stats->tx_errors = old_stats->tx_errors +
9989 get_stat64(&hw_stats->tx_errors) +
9990 get_stat64(&hw_stats->tx_mac_errors) +
9991 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9992 get_stat64(&hw_stats->tx_discards);
9994 stats->multicast = old_stats->multicast +
9995 get_stat64(&hw_stats->rx_mcast_packets);
9996 stats->collisions = old_stats->collisions +
9997 get_stat64(&hw_stats->tx_collisions);
9999 stats->rx_length_errors = old_stats->rx_length_errors +
10000 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10001 get_stat64(&hw_stats->rx_undersize_packets);
10003 stats->rx_over_errors = old_stats->rx_over_errors +
10004 get_stat64(&hw_stats->rxbds_empty);
10005 stats->rx_frame_errors = old_stats->rx_frame_errors +
10006 get_stat64(&hw_stats->rx_align_errors);
10007 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10008 get_stat64(&hw_stats->tx_discards);
10009 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10010 get_stat64(&hw_stats->tx_carrier_sense_errors);
10012 stats->rx_crc_errors = old_stats->rx_crc_errors +
10013 calc_crc_errors(tp);
10015 stats->rx_missed_errors = old_stats->rx_missed_errors +
10016 get_stat64(&hw_stats->rx_discards);
10018 stats->rx_dropped = tp->rx_dropped;
10019 stats->tx_dropped = tp->tx_dropped;
10021 return stats;
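/* Bit-serial form of the standard reflected CRC-32 used by Ethernet
 * (polynomial 0xedb88320, all-ones initial value, complemented result);
 * used below to hash multicast addresses into the MAC's 128-bit
 * hash filter. */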
10024 static inline u32 calc_crc(unsigned char *buf, int len)
10026 u32 reg;
10027 u32 tmp;
10028 int j, k;
10030 reg = 0xffffffff;
10032 for (j = 0; j < len; j++) {
10033 reg ^= buf[j];
10035 for (k = 0; k < 8; k++) {
10036 tmp = reg & 0x01;
10038 reg >>= 1;
10040 if (tmp)
10041 reg ^= 0xedb88320;
10045 return ~reg;
10048 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10050 /* accept or reject all multicast frames */
10051 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10052 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10053 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10054 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10057 static void __tg3_set_rx_mode(struct net_device *dev)
10059 struct tg3 *tp = netdev_priv(dev);
10060 u32 rx_mode;
10062 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10063 RX_MODE_KEEP_VLAN_TAG);
10065 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10066 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10067 * flag clear. */
10069 if (!tg3_flag(tp, ENABLE_ASF))
10070 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10071 #endif
10073 if (dev->flags & IFF_PROMISC) {
10074 /* Promiscuous mode. */
10075 rx_mode |= RX_MODE_PROMISC;
10076 } else if (dev->flags & IFF_ALLMULTI) {
10077 /* Accept all multicast. */
10078 tg3_set_multi(tp, 1);
10079 } else if (netdev_mc_empty(dev)) {
10080 /* Reject all multicast. */
10081 tg3_set_multi(tp, 0);
10082 } else {
10083 /* Accept one or more multicast(s). */
10084 struct netdev_hw_addr *ha;
10085 u32 mc_filter[4] = { 0, };
10086 u32 regidx;
10087 u32 bit;
10088 u32 crc;
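/* The low 7 bits of the complemented CRC select one of 128 filter
 * bits: bits 5-6 pick one of the four 32-bit MAC_HASH_REG registers
 * and bits 0-4 the bit within it. */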
10090 netdev_for_each_mc_addr(ha, dev) {
10091 crc = calc_crc(ha->addr, ETH_ALEN);
10092 bit = ~crc & 0x7f;
10093 regidx = (bit & 0x60) >> 5;
10094 bit &= 0x1f;
10095 mc_filter[regidx] |= (1 << bit);
10098 tw32(MAC_HASH_REG_0, mc_filter[0]);
10099 tw32(MAC_HASH_REG_1, mc_filter[1]);
10100 tw32(MAC_HASH_REG_2, mc_filter[2]);
10101 tw32(MAC_HASH_REG_3, mc_filter[3]);
10104 if (rx_mode != tp->rx_mode) {
10105 tp->rx_mode = rx_mode;
10106 tw32_f(MAC_RX_MODE, rx_mode);
10107 udelay(10);
10111 static void tg3_set_rx_mode(struct net_device *dev)
10113 struct tg3 *tp = netdev_priv(dev);
10115 if (!netif_running(dev))
10116 return;
10118 tg3_full_lock(tp, 0);
10119 __tg3_set_rx_mode(dev);
10120 tg3_full_unlock(tp);
10123 static int tg3_get_regs_len(struct net_device *dev)
10125 return TG3_REG_BLK_SIZE;
10128 static void tg3_get_regs(struct net_device *dev,
10129 struct ethtool_regs *regs, void *_p)
10131 struct tg3 *tp = netdev_priv(dev);
10133 regs->version = 0;
10135 memset(_p, 0, TG3_REG_BLK_SIZE);
10137 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10138 return;
10140 tg3_full_lock(tp, 0);
10142 tg3_dump_legacy_regs(tp, (u32 *)_p);
10144 tg3_full_unlock(tp);
10147 static int tg3_get_eeprom_len(struct net_device *dev)
10149 struct tg3 *tp = netdev_priv(dev);
10151 return tp->nvram_size;
10154 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10156 struct tg3 *tp = netdev_priv(dev);
10157 int ret;
10158 u8 *pd;
10159 u32 i, offset, len, b_offset, b_count;
10160 __be32 val;
10162 if (tg3_flag(tp, NO_NVRAM))
10163 return -EINVAL;
10165 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10166 return -EAGAIN;
10168 offset = eeprom->offset;
10169 len = eeprom->len;
10170 eeprom->len = 0;
10172 eeprom->magic = TG3_EEPROM_MAGIC;
10174 if (offset & 3) {
10175 /* adjustments to start on required 4 byte boundary */
10176 b_offset = offset & 3;
10177 b_count = 4 - b_offset;
10178 if (b_count > len) {
10179 /* i.e. offset=1 len=2 */
10180 b_count = len;
10182 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10183 if (ret)
10184 return ret;
10185 memcpy(data, ((char *)&val) + b_offset, b_count);
10186 len -= b_count;
10187 offset += b_count;
10188 eeprom->len += b_count;
10191 /* read bytes up to the last 4 byte boundary */
10192 pd = &data[eeprom->len];
10193 for (i = 0; i < (len - (len & 3)); i += 4) {
10194 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10195 if (ret) {
10196 eeprom->len += i;
10197 return ret;
10199 memcpy(pd + i, &val, 4);
10201 eeprom->len += i;
10203 if (len & 3) {
10204 /* read last bytes not ending on 4 byte boundary */
10205 pd = &data[eeprom->len];
10206 b_count = len & 3;
10207 b_offset = offset + len - b_count;
10208 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10209 if (ret)
10210 return ret;
10211 memcpy(pd, &val, b_count);
10212 eeprom->len += b_count;
10214 return 0;
10217 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10219 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10221 struct tg3 *tp = netdev_priv(dev);
10222 int ret;
10223 u32 offset, len, b_offset, odd_len;
10224 u8 *buf;
10225 __be32 start, end;
10227 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10228 return -EAGAIN;
10230 if (tg3_flag(tp, NO_NVRAM) ||
10231 eeprom->magic != TG3_EEPROM_MAGIC)
10232 return -EINVAL;
10234 offset = eeprom->offset;
10235 len = eeprom->len;
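/* Writes must also be whole aligned words. If either end of the
 * request is unaligned, widen the range to word boundaries, preserve
 * the bordering words read back from NVRAM below (start/end), and
 * stage the merged data in a bounce buffer before writing it out.
 */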
10237 if ((b_offset = (offset & 3))) {
10238 /* adjustments to start on required 4 byte boundary */
10239 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10240 if (ret)
10241 return ret;
10242 len += b_offset;
10243 offset &= ~3;
10244 if (len < 4)
10245 len = 4;
10248 odd_len = 0;
10249 if (len & 3) {
10250 /* adjustments to end on required 4 byte boundary */
10251 odd_len = 1;
10252 len = (len + 3) & ~3;
10253 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10254 if (ret)
10255 return ret;
10258 buf = data;
10259 if (b_offset || odd_len) {
10260 buf = kmalloc(len, GFP_KERNEL);
10261 if (!buf)
10262 return -ENOMEM;
10263 if (b_offset)
10264 memcpy(buf, &start, 4);
10265 if (odd_len)
10266 memcpy(buf+len-4, &end, 4);
10267 memcpy(buf + b_offset, data, eeprom->len);
10270 ret = tg3_nvram_write_block(tp, offset, len, buf);
10272 if (buf != data)
10273 kfree(buf);
10275 return ret;
10278 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10280 struct tg3 *tp = netdev_priv(dev);
10282 if (tg3_flag(tp, USE_PHYLIB)) {
10283 struct phy_device *phydev;
10284 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10285 return -EAGAIN;
10286 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10287 return phy_ethtool_gset(phydev, cmd);
10290 cmd->supported = (SUPPORTED_Autoneg);
10292 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10293 cmd->supported |= (SUPPORTED_1000baseT_Half |
10294 SUPPORTED_1000baseT_Full);
10296 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10297 cmd->supported |= (SUPPORTED_100baseT_Half |
10298 SUPPORTED_100baseT_Full |
10299 SUPPORTED_10baseT_Half |
10300 SUPPORTED_10baseT_Full |
10301 SUPPORTED_TP);
10302 cmd->port = PORT_TP;
10303 } else {
10304 cmd->supported |= SUPPORTED_FIBRE;
10305 cmd->port = PORT_FIBRE;
10308 cmd->advertising = tp->link_config.advertising;
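/* Map the configured flow control onto IEEE 802.3 pause bits:
 * symmetric rx+tx advertises Pause alone, rx-only advertises
 * Pause|Asym_Pause, and tx-only advertises Asym_Pause alone.
 */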
10309 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10310 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10311 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10312 cmd->advertising |= ADVERTISED_Pause;
10313 } else {
10314 cmd->advertising |= ADVERTISED_Pause |
10315 ADVERTISED_Asym_Pause;
10317 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10318 cmd->advertising |= ADVERTISED_Asym_Pause;
10321 if (netif_running(dev)) {
10322 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10323 cmd->duplex = tp->link_config.active_duplex;
10324 } else {
10325 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10326 cmd->duplex = DUPLEX_INVALID;
10328 cmd->phy_address = tp->phy_addr;
10329 cmd->transceiver = XCVR_INTERNAL;
10330 cmd->autoneg = tp->link_config.autoneg;
10331 cmd->maxtxpkt = 0;
10332 cmd->maxrxpkt = 0;
10333 return 0;
10336 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10338 struct tg3 *tp = netdev_priv(dev);
10339 u32 speed = ethtool_cmd_speed(cmd);
10341 if (tg3_flag(tp, USE_PHYLIB)) {
10342 struct phy_device *phydev;
10343 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10344 return -EAGAIN;
10345 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10346 return phy_ethtool_sset(phydev, cmd);
10349 if (cmd->autoneg != AUTONEG_ENABLE &&
10350 cmd->autoneg != AUTONEG_DISABLE)
10351 return -EINVAL;
10353 if (cmd->autoneg == AUTONEG_DISABLE &&
10354 cmd->duplex != DUPLEX_FULL &&
10355 cmd->duplex != DUPLEX_HALF)
10356 return -EINVAL;
10358 if (cmd->autoneg == AUTONEG_ENABLE) {
10359 u32 mask = ADVERTISED_Autoneg |
10360 ADVERTISED_Pause |
10361 ADVERTISED_Asym_Pause;
10363 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10364 mask |= ADVERTISED_1000baseT_Half |
10365 ADVERTISED_1000baseT_Full;
10367 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10368 mask |= ADVERTISED_100baseT_Half |
10369 ADVERTISED_100baseT_Full |
10370 ADVERTISED_10baseT_Half |
10371 ADVERTISED_10baseT_Full |
10372 ADVERTISED_TP;
10373 else
10374 mask |= ADVERTISED_FIBRE;
10376 if (cmd->advertising & ~mask)
10377 return -EINVAL;
10379 mask &= (ADVERTISED_1000baseT_Half |
10380 ADVERTISED_1000baseT_Full |
10381 ADVERTISED_100baseT_Half |
10382 ADVERTISED_100baseT_Full |
10383 ADVERTISED_10baseT_Half |
10384 ADVERTISED_10baseT_Full);
10386 cmd->advertising &= mask;
10387 } else {
10388 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10389 if (speed != SPEED_1000)
10390 return -EINVAL;
10392 if (cmd->duplex != DUPLEX_FULL)
10393 return -EINVAL;
10394 } else {
10395 if (speed != SPEED_100 &&
10396 speed != SPEED_10)
10397 return -EINVAL;
10401 tg3_full_lock(tp, 0);
10403 tp->link_config.autoneg = cmd->autoneg;
10404 if (cmd->autoneg == AUTONEG_ENABLE) {
10405 tp->link_config.advertising = (cmd->advertising |
10406 ADVERTISED_Autoneg);
10407 tp->link_config.speed = SPEED_INVALID;
10408 tp->link_config.duplex = DUPLEX_INVALID;
10409 } else {
10410 tp->link_config.advertising = 0;
10411 tp->link_config.speed = speed;
10412 tp->link_config.duplex = cmd->duplex;
10415 tp->link_config.orig_speed = tp->link_config.speed;
10416 tp->link_config.orig_duplex = tp->link_config.duplex;
10417 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10419 if (netif_running(dev))
10420 tg3_setup_phy(tp, 1);
10422 tg3_full_unlock(tp);
10424 return 0;
10427 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10429 struct tg3 *tp = netdev_priv(dev);
10431 strcpy(info->driver, DRV_MODULE_NAME);
10432 strcpy(info->version, DRV_MODULE_VERSION);
10433 strcpy(info->fw_version, tp->fw_ver);
10434 strcpy(info->bus_info, pci_name(tp->pdev));
10437 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10439 struct tg3 *tp = netdev_priv(dev);
10441 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10442 wol->supported = WAKE_MAGIC;
10443 else
10444 wol->supported = 0;
10445 wol->wolopts = 0;
10446 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10447 wol->wolopts = WAKE_MAGIC;
10448 memset(&wol->sopass, 0, sizeof(wol->sopass));
10451 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10453 struct tg3 *tp = netdev_priv(dev);
10454 struct device *dp = &tp->pdev->dev;
10456 if (wol->wolopts & ~WAKE_MAGIC)
10457 return -EINVAL;
10458 if ((wol->wolopts & WAKE_MAGIC) &&
10459 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10460 return -EINVAL;
10462 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10464 spin_lock_bh(&tp->lock);
10465 if (device_may_wakeup(dp))
10466 tg3_flag_set(tp, WOL_ENABLE);
10467 else
10468 tg3_flag_clear(tp, WOL_ENABLE);
10469 spin_unlock_bh(&tp->lock);
10471 return 0;
10474 static u32 tg3_get_msglevel(struct net_device *dev)
10476 struct tg3 *tp = netdev_priv(dev);
10477 return tp->msg_enable;
10480 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10482 struct tg3 *tp = netdev_priv(dev);
10483 tp->msg_enable = value;
10486 static int tg3_nway_reset(struct net_device *dev)
10488 struct tg3 *tp = netdev_priv(dev);
10489 int r;
10491 if (!netif_running(dev))
10492 return -EAGAIN;
10494 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10495 return -EINVAL;
10497 if (tg3_flag(tp, USE_PHYLIB)) {
10498 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10499 return -EAGAIN;
10500 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10501 } else {
10502 u32 bmcr;
10504 spin_lock_bh(&tp->lock);
10505 r = -EINVAL;
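/* BMCR is read twice; the first result is discarded, presumably to
 * flush a stale value before the checked read that decides whether
 * autonegotiation can be restarted.
 */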
10506 tg3_readphy(tp, MII_BMCR, &bmcr);
10507 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10508 ((bmcr & BMCR_ANENABLE) ||
10509 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10510 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10511 BMCR_ANENABLE);
10512 r = 0;
10514 spin_unlock_bh(&tp->lock);
10517 return r;
10520 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10522 struct tg3 *tp = netdev_priv(dev);
10524 ering->rx_max_pending = tp->rx_std_ring_mask;
10525 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10526 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10527 else
10528 ering->rx_jumbo_max_pending = 0;
10530 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10532 ering->rx_pending = tp->rx_pending;
10533 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10534 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10535 else
10536 ering->rx_jumbo_pending = 0;
10538 ering->tx_pending = tp->napi[0].tx_pending;
10541 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10543 struct tg3 *tp = netdev_priv(dev);
10544 int i, irq_sync = 0, err = 0;
10546 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10547 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10548 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10549 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10550 (tg3_flag(tp, TSO_BUG) &&
10551 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10552 return -EINVAL;
10554 if (netif_running(dev)) {
10555 tg3_phy_stop(tp);
10556 tg3_netif_stop(tp);
10557 irq_sync = 1;
10560 tg3_full_lock(tp, irq_sync);
10562 tp->rx_pending = ering->rx_pending;
10564 if (tg3_flag(tp, MAX_RXPEND_64) &&
10565 tp->rx_pending > 63)
10566 tp->rx_pending = 63;
10567 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10569 for (i = 0; i < tp->irq_max; i++)
10570 tp->napi[i].tx_pending = ering->tx_pending;
10572 if (netif_running(dev)) {
10573 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10574 err = tg3_restart_hw(tp, 1);
10575 if (!err)
10576 tg3_netif_start(tp);
10579 tg3_full_unlock(tp);
10581 if (irq_sync && !err)
10582 tg3_phy_start(tp);
10584 return err;
10587 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10589 struct tg3 *tp = netdev_priv(dev);
10591 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10593 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10594 epause->rx_pause = 1;
10595 else
10596 epause->rx_pause = 0;
10598 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10599 epause->tx_pause = 1;
10600 else
10601 epause->tx_pause = 0;
10604 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10606 struct tg3 *tp = netdev_priv(dev);
10607 int err = 0;
10609 if (tg3_flag(tp, USE_PHYLIB)) {
10610 u32 newadv;
10611 struct phy_device *phydev;
10613 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10615 if (!(phydev->supported & SUPPORTED_Pause) ||
10616 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10617 (epause->rx_pause != epause->tx_pause)))
10618 return -EINVAL;
10620 tp->link_config.flowctrl = 0;
10621 if (epause->rx_pause) {
10622 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10624 if (epause->tx_pause) {
10625 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10626 newadv = ADVERTISED_Pause;
10627 } else
10628 newadv = ADVERTISED_Pause |
10629 ADVERTISED_Asym_Pause;
10630 } else if (epause->tx_pause) {
10631 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10632 newadv = ADVERTISED_Asym_Pause;
10633 } else
10634 newadv = 0;
10636 if (epause->autoneg)
10637 tg3_flag_set(tp, PAUSE_AUTONEG);
10638 else
10639 tg3_flag_clear(tp, PAUSE_AUTONEG);
10641 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10642 u32 oldadv = phydev->advertising &
10643 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10644 if (oldadv != newadv) {
10645 phydev->advertising &=
10646 ~(ADVERTISED_Pause |
10647 ADVERTISED_Asym_Pause);
10648 phydev->advertising |= newadv;
10649 if (phydev->autoneg) {
10650 /*
10651 * Always renegotiate the link to
10652 * inform our link partner of our
10653 * flow control settings, even if the
10654 * flow control is forced. Let
10655 * tg3_adjust_link() do the final
10656 * flow control setup.
10657 */
10658 return phy_start_aneg(phydev);
10662 if (!epause->autoneg)
10663 tg3_setup_flow_control(tp, 0, 0);
10664 } else {
10665 tp->link_config.orig_advertising &=
10666 ~(ADVERTISED_Pause |
10667 ADVERTISED_Asym_Pause);
10668 tp->link_config.orig_advertising |= newadv;
10670 } else {
10671 int irq_sync = 0;
10673 if (netif_running(dev)) {
10674 tg3_netif_stop(tp);
10675 irq_sync = 1;
10678 tg3_full_lock(tp, irq_sync);
10680 if (epause->autoneg)
10681 tg3_flag_set(tp, PAUSE_AUTONEG);
10682 else
10683 tg3_flag_clear(tp, PAUSE_AUTONEG);
10684 if (epause->rx_pause)
10685 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10686 else
10687 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10688 if (epause->tx_pause)
10689 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10690 else
10691 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10693 if (netif_running(dev)) {
10694 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10695 err = tg3_restart_hw(tp, 1);
10696 if (!err)
10697 tg3_netif_start(tp);
10700 tg3_full_unlock(tp);
10703 return err;
10706 static int tg3_get_sset_count(struct net_device *dev, int sset)
10708 switch (sset) {
10709 case ETH_SS_TEST:
10710 return TG3_NUM_TEST;
10711 case ETH_SS_STATS:
10712 return TG3_NUM_STATS;
10713 default:
10714 return -EOPNOTSUPP;
10718 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10720 switch (stringset) {
10721 case ETH_SS_STATS:
10722 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10723 break;
10724 case ETH_SS_TEST:
10725 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10726 break;
10727 default:
10728 WARN_ON(1); /* we need a WARN() */
10729 break;
10733 static int tg3_set_phys_id(struct net_device *dev,
10734 enum ethtool_phys_id_state state)
10736 struct tg3 *tp = netdev_priv(dev);
10738 if (!netif_running(tp->dev))
10739 return -EAGAIN;
10741 switch (state) {
10742 case ETHTOOL_ID_ACTIVE:
10743 return 1; /* cycle on/off once per second */
10745 case ETHTOOL_ID_ON:
10746 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10747 LED_CTRL_1000MBPS_ON |
10748 LED_CTRL_100MBPS_ON |
10749 LED_CTRL_10MBPS_ON |
10750 LED_CTRL_TRAFFIC_OVERRIDE |
10751 LED_CTRL_TRAFFIC_BLINK |
10752 LED_CTRL_TRAFFIC_LED);
10753 break;
10755 case ETHTOOL_ID_OFF:
10756 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10757 LED_CTRL_TRAFFIC_OVERRIDE);
10758 break;
10760 case ETHTOOL_ID_INACTIVE:
10761 tw32(MAC_LED_CTRL, tp->led_ctrl);
10762 break;
10765 return 0;
10768 static void tg3_get_ethtool_stats(struct net_device *dev,
10769 struct ethtool_stats *estats, u64 *tmp_stats)
10771 struct tg3 *tp = netdev_priv(dev);
10772 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
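/* Locate and read the VPD block: EEPROM-magic parts may point to an
 * extended VPD region via the NVRAM directory, otherwise the fixed
 * TG3_NVM_VPD_OFF/TG3_NVM_VPD_LEN window is used. Data then comes
 * through the NVRAM interface for EEPROM-magic parts, or through the
 * PCI VPD capability (up to three pci_read_vpd() chunks) otherwise.
 */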
10775 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10777 int i;
10778 __be32 *buf;
10779 u32 offset = 0, len = 0;
10780 u32 magic, val;
10782 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10783 return NULL;
10785 if (magic == TG3_EEPROM_MAGIC) {
10786 for (offset = TG3_NVM_DIR_START;
10787 offset < TG3_NVM_DIR_END;
10788 offset += TG3_NVM_DIRENT_SIZE) {
10789 if (tg3_nvram_read(tp, offset, &val))
10790 return NULL;
10792 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10793 TG3_NVM_DIRTYPE_EXTVPD)
10794 break;
10797 if (offset != TG3_NVM_DIR_END) {
10798 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10799 if (tg3_nvram_read(tp, offset + 4, &offset))
10800 return NULL;
10802 offset = tg3_nvram_logical_addr(tp, offset);
10806 if (!offset || !len) {
10807 offset = TG3_NVM_VPD_OFF;
10808 len = TG3_NVM_VPD_LEN;
10811 buf = kmalloc(len, GFP_KERNEL);
10812 if (buf == NULL)
10813 return NULL;
10815 if (magic == TG3_EEPROM_MAGIC) {
10816 for (i = 0; i < len; i += 4) {
10817 /* The data is in little-endian format in NVRAM.
10818 * Use the big-endian read routines to preserve
10819 * the byte order as it exists in NVRAM.
10820 */
10821 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10822 goto error;
10824 } else {
10825 u8 *ptr;
10826 ssize_t cnt;
10827 unsigned int pos = 0;
10829 ptr = (u8 *)&buf[0];
10830 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10831 cnt = pci_read_vpd(tp->pdev, pos,
10832 len - pos, ptr);
10833 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10834 cnt = 0;
10835 else if (cnt < 0)
10836 goto error;
10838 if (pos != len)
10839 goto error;
10842 *vpdlen = len;
10844 return buf;
10846 error:
10847 kfree(buf);
10848 return NULL;
10851 #define NVRAM_TEST_SIZE 0x100
10852 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10853 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10854 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10855 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
10856 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
10857 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
10858 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10859 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10861 static int tg3_test_nvram(struct tg3 *tp)
10863 u32 csum, magic, len;
10864 __be32 *buf;
10865 int i, j, k, err = 0, size;
10867 if (tg3_flag(tp, NO_NVRAM))
10868 return 0;
10870 if (tg3_nvram_read(tp, 0, &magic) != 0)
10871 return -EIO;
10873 if (magic == TG3_EEPROM_MAGIC)
10874 size = NVRAM_TEST_SIZE;
10875 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10876 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10877 TG3_EEPROM_SB_FORMAT_1) {
10878 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10879 case TG3_EEPROM_SB_REVISION_0:
10880 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10881 break;
10882 case TG3_EEPROM_SB_REVISION_2:
10883 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10884 break;
10885 case TG3_EEPROM_SB_REVISION_3:
10886 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10887 break;
10888 case TG3_EEPROM_SB_REVISION_4:
10889 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10890 break;
10891 case TG3_EEPROM_SB_REVISION_5:
10892 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10893 break;
10894 case TG3_EEPROM_SB_REVISION_6:
10895 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10896 break;
10897 default:
10898 return -EIO;
10900 } else
10901 return 0;
10902 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10903 size = NVRAM_SELFBOOT_HW_SIZE;
10904 else
10905 return -EIO;
10907 buf = kmalloc(size, GFP_KERNEL);
10908 if (buf == NULL)
10909 return -ENOMEM;
10911 err = -EIO;
10912 for (i = 0, j = 0; i < size; i += 4, j++) {
10913 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10914 if (err)
10915 break;
10917 if (i < size)
10918 goto out;
10920 /* Selfboot format */
10921 magic = be32_to_cpu(buf[0]);
10922 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10923 TG3_EEPROM_MAGIC_FW) {
10924 u8 *buf8 = (u8 *) buf, csum8 = 0;
10926 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10927 TG3_EEPROM_SB_REVISION_2) {
10928 /* For rev 2, the csum doesn't include the MBA. */
10929 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10930 csum8 += buf8[i];
10931 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10932 csum8 += buf8[i];
10933 } else {
10934 for (i = 0; i < size; i++)
10935 csum8 += buf8[i];
10938 if (csum8 == 0) {
10939 err = 0;
10940 goto out;
10943 err = -EIO;
10944 goto out;
10947 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10948 TG3_EEPROM_MAGIC_HW) {
10949 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10950 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10951 u8 *buf8 = (u8 *) buf;
10953 /* Separate the parity bits and the data bytes. */
10954 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10955 if ((i == 0) || (i == 8)) {
10956 int l;
10957 u8 msk;
10959 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10960 parity[k++] = buf8[i] & msk;
10961 i++;
10962 } else if (i == 16) {
10963 int l;
10964 u8 msk;
10966 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10967 parity[k++] = buf8[i] & msk;
10968 i++;
10970 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10971 parity[k++] = buf8[i] & msk;
10972 i++;
10974 data[j++] = buf8[i];
10977 err = -EIO;
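/* The stored parity bit must be set exactly when its data byte has an
 * even number of one bits, i.e. every 9-bit data+parity group must
 * have odd parity overall.
 */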
10978 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10979 u8 hw8 = hweight8(data[i]);
10981 if ((hw8 & 0x1) && parity[i])
10982 goto out;
10983 else if (!(hw8 & 0x1) && !parity[i])
10984 goto out;
10986 err = 0;
10987 goto out;
10990 err = -EIO;
10992 /* Bootstrap checksum at offset 0x10 */
10993 csum = calc_crc((unsigned char *) buf, 0x10);
10994 if (csum != le32_to_cpu(buf[0x10/4]))
10995 goto out;
10997 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10998 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10999 if (csum != le32_to_cpu(buf[0xfc/4]))
11000 goto out;
11002 kfree(buf);
11004 buf = tg3_vpd_readblock(tp, &len);
11005 if (!buf)
11006 return -ENOMEM;
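/* Verify the VPD read-only section checksum: the byte stored under
 * the CHKSUM keyword must make the sum of all VPD bytes from offset 0
 * through the checksum byte itself equal zero modulo 256.
 */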
11008 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11009 if (i > 0) {
11010 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11011 if (j < 0)
11012 goto out;
11014 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11015 goto out;
11017 i += PCI_VPD_LRDT_TAG_SIZE;
11018 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11019 PCI_VPD_RO_KEYWORD_CHKSUM);
11020 if (j > 0) {
11021 u8 csum8 = 0;
11023 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11025 for (i = 0; i <= j; i++)
11026 csum8 += ((u8 *)buf)[i];
11028 if (csum8)
11029 goto out;
11033 err = 0;
11035 out:
11036 kfree(buf);
11037 return err;
11040 #define TG3_SERDES_TIMEOUT_SEC 2
11041 #define TG3_COPPER_TIMEOUT_SEC 6
11043 static int tg3_test_link(struct tg3 *tp)
11045 int i, max;
11047 if (!netif_running(tp->dev))
11048 return -ENODEV;
11050 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11051 max = TG3_SERDES_TIMEOUT_SEC;
11052 else
11053 max = TG3_COPPER_TIMEOUT_SEC;
11055 for (i = 0; i < max; i++) {
11056 if (netif_carrier_ok(tp->dev))
11057 return 0;
11059 if (msleep_interruptible(1000))
11060 break;
11063 return -EIO;
11066 /* Only test the commonly used registers */
11067 static int tg3_test_registers(struct tg3 *tp)
11069 int i, is_5705, is_5750;
11070 u32 offset, read_mask, write_mask, val, save_val, read_val;
11071 static struct {
11072 u16 offset;
11073 u16 flags;
11074 #define TG3_FL_5705 0x1
11075 #define TG3_FL_NOT_5705 0x2
11076 #define TG3_FL_NOT_5788 0x4
11077 #define TG3_FL_NOT_5750 0x8
11078 u32 read_mask;
11079 u32 write_mask;
11080 } reg_tbl[] = {
11081 /* MAC Control Registers */
11082 { MAC_MODE, TG3_FL_NOT_5705,
11083 0x00000000, 0x00ef6f8c },
11084 { MAC_MODE, TG3_FL_5705,
11085 0x00000000, 0x01ef6b8c },
11086 { MAC_STATUS, TG3_FL_NOT_5705,
11087 0x03800107, 0x00000000 },
11088 { MAC_STATUS, TG3_FL_5705,
11089 0x03800100, 0x00000000 },
11090 { MAC_ADDR_0_HIGH, 0x0000,
11091 0x00000000, 0x0000ffff },
11092 { MAC_ADDR_0_LOW, 0x0000,
11093 0x00000000, 0xffffffff },
11094 { MAC_RX_MTU_SIZE, 0x0000,
11095 0x00000000, 0x0000ffff },
11096 { MAC_TX_MODE, 0x0000,
11097 0x00000000, 0x00000070 },
11098 { MAC_TX_LENGTHS, 0x0000,
11099 0x00000000, 0x00003fff },
11100 { MAC_RX_MODE, TG3_FL_NOT_5705,
11101 0x00000000, 0x000007fc },
11102 { MAC_RX_MODE, TG3_FL_5705,
11103 0x00000000, 0x000007dc },
11104 { MAC_HASH_REG_0, 0x0000,
11105 0x00000000, 0xffffffff },
11106 { MAC_HASH_REG_1, 0x0000,
11107 0x00000000, 0xffffffff },
11108 { MAC_HASH_REG_2, 0x0000,
11109 0x00000000, 0xffffffff },
11110 { MAC_HASH_REG_3, 0x0000,
11111 0x00000000, 0xffffffff },
11113 /* Receive Data and Receive BD Initiator Control Registers. */
11114 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11115 0x00000000, 0xffffffff },
11116 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11117 0x00000000, 0xffffffff },
11118 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11119 0x00000000, 0x00000003 },
11120 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11121 0x00000000, 0xffffffff },
11122 { RCVDBDI_STD_BD+0, 0x0000,
11123 0x00000000, 0xffffffff },
11124 { RCVDBDI_STD_BD+4, 0x0000,
11125 0x00000000, 0xffffffff },
11126 { RCVDBDI_STD_BD+8, 0x0000,
11127 0x00000000, 0xffff0002 },
11128 { RCVDBDI_STD_BD+0xc, 0x0000,
11129 0x00000000, 0xffffffff },
11131 /* Receive BD Initiator Control Registers. */
11132 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11133 0x00000000, 0xffffffff },
11134 { RCVBDI_STD_THRESH, TG3_FL_5705,
11135 0x00000000, 0x000003ff },
11136 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11137 0x00000000, 0xffffffff },
11139 /* Host Coalescing Control Registers. */
11140 { HOSTCC_MODE, TG3_FL_NOT_5705,
11141 0x00000000, 0x00000004 },
11142 { HOSTCC_MODE, TG3_FL_5705,
11143 0x00000000, 0x000000f6 },
11144 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11145 0x00000000, 0xffffffff },
11146 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11147 0x00000000, 0x000003ff },
11148 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11149 0x00000000, 0xffffffff },
11150 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11151 0x00000000, 0x000003ff },
11152 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11153 0x00000000, 0xffffffff },
11154 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11155 0x00000000, 0x000000ff },
11156 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11157 0x00000000, 0xffffffff },
11158 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11159 0x00000000, 0x000000ff },
11160 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11161 0x00000000, 0xffffffff },
11162 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11163 0x00000000, 0xffffffff },
11164 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11165 0x00000000, 0xffffffff },
11166 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11167 0x00000000, 0x000000ff },
11168 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11169 0x00000000, 0xffffffff },
11170 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11171 0x00000000, 0x000000ff },
11172 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11173 0x00000000, 0xffffffff },
11174 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11175 0x00000000, 0xffffffff },
11176 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11177 0x00000000, 0xffffffff },
11178 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11179 0x00000000, 0xffffffff },
11180 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11181 0x00000000, 0xffffffff },
11182 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11183 0xffffffff, 0x00000000 },
11184 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11185 0xffffffff, 0x00000000 },
11187 /* Buffer Manager Control Registers. */
11188 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11189 0x00000000, 0x007fff80 },
11190 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11191 0x00000000, 0x007fffff },
11192 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11193 0x00000000, 0x0000003f },
11194 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11195 0x00000000, 0x000001ff },
11196 { BUFMGR_MB_HIGH_WATER, 0x0000,
11197 0x00000000, 0x000001ff },
11198 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11199 0xffffffff, 0x00000000 },
11200 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11201 0xffffffff, 0x00000000 },
11203 /* Mailbox Registers */
11204 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11205 0x00000000, 0x000001ff },
11206 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11207 0x00000000, 0x000001ff },
11208 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11209 0x00000000, 0x000007ff },
11210 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11211 0x00000000, 0x000001ff },
11213 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11216 is_5705 = is_5750 = 0;
11217 if (tg3_flag(tp, 5705_PLUS)) {
11218 is_5705 = 1;
11219 if (tg3_flag(tp, 5750_PLUS))
11220 is_5750 = 1;
11223 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11224 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11225 continue;
11227 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11228 continue;
11230 if (tg3_flag(tp, IS_5788) &&
11231 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11232 continue;
11234 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11235 continue;
11237 offset = (u32) reg_tbl[i].offset;
11238 read_mask = reg_tbl[i].read_mask;
11239 write_mask = reg_tbl[i].write_mask;
11241 /* Save the original register content */
11242 save_val = tr32(offset);
11244 /* Determine the read-only value. */
11245 read_val = save_val & read_mask;
11247 /* Write zero to the register, then make sure the read-only bits
11248 * are not changed and the read/write bits are all zeros.
11249 */
11250 tw32(offset, 0);
11252 val = tr32(offset);
11254 /* Test the read-only and read/write bits. */
11255 if (((val & read_mask) != read_val) || (val & write_mask))
11256 goto out;
11258 /* Write ones to all the bits defined by RdMask and WrMask, then
11259 * make sure the read-only bits are not changed and the
11260 * read/write bits are all ones.
11261 */
11262 tw32(offset, read_mask | write_mask);
11264 val = tr32(offset);
11266 /* Test the read-only bits. */
11267 if ((val & read_mask) != read_val)
11268 goto out;
11270 /* Test the read/write bits. */
11271 if ((val & write_mask) != write_mask)
11272 goto out;
11274 tw32(offset, save_val);
11277 return 0;
11279 out:
11280 if (netif_msg_hw(tp))
11281 netdev_err(tp->dev,
11282 "Register test failed at offset %x\n", offset);
11283 tw32(offset, save_val);
11284 return -EIO;
11287 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11289 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11290 int i;
11291 u32 j;
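/* Each word in the region is written and immediately read back for
 * every pattern; all-zeros, all-ones and the alternating 0xaa55a55a
 * pattern together should catch stuck or shorted bits.
 */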
11293 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11294 for (j = 0; j < len; j += 4) {
11295 u32 val;
11297 tg3_write_mem(tp, offset + j, test_pattern[i]);
11298 tg3_read_mem(tp, offset + j, &val);
11299 if (val != test_pattern[i])
11300 return -EIO;
11303 return 0;
11306 static int tg3_test_memory(struct tg3 *tp)
11308 static struct mem_entry {
11309 u32 offset;
11310 u32 len;
11311 } mem_tbl_570x[] = {
11312 { 0x00000000, 0x00b50},
11313 { 0x00002000, 0x1c000},
11314 { 0xffffffff, 0x00000}
11315 }, mem_tbl_5705[] = {
11316 { 0x00000100, 0x0000c},
11317 { 0x00000200, 0x00008},
11318 { 0x00004000, 0x00800},
11319 { 0x00006000, 0x01000},
11320 { 0x00008000, 0x02000},
11321 { 0x00010000, 0x0e000},
11322 { 0xffffffff, 0x00000}
11323 }, mem_tbl_5755[] = {
11324 { 0x00000200, 0x00008},
11325 { 0x00004000, 0x00800},
11326 { 0x00006000, 0x00800},
11327 { 0x00008000, 0x02000},
11328 { 0x00010000, 0x0c000},
11329 { 0xffffffff, 0x00000}
11330 }, mem_tbl_5906[] = {
11331 { 0x00000200, 0x00008},
11332 { 0x00004000, 0x00400},
11333 { 0x00006000, 0x00400},
11334 { 0x00008000, 0x01000},
11335 { 0x00010000, 0x01000},
11336 { 0xffffffff, 0x00000}
11337 }, mem_tbl_5717[] = {
11338 { 0x00000200, 0x00008},
11339 { 0x00010000, 0x0a000},
11340 { 0x00020000, 0x13c00},
11341 { 0xffffffff, 0x00000}
11342 }, mem_tbl_57765[] = {
11343 { 0x00000200, 0x00008},
11344 { 0x00004000, 0x00800},
11345 { 0x00006000, 0x09800},
11346 { 0x00010000, 0x0a000},
11347 { 0xffffffff, 0x00000}
11349 struct mem_entry *mem_tbl;
11350 int err = 0;
11351 int i;
11353 if (tg3_flag(tp, 5717_PLUS))
11354 mem_tbl = mem_tbl_5717;
11355 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11356 mem_tbl = mem_tbl_57765;
11357 else if (tg3_flag(tp, 5755_PLUS))
11358 mem_tbl = mem_tbl_5755;
11359 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11360 mem_tbl = mem_tbl_5906;
11361 else if (tg3_flag(tp, 5705_PLUS))
11362 mem_tbl = mem_tbl_5705;
11363 else
11364 mem_tbl = mem_tbl_570x;
11366 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11367 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11368 if (err)
11369 break;
11372 return err;
11375 #define TG3_TSO_MSS 500
11377 #define TG3_TSO_IP_HDR_LEN 20
11378 #define TG3_TSO_TCP_HDR_LEN 20
11379 #define TG3_TSO_TCP_OPT_LEN 12
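/* Canned TSO test frame, copied in just past the two MAC addresses:
 * IPv4 EtherType (0x0800), a 20-byte IPv4 header (TCP, 10.0.0.1 ->
 * 10.0.0.2), and a TCP header carrying 12 bytes of timestamp options.
 */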
11381 static const u8 tg3_tso_header[] = {
11382 0x08, 0x00,
11383 0x45, 0x00, 0x00, 0x00,
11384 0x00, 0x00, 0x40, 0x00,
11385 0x40, 0x06, 0x00, 0x00,
11386 0x0a, 0x00, 0x00, 0x01,
11387 0x0a, 0x00, 0x00, 0x02,
11388 0x0d, 0x00, 0xe0, 0x00,
11389 0x00, 0x00, 0x01, 0x00,
11390 0x00, 0x00, 0x02, 0x00,
11391 0x80, 0x10, 0x10, 0x00,
11392 0x14, 0x09, 0x00, 0x00,
11393 0x01, 0x01, 0x08, 0x0a,
11394 0x11, 0x11, 0x11, 0x11,
11395 0x11, 0x11, 0x11, 0x11,
11398 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11400 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11401 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11402 u32 budget;
11403 struct sk_buff *skb, *rx_skb;
11404 u8 *tx_data;
11405 dma_addr_t map;
11406 int num_pkts, tx_len, rx_len, i, err;
11407 struct tg3_rx_buffer_desc *desc;
11408 struct tg3_napi *tnapi, *rnapi;
11409 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11411 tnapi = &tp->napi[0];
11412 rnapi = &tp->napi[0];
11413 if (tp->irq_cnt > 1) {
11414 if (tg3_flag(tp, ENABLE_RSS))
11415 rnapi = &tp->napi[1];
11416 if (tg3_flag(tp, ENABLE_TSS))
11417 tnapi = &tp->napi[1];
11419 coal_now = tnapi->coal_now | rnapi->coal_now;
11421 err = -EIO;
11423 tx_len = pktsz;
11424 skb = netdev_alloc_skb(tp->dev, tx_len);
11425 if (!skb)
11426 return -ENOMEM;
11428 tx_data = skb_put(skb, tx_len);
11429 memcpy(tx_data, tp->dev->dev_addr, 6);
11430 memset(tx_data + 6, 0x0, 8);
11432 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11434 if (tso_loopback) {
11435 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11437 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11438 TG3_TSO_TCP_OPT_LEN;
11440 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11441 sizeof(tg3_tso_header));
11442 mss = TG3_TSO_MSS;
11444 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11445 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11447 /* Set the total length field in the IP header */
11448 iph->tot_len = htons((u16)(mss + hdr_len));
11450 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11451 TXD_FLAG_CPU_POST_DMA);
11453 if (tg3_flag(tp, HW_TSO_1) ||
11454 tg3_flag(tp, HW_TSO_2) ||
11455 tg3_flag(tp, HW_TSO_3)) {
11456 struct tcphdr *th;
11457 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11458 th = (struct tcphdr *)&tx_data[val];
11459 th->check = 0;
11460 } else
11461 base_flags |= TXD_FLAG_TCPUDP_CSUM;
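/* Each hardware TSO generation encodes the LSO header length in a
 * different set of descriptor bits, so hdr_len is scattered across
 * the mss and base_flags fields accordingly below.
 */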
11463 if (tg3_flag(tp, HW_TSO_3)) {
11464 mss |= (hdr_len & 0xc) << 12;
11465 if (hdr_len & 0x10)
11466 base_flags |= 0x00000010;
11467 base_flags |= (hdr_len & 0x3e0) << 5;
11468 } else if (tg3_flag(tp, HW_TSO_2))
11469 mss |= hdr_len << 9;
11470 else if (tg3_flag(tp, HW_TSO_1) ||
11471 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11472 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11473 } else {
11474 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11477 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11478 } else {
11479 num_pkts = 1;
11480 data_off = ETH_HLEN;
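/* Fill the payload with a rolling byte pattern so the receive side
 * can verify the data byte-for-byte after the loopback.
 */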
11483 for (i = data_off; i < tx_len; i++)
11484 tx_data[i] = (u8) (i & 0xff);
11486 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11487 if (pci_dma_mapping_error(tp->pdev, map)) {
11488 dev_kfree_skb(skb);
11489 return -EIO;
11492 val = tnapi->tx_prod;
11493 tnapi->tx_buffers[val].skb = skb;
11494 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11496 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11497 rnapi->coal_now);
11499 udelay(10);
11501 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11503 budget = tg3_tx_avail(tnapi);
11504 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11505 base_flags | TXD_FLAG_END, mss, 0)) {
11506 tnapi->tx_buffers[val].skb = NULL;
11507 dev_kfree_skb(skb);
11508 return -EIO;
11511 tnapi->tx_prod++;
11513 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11514 tr32_mailbox(tnapi->prodmbox);
11516 udelay(10);
11518 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11519 for (i = 0; i < 35; i++) {
11520 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11521 coal_now);
11523 udelay(10);
11525 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11526 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11527 if ((tx_idx == tnapi->tx_prod) &&
11528 (rx_idx == (rx_start_idx + num_pkts)))
11529 break;
11532 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11533 dev_kfree_skb(skb);
11535 if (tx_idx != tnapi->tx_prod)
11536 goto out;
11538 if (rx_idx != rx_start_idx + num_pkts)
11539 goto out;
11541 val = data_off;
11542 while (rx_idx != rx_start_idx) {
11543 desc = &rnapi->rx_rcb[rx_start_idx++];
11544 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11545 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11547 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11548 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11549 goto out;
11551 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11552 - ETH_FCS_LEN;
11554 if (!tso_loopback) {
11555 if (rx_len != tx_len)
11556 goto out;
11558 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11559 if (opaque_key != RXD_OPAQUE_RING_STD)
11560 goto out;
11561 } else {
11562 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11563 goto out;
11565 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11566 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11567 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11568 goto out;
11571 if (opaque_key == RXD_OPAQUE_RING_STD) {
11572 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11573 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11574 mapping);
11575 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11576 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11577 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11578 mapping);
11579 } else
11580 goto out;
11582 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11583 PCI_DMA_FROMDEVICE);
11585 for (i = data_off; i < rx_len; i++, val++) {
11586 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11587 goto out;
11591 err = 0;
11593 /* tg3_free_rings will unmap and free the rx_skb */
11594 out:
11595 return err;
11598 #define TG3_STD_LOOPBACK_FAILED 1
11599 #define TG3_JMB_LOOPBACK_FAILED 2
11600 #define TG3_TSO_LOOPBACK_FAILED 4
11601 #define TG3_LOOPBACK_FAILED \
11602 (TG3_STD_LOOPBACK_FAILED | \
11603 TG3_JMB_LOOPBACK_FAILED | \
11604 TG3_TSO_LOOPBACK_FAILED)
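/* data[0], data[1] and data[2] accumulate the failure bits above for
 * MAC loopback, internal PHY loopback and external loopback
 * respectively.
 */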
11606 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11608 int err = -EIO;
11609 u32 eee_cap;
11611 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11612 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11614 if (!netif_running(tp->dev)) {
11615 data[0] = TG3_LOOPBACK_FAILED;
11616 data[1] = TG3_LOOPBACK_FAILED;
11617 if (do_extlpbk)
11618 data[2] = TG3_LOOPBACK_FAILED;
11619 goto done;
11622 err = tg3_reset_hw(tp, 1);
11623 if (err) {
11624 data[0] = TG3_LOOPBACK_FAILED;
11625 data[1] = TG3_LOOPBACK_FAILED;
11626 if (do_extlpbk)
11627 data[2] = TG3_LOOPBACK_FAILED;
11628 goto done;
11631 if (tg3_flag(tp, ENABLE_RSS)) {
11632 int i;
11634 /* Reroute all rx packets to the 1st queue */
11635 for (i = MAC_RSS_INDIR_TBL_0;
11636 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11637 tw32(i, 0x0);
11640 /* HW errata - mac loopback fails in some cases on 5780.
11641 * Normal traffic and PHY loopback are not affected by
11642 * errata. Also, the MAC loopback test is deprecated for
11643 * all newer ASIC revisions.
11644 */
11645 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11646 !tg3_flag(tp, CPMU_PRESENT)) {
11647 tg3_mac_loopback(tp, true);
11649 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11650 data[0] |= TG3_STD_LOOPBACK_FAILED;
11652 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11653 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11654 data[0] |= TG3_JMB_LOOPBACK_FAILED;
11656 tg3_mac_loopback(tp, false);
11659 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11660 !tg3_flag(tp, USE_PHYLIB)) {
11661 int i;
11663 tg3_phy_lpbk_set(tp, 0, false);
11665 /* Wait for link */
11666 for (i = 0; i < 100; i++) {
11667 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11668 break;
11669 mdelay(1);
11672 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11673 data[1] |= TG3_STD_LOOPBACK_FAILED;
11674 if (tg3_flag(tp, TSO_CAPABLE) &&
11675 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11676 data[1] |= TG3_TSO_LOOPBACK_FAILED;
11677 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11678 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11679 data[1] |= TG3_JMB_LOOPBACK_FAILED;
11681 if (do_extlpbk) {
11682 tg3_phy_lpbk_set(tp, 0, true);
11684 /* All link indications report up, but the hardware
11685 * isn't really ready for about 20 msec. Double it
11686 * to be sure.
11687 */
11688 mdelay(40);
11690 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11691 data[2] |= TG3_STD_LOOPBACK_FAILED;
11692 if (tg3_flag(tp, TSO_CAPABLE) &&
11693 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11694 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11695 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11696 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11697 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11700 /* Re-enable gphy autopowerdown. */
11701 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11702 tg3_phy_toggle_apd(tp, true);
11705 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11707 done:
11708 tp->phy_flags |= eee_cap;
11710 return err;
11713 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11714 u64 *data)
11716 struct tg3 *tp = netdev_priv(dev);
11717 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11719 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11720 tg3_power_up(tp)) {
11721 etest->flags |= ETH_TEST_FL_FAILED;
11722 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11723 return;
11726 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11728 if (tg3_test_nvram(tp) != 0) {
11729 etest->flags |= ETH_TEST_FL_FAILED;
11730 data[0] = 1;
11732 if (!doextlpbk && tg3_test_link(tp)) {
11733 etest->flags |= ETH_TEST_FL_FAILED;
11734 data[1] = 1;
11736 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11737 int err, err2 = 0, irq_sync = 0;
11739 if (netif_running(dev)) {
11740 tg3_phy_stop(tp);
11741 tg3_netif_stop(tp);
11742 irq_sync = 1;
11745 tg3_full_lock(tp, irq_sync);
11747 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11748 err = tg3_nvram_lock(tp);
11749 tg3_halt_cpu(tp, RX_CPU_BASE);
11750 if (!tg3_flag(tp, 5705_PLUS))
11751 tg3_halt_cpu(tp, TX_CPU_BASE);
11752 if (!err)
11753 tg3_nvram_unlock(tp);
11755 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11756 tg3_phy_reset(tp);
11758 if (tg3_test_registers(tp) != 0) {
11759 etest->flags |= ETH_TEST_FL_FAILED;
11760 data[2] = 1;
11763 if (tg3_test_memory(tp) != 0) {
11764 etest->flags |= ETH_TEST_FL_FAILED;
11765 data[3] = 1;
11768 if (doextlpbk)
11769 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11771 if (tg3_test_loopback(tp, &data[4], doextlpbk))
11772 etest->flags |= ETH_TEST_FL_FAILED;
11774 tg3_full_unlock(tp);
11776 if (tg3_test_interrupt(tp) != 0) {
11777 etest->flags |= ETH_TEST_FL_FAILED;
11778 data[7] = 1;
11781 tg3_full_lock(tp, 0);
11783 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11784 if (netif_running(dev)) {
11785 tg3_flag_set(tp, INIT_COMPLETE);
11786 err2 = tg3_restart_hw(tp, 1);
11787 if (!err2)
11788 tg3_netif_start(tp);
11791 tg3_full_unlock(tp);
11793 if (irq_sync && !err2)
11794 tg3_phy_start(tp);
11796 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11797 tg3_power_down(tp);
11801 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11803 struct mii_ioctl_data *data = if_mii(ifr);
11804 struct tg3 *tp = netdev_priv(dev);
11805 int err;
11807 if (tg3_flag(tp, USE_PHYLIB)) {
11808 struct phy_device *phydev;
11809 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11810 return -EAGAIN;
11811 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11812 return phy_mii_ioctl(phydev, ifr, cmd);
11815 switch (cmd) {
11816 case SIOCGMIIPHY:
11817 data->phy_id = tp->phy_addr;
11819 /* fallthru */
11820 case SIOCGMIIREG: {
11821 u32 mii_regval;
11823 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11824 break; /* We have no PHY */
11826 if (!netif_running(dev))
11827 return -EAGAIN;
11829 spin_lock_bh(&tp->lock);
11830 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11831 spin_unlock_bh(&tp->lock);
11833 data->val_out = mii_regval;
11835 return err;
11838 case SIOCSMIIREG:
11839 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11840 break; /* We have no PHY */
11842 if (!netif_running(dev))
11843 return -EAGAIN;
11845 spin_lock_bh(&tp->lock);
11846 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11847 spin_unlock_bh(&tp->lock);
11849 return err;
11851 default:
11852 /* do nothing */
11853 break;
11855 return -EOPNOTSUPP;
11858 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11860 struct tg3 *tp = netdev_priv(dev);
11862 memcpy(ec, &tp->coal, sizeof(*ec));
11863 return 0;
11866 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11868 struct tg3 *tp = netdev_priv(dev);
11869 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11870 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
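/* On 5705-and-newer parts the per-interrupt and statistics-block
 * coalescing knobs do not exist; their limits stay zero so any
 * nonzero request for them is rejected below.
 */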
11872 if (!tg3_flag(tp, 5705_PLUS)) {
11873 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11874 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11875 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11876 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11879 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11880 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11881 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11882 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11883 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11884 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11885 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11886 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11887 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11888 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11889 return -EINVAL;
11891 /* No rx interrupts will be generated if both are zero */
11892 if ((ec->rx_coalesce_usecs == 0) &&
11893 (ec->rx_max_coalesced_frames == 0))
11894 return -EINVAL;
11896 /* No tx interrupts will be generated if both are zero */
11897 if ((ec->tx_coalesce_usecs == 0) &&
11898 (ec->tx_max_coalesced_frames == 0))
11899 return -EINVAL;
11901 /* Only copy relevant parameters, ignore all others. */
11902 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11903 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11904 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11905 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11906 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11907 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11908 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11909 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11910 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11912 if (netif_running(dev)) {
11913 tg3_full_lock(tp, 0);
11914 __tg3_set_coalesce(tp, &tp->coal);
11915 tg3_full_unlock(tp);
11917 return 0;
11920 static const struct ethtool_ops tg3_ethtool_ops = {
11921 .get_settings = tg3_get_settings,
11922 .set_settings = tg3_set_settings,
11923 .get_drvinfo = tg3_get_drvinfo,
11924 .get_regs_len = tg3_get_regs_len,
11925 .get_regs = tg3_get_regs,
11926 .get_wol = tg3_get_wol,
11927 .set_wol = tg3_set_wol,
11928 .get_msglevel = tg3_get_msglevel,
11929 .set_msglevel = tg3_set_msglevel,
11930 .nway_reset = tg3_nway_reset,
11931 .get_link = ethtool_op_get_link,
11932 .get_eeprom_len = tg3_get_eeprom_len,
11933 .get_eeprom = tg3_get_eeprom,
11934 .set_eeprom = tg3_set_eeprom,
11935 .get_ringparam = tg3_get_ringparam,
11936 .set_ringparam = tg3_set_ringparam,
11937 .get_pauseparam = tg3_get_pauseparam,
11938 .set_pauseparam = tg3_set_pauseparam,
11939 .self_test = tg3_self_test,
11940 .get_strings = tg3_get_strings,
11941 .set_phys_id = tg3_set_phys_id,
11942 .get_ethtool_stats = tg3_get_ethtool_stats,
11943 .get_coalesce = tg3_get_coalesce,
11944 .set_coalesce = tg3_set_coalesce,
11945 .get_sset_count = tg3_get_sset_count,
11948 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11950 u32 cursize, val, magic;
11952 tp->nvram_size = EEPROM_CHIP_SIZE;
11954 if (tg3_nvram_read(tp, 0, &magic) != 0)
11955 return;
11957 if ((magic != TG3_EEPROM_MAGIC) &&
11958 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11959 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11960 return;
11962 /*
11963 * Size the chip by reading offsets at increasing powers of two.
11964 * When we encounter our validation signature, we know the addressing
11965 * has wrapped around, and thus have our chip size.
11966 */
11967 cursize = 0x10;
11969 while (cursize < tp->nvram_size) {
11970 if (tg3_nvram_read(tp, cursize, &val) != 0)
11971 return;
11973 if (val == magic)
11974 break;
11976 cursize <<= 1;
11979 tp->nvram_size = cursize;
11982 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11984 u32 val;
11986 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11987 return;
11989 /* Selfboot format */
11990 if (val != TG3_EEPROM_MAGIC) {
11991 tg3_get_eeprom_size(tp);
11992 return;
11995 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11996 if (val != 0) {
11997 /* This is confusing. We want to operate on the
11998 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11999 * call will read from NVRAM and byteswap the data
12000 * according to the byteswapping settings for all
12001 * other register accesses. This ensures the data we
12002 * want will always reside in the lower 16-bits.
12003 * However, the data in NVRAM is in LE format, which
12004 * means the data from the NVRAM read will always be
12005 * opposite the endianness of the CPU. The 16-bit
12006 * byteswap then brings the data to CPU endianness.
12007 */
12008 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12009 return;
12012 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12015 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12017 u32 nvcfg1;
12019 nvcfg1 = tr32(NVRAM_CFG1);
12020 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12021 tg3_flag_set(tp, FLASH);
12022 } else {
12023 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12024 tw32(NVRAM_CFG1, nvcfg1);
12027 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12028 tg3_flag(tp, 5780_CLASS)) {
12029 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12030 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12031 tp->nvram_jedecnum = JEDEC_ATMEL;
12032 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12033 tg3_flag_set(tp, NVRAM_BUFFERED);
12034 break;
12035 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12036 tp->nvram_jedecnum = JEDEC_ATMEL;
12037 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12038 break;
12039 case FLASH_VENDOR_ATMEL_EEPROM:
12040 tp->nvram_jedecnum = JEDEC_ATMEL;
12041 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12042 tg3_flag_set(tp, NVRAM_BUFFERED);
12043 break;
12044 case FLASH_VENDOR_ST:
12045 tp->nvram_jedecnum = JEDEC_ST;
12046 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12047 tg3_flag_set(tp, NVRAM_BUFFERED);
12048 break;
12049 case FLASH_VENDOR_SAIFUN:
12050 tp->nvram_jedecnum = JEDEC_SAIFUN;
12051 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12052 break;
12053 case FLASH_VENDOR_SST_SMALL:
12054 case FLASH_VENDOR_SST_LARGE:
12055 tp->nvram_jedecnum = JEDEC_SST;
12056 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12057 break;
12059 } else {
12060 tp->nvram_jedecnum = JEDEC_ATMEL;
12061 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12062 tg3_flag_set(tp, NVRAM_BUFFERED);
12066 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12068 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12069 case FLASH_5752PAGE_SIZE_256:
12070 tp->nvram_pagesize = 256;
12071 break;
12072 case FLASH_5752PAGE_SIZE_512:
12073 tp->nvram_pagesize = 512;
12074 break;
12075 case FLASH_5752PAGE_SIZE_1K:
12076 tp->nvram_pagesize = 1024;
12077 break;
12078 case FLASH_5752PAGE_SIZE_2K:
12079 tp->nvram_pagesize = 2048;
12080 break;
12081 case FLASH_5752PAGE_SIZE_4K:
12082 tp->nvram_pagesize = 4096;
12083 break;
12084 case FLASH_5752PAGE_SIZE_264:
12085 tp->nvram_pagesize = 264;
12086 break;
12087 case FLASH_5752PAGE_SIZE_528:
12088 tp->nvram_pagesize = 528;
12089 break;
12093 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12095 u32 nvcfg1;
12097 nvcfg1 = tr32(NVRAM_CFG1);
12099 /* NVRAM protection for TPM */
12100 if (nvcfg1 & (1 << 27))
12101 tg3_flag_set(tp, PROTECTED_NVRAM);
12103 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12104 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12105 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12106 tp->nvram_jedecnum = JEDEC_ATMEL;
12107 tg3_flag_set(tp, NVRAM_BUFFERED);
12108 break;
12109 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12110 tp->nvram_jedecnum = JEDEC_ATMEL;
12111 tg3_flag_set(tp, NVRAM_BUFFERED);
12112 tg3_flag_set(tp, FLASH);
12113 break;
12114 case FLASH_5752VENDOR_ST_M45PE10:
12115 case FLASH_5752VENDOR_ST_M45PE20:
12116 case FLASH_5752VENDOR_ST_M45PE40:
12117 tp->nvram_jedecnum = JEDEC_ST;
12118 tg3_flag_set(tp, NVRAM_BUFFERED);
12119 tg3_flag_set(tp, FLASH);
12120 break;
12123 if (tg3_flag(tp, FLASH)) {
12124 tg3_nvram_get_pagesize(tp, nvcfg1);
12125 } else {
12126 /* For eeprom, set pagesize to maximum eeprom size */
12127 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12129 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12130 tw32(NVRAM_CFG1, nvcfg1);
12134 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12136 u32 nvcfg1, protect = 0;
12138 nvcfg1 = tr32(NVRAM_CFG1);
12140 /* NVRAM protection for TPM */
12141 if (nvcfg1 & (1 << 27)) {
12142 tg3_flag_set(tp, PROTECTED_NVRAM);
12143 protect = 1;
12146 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12147 switch (nvcfg1) {
12148 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12149 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12150 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12151 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12152 tp->nvram_jedecnum = JEDEC_ATMEL;
12153 tg3_flag_set(tp, NVRAM_BUFFERED);
12154 tg3_flag_set(tp, FLASH);
12155 tp->nvram_pagesize = 264;
12156 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12157 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12158 tp->nvram_size = (protect ? 0x3e200 :
12159 TG3_NVRAM_SIZE_512KB);
12160 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12161 tp->nvram_size = (protect ? 0x1f200 :
12162 TG3_NVRAM_SIZE_256KB);
12163 else
12164 tp->nvram_size = (protect ? 0x1f200 :
12165 TG3_NVRAM_SIZE_128KB);
12166 break;
12167 case FLASH_5752VENDOR_ST_M45PE10:
12168 case FLASH_5752VENDOR_ST_M45PE20:
12169 case FLASH_5752VENDOR_ST_M45PE40:
12170 tp->nvram_jedecnum = JEDEC_ST;
12171 tg3_flag_set(tp, NVRAM_BUFFERED);
12172 tg3_flag_set(tp, FLASH);
12173 tp->nvram_pagesize = 256;
12174 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12175 tp->nvram_size = (protect ?
12176 TG3_NVRAM_SIZE_64KB :
12177 TG3_NVRAM_SIZE_128KB);
12178 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12179 tp->nvram_size = (protect ?
12180 TG3_NVRAM_SIZE_64KB :
12181 TG3_NVRAM_SIZE_256KB);
12182 else
12183 tp->nvram_size = (protect ?
12184 TG3_NVRAM_SIZE_128KB :
12185 TG3_NVRAM_SIZE_512KB);
12186 break;
12190 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12192 u32 nvcfg1;
12194 nvcfg1 = tr32(NVRAM_CFG1);
12196 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12197 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12198 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12199 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12200 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12201 tp->nvram_jedecnum = JEDEC_ATMEL;
12202 tg3_flag_set(tp, NVRAM_BUFFERED);
12203 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12205 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12206 tw32(NVRAM_CFG1, nvcfg1);
12207 break;
12208 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12209 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12210 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12211 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12212 tp->nvram_jedecnum = JEDEC_ATMEL;
12213 tg3_flag_set(tp, NVRAM_BUFFERED);
12214 tg3_flag_set(tp, FLASH);
12215 tp->nvram_pagesize = 264;
12216 break;
12217 case FLASH_5752VENDOR_ST_M45PE10:
12218 case FLASH_5752VENDOR_ST_M45PE20:
12219 case FLASH_5752VENDOR_ST_M45PE40:
12220 tp->nvram_jedecnum = JEDEC_ST;
12221 tg3_flag_set(tp, NVRAM_BUFFERED);
12222 tg3_flag_set(tp, FLASH);
12223 tp->nvram_pagesize = 256;
12224 break;
12228 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12230 u32 nvcfg1, protect = 0;
12232 nvcfg1 = tr32(NVRAM_CFG1);
12234 /* NVRAM protection for TPM */
12235 if (nvcfg1 & (1 << 27)) {
12236 tg3_flag_set(tp, PROTECTED_NVRAM);
12237 protect = 1;
12240 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12241 switch (nvcfg1) {
12242 case FLASH_5761VENDOR_ATMEL_ADB021D:
12243 case FLASH_5761VENDOR_ATMEL_ADB041D:
12244 case FLASH_5761VENDOR_ATMEL_ADB081D:
12245 case FLASH_5761VENDOR_ATMEL_ADB161D:
12246 case FLASH_5761VENDOR_ATMEL_MDB021D:
12247 case FLASH_5761VENDOR_ATMEL_MDB041D:
12248 case FLASH_5761VENDOR_ATMEL_MDB081D:
12249 case FLASH_5761VENDOR_ATMEL_MDB161D:
12250 tp->nvram_jedecnum = JEDEC_ATMEL;
12251 tg3_flag_set(tp, NVRAM_BUFFERED);
12252 tg3_flag_set(tp, FLASH);
12253 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12254 tp->nvram_pagesize = 256;
12255 break;
12256 case FLASH_5761VENDOR_ST_A_M45PE20:
12257 case FLASH_5761VENDOR_ST_A_M45PE40:
12258 case FLASH_5761VENDOR_ST_A_M45PE80:
12259 case FLASH_5761VENDOR_ST_A_M45PE16:
12260 case FLASH_5761VENDOR_ST_M_M45PE20:
12261 case FLASH_5761VENDOR_ST_M_M45PE40:
12262 case FLASH_5761VENDOR_ST_M_M45PE80:
12263 case FLASH_5761VENDOR_ST_M_M45PE16:
12264 tp->nvram_jedecnum = JEDEC_ST;
12265 tg3_flag_set(tp, NVRAM_BUFFERED);
12266 tg3_flag_set(tp, FLASH);
12267 tp->nvram_pagesize = 256;
12268 break;
12271 if (protect) {
12272 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12273 } else {
12274 switch (nvcfg1) {
12275 case FLASH_5761VENDOR_ATMEL_ADB161D:
12276 case FLASH_5761VENDOR_ATMEL_MDB161D:
12277 case FLASH_5761VENDOR_ST_A_M45PE16:
12278 case FLASH_5761VENDOR_ST_M_M45PE16:
12279 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12280 break;
12281 case FLASH_5761VENDOR_ATMEL_ADB081D:
12282 case FLASH_5761VENDOR_ATMEL_MDB081D:
12283 case FLASH_5761VENDOR_ST_A_M45PE80:
12284 case FLASH_5761VENDOR_ST_M_M45PE80:
12285 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12286 break;
12287 case FLASH_5761VENDOR_ATMEL_ADB041D:
12288 case FLASH_5761VENDOR_ATMEL_MDB041D:
12289 case FLASH_5761VENDOR_ST_A_M45PE40:
12290 case FLASH_5761VENDOR_ST_M_M45PE40:
12291 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12292 break;
12293 case FLASH_5761VENDOR_ATMEL_ADB021D:
12294 case FLASH_5761VENDOR_ATMEL_MDB021D:
12295 case FLASH_5761VENDOR_ST_A_M45PE20:
12296 case FLASH_5761VENDOR_ST_M_M45PE20:
12297 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12298 break;
12303 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12305 tp->nvram_jedecnum = JEDEC_ATMEL;
12306 tg3_flag_set(tp, NVRAM_BUFFERED);
12307 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12310 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12312 u32 nvcfg1;
12314 nvcfg1 = tr32(NVRAM_CFG1);
12316 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12317 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12318 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12319 tp->nvram_jedecnum = JEDEC_ATMEL;
12320 tg3_flag_set(tp, NVRAM_BUFFERED);
12321 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12323 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12324 tw32(NVRAM_CFG1, nvcfg1);
12325 return;
12326 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12327 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12328 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12329 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12330 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12331 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12332 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12333 tp->nvram_jedecnum = JEDEC_ATMEL;
12334 tg3_flag_set(tp, NVRAM_BUFFERED);
12335 tg3_flag_set(tp, FLASH);
12337 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12338 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12339 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12340 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12341 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12342 break;
12343 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12344 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12345 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12346 break;
12347 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12348 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12349 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12350 break;
12352 break;
12353 case FLASH_5752VENDOR_ST_M45PE10:
12354 case FLASH_5752VENDOR_ST_M45PE20:
12355 case FLASH_5752VENDOR_ST_M45PE40:
12356 tp->nvram_jedecnum = JEDEC_ST;
12357 tg3_flag_set(tp, NVRAM_BUFFERED);
12358 tg3_flag_set(tp, FLASH);
12360 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12361 case FLASH_5752VENDOR_ST_M45PE10:
12362 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12363 break;
12364 case FLASH_5752VENDOR_ST_M45PE20:
12365 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12366 break;
12367 case FLASH_5752VENDOR_ST_M45PE40:
12368 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12369 break;
12371 break;
12372 default:
12373 tg3_flag_set(tp, NO_NVRAM);
12374 return;
12377 tg3_nvram_get_pagesize(tp, nvcfg1);
12378 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12379 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
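/* A note on the check above, assuming the usual part mix: 264- and
 * 528-byte pages are the non-power-of-two sizes used by Atmel
 * AT45DB-style DataFlash, which needs the controller's page/byte
 * address translation; every other page size here is a power of two
 * and can be addressed linearly, hence NO_NVRAM_ADDR_TRANS.
 */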
12383 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12385 u32 nvcfg1;
12387 nvcfg1 = tr32(NVRAM_CFG1);
12389 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12390 case FLASH_5717VENDOR_ATMEL_EEPROM:
12391 case FLASH_5717VENDOR_MICRO_EEPROM:
12392 tp->nvram_jedecnum = JEDEC_ATMEL;
12393 tg3_flag_set(tp, NVRAM_BUFFERED);
12394 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12396 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12397 tw32(NVRAM_CFG1, nvcfg1);
12398 return;
12399 case FLASH_5717VENDOR_ATMEL_MDB011D:
12400 case FLASH_5717VENDOR_ATMEL_ADB011B:
12401 case FLASH_5717VENDOR_ATMEL_ADB011D:
12402 case FLASH_5717VENDOR_ATMEL_MDB021D:
12403 case FLASH_5717VENDOR_ATMEL_ADB021B:
12404 case FLASH_5717VENDOR_ATMEL_ADB021D:
12405 case FLASH_5717VENDOR_ATMEL_45USPT:
12406 tp->nvram_jedecnum = JEDEC_ATMEL;
12407 tg3_flag_set(tp, NVRAM_BUFFERED);
12408 tg3_flag_set(tp, FLASH);
12410 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12411 case FLASH_5717VENDOR_ATMEL_MDB021D:
12412 /* Detect size with tg3_nvram_get_size() */
12413 break;
12414 case FLASH_5717VENDOR_ATMEL_ADB021B:
12415 case FLASH_5717VENDOR_ATMEL_ADB021D:
12416 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12417 break;
12418 default:
12419 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12420 break;
12422 break;
12423 case FLASH_5717VENDOR_ST_M_M25PE10:
12424 case FLASH_5717VENDOR_ST_A_M25PE10:
12425 case FLASH_5717VENDOR_ST_M_M45PE10:
12426 case FLASH_5717VENDOR_ST_A_M45PE10:
12427 case FLASH_5717VENDOR_ST_M_M25PE20:
12428 case FLASH_5717VENDOR_ST_A_M25PE20:
12429 case FLASH_5717VENDOR_ST_M_M45PE20:
12430 case FLASH_5717VENDOR_ST_A_M45PE20:
12431 case FLASH_5717VENDOR_ST_25USPT:
12432 case FLASH_5717VENDOR_ST_45USPT:
12433 tp->nvram_jedecnum = JEDEC_ST;
12434 tg3_flag_set(tp, NVRAM_BUFFERED);
12435 tg3_flag_set(tp, FLASH);
12437 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12438 case FLASH_5717VENDOR_ST_M_M25PE20:
12439 case FLASH_5717VENDOR_ST_M_M45PE20:
12440 /* Detect size with tg3_nvram_get_size() */
12441 break;
12442 case FLASH_5717VENDOR_ST_A_M25PE20:
12443 case FLASH_5717VENDOR_ST_A_M45PE20:
12444 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12445 break;
12446 default:
12447 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12448 break;
12450 break;
12451 default:
12452 tg3_flag_set(tp, NO_NVRAM);
12453 return;
12456 tg3_nvram_get_pagesize(tp, nvcfg1);
12457 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12458 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12461 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12463 u32 nvcfg1, nvmpinstrp;
12465 nvcfg1 = tr32(NVRAM_CFG1);
12466 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12468 switch (nvmpinstrp) {
12469 case FLASH_5720_EEPROM_HD:
12470 case FLASH_5720_EEPROM_LD:
12471 tp->nvram_jedecnum = JEDEC_ATMEL;
12472 tg3_flag_set(tp, NVRAM_BUFFERED);
12474 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12475 tw32(NVRAM_CFG1, nvcfg1);
12476 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12477 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12478 else
12479 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12480 return;
12481 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12482 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12483 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12484 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12485 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12486 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12487 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12488 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12489 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12490 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12491 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12492 case FLASH_5720VENDOR_ATMEL_45USPT:
12493 tp->nvram_jedecnum = JEDEC_ATMEL;
12494 tg3_flag_set(tp, NVRAM_BUFFERED);
12495 tg3_flag_set(tp, FLASH);
12497 switch (nvmpinstrp) {
12498 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12499 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12500 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12501 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12502 break;
12503 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12504 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12505 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12506 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12507 break;
12508 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12509 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12510 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12511 break;
12512 default:
12513 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12514 break;
12516 break;
12517 case FLASH_5720VENDOR_M_ST_M25PE10:
12518 case FLASH_5720VENDOR_M_ST_M45PE10:
12519 case FLASH_5720VENDOR_A_ST_M25PE10:
12520 case FLASH_5720VENDOR_A_ST_M45PE10:
12521 case FLASH_5720VENDOR_M_ST_M25PE20:
12522 case FLASH_5720VENDOR_M_ST_M45PE20:
12523 case FLASH_5720VENDOR_A_ST_M25PE20:
12524 case FLASH_5720VENDOR_A_ST_M45PE20:
12525 case FLASH_5720VENDOR_M_ST_M25PE40:
12526 case FLASH_5720VENDOR_M_ST_M45PE40:
12527 case FLASH_5720VENDOR_A_ST_M25PE40:
12528 case FLASH_5720VENDOR_A_ST_M45PE40:
12529 case FLASH_5720VENDOR_M_ST_M25PE80:
12530 case FLASH_5720VENDOR_M_ST_M45PE80:
12531 case FLASH_5720VENDOR_A_ST_M25PE80:
12532 case FLASH_5720VENDOR_A_ST_M45PE80:
12533 case FLASH_5720VENDOR_ST_25USPT:
12534 case FLASH_5720VENDOR_ST_45USPT:
12535 tp->nvram_jedecnum = JEDEC_ST;
12536 tg3_flag_set(tp, NVRAM_BUFFERED);
12537 tg3_flag_set(tp, FLASH);
12539 switch (nvmpinstrp) {
12540 case FLASH_5720VENDOR_M_ST_M25PE20:
12541 case FLASH_5720VENDOR_M_ST_M45PE20:
12542 case FLASH_5720VENDOR_A_ST_M25PE20:
12543 case FLASH_5720VENDOR_A_ST_M45PE20:
12544 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12545 break;
12546 case FLASH_5720VENDOR_M_ST_M25PE40:
12547 case FLASH_5720VENDOR_M_ST_M45PE40:
12548 case FLASH_5720VENDOR_A_ST_M25PE40:
12549 case FLASH_5720VENDOR_A_ST_M45PE40:
12550 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12551 break;
12552 case FLASH_5720VENDOR_M_ST_M25PE80:
12553 case FLASH_5720VENDOR_M_ST_M45PE80:
12554 case FLASH_5720VENDOR_A_ST_M25PE80:
12555 case FLASH_5720VENDOR_A_ST_M45PE80:
12556 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12557 break;
12558 default:
12559 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12560 break;
12562 break;
12563 default:
12564 tg3_flag_set(tp, NO_NVRAM);
12565 return;
12568 tg3_nvram_get_pagesize(tp, nvcfg1);
12569 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12570 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12573 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12574 static void __devinit tg3_nvram_init(struct tg3 *tp)
12576 tw32_f(GRC_EEPROM_ADDR,
12577 (EEPROM_ADDR_FSM_RESET |
12578 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12579 EEPROM_ADDR_CLKPERD_SHIFT)));
12581 msleep(1);
12583 /* Enable seeprom accesses. */
12584 tw32_f(GRC_LOCAL_CTRL,
12585 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12586 udelay(100);
12588 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12589 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12590 tg3_flag_set(tp, NVRAM);
12592 if (tg3_nvram_lock(tp)) {
12593 netdev_warn(tp->dev,
12594 "Cannot get nvram lock, %s failed\n",
12595 __func__);
12596 return;
12598 tg3_enable_nvram_access(tp);
12600 tp->nvram_size = 0;
12602 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12603 tg3_get_5752_nvram_info(tp);
12604 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12605 tg3_get_5755_nvram_info(tp);
12606 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12607 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12608 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12609 tg3_get_5787_nvram_info(tp);
12610 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12611 tg3_get_5761_nvram_info(tp);
12612 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12613 tg3_get_5906_nvram_info(tp);
12614 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12615 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12616 tg3_get_57780_nvram_info(tp);
12617 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12618 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12619 tg3_get_5717_nvram_info(tp);
12620 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12621 tg3_get_5720_nvram_info(tp);
12622 else
12623 tg3_get_nvram_info(tp);
12625 if (tp->nvram_size == 0)
12626 tg3_get_nvram_size(tp);
12628 tg3_disable_nvram_access(tp);
12629 tg3_nvram_unlock(tp);
12631 } else {
12632 tg3_flag_clear(tp, NVRAM);
12633 tg3_flag_clear(tp, NVRAM_BUFFERED);
12635 tg3_get_eeprom_size(tp);
12639 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12640 u32 offset, u32 len, u8 *buf)
12642 int i, j, rc = 0;
12643 u32 val;
12645 for (i = 0; i < len; i += 4) {
12646 u32 addr;
12647 __be32 data;
12649 addr = offset + i;
12651 memcpy(&data, buf + i, 4);
12654 * The SEEPROM interface expects the data to always be opposite
12655 * the native endian format. We accomplish this by reversing
12656 * all the operations that would have been performed on the
12657 * data from a call to tg3_nvram_read_be32().
12659 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
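/* A concrete illustration of the double swap above: if buf holds the
 * big-endian bytes 12 34 56 78, be32_to_cpu() yields 0x12345678 and
 * swab32() writes 0x78563412 to GRC_EEPROM_DATA, i.e. always the
 * byte-reversed image of what tg3_nvram_read_be32() would return.
 */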
12661 val = tr32(GRC_EEPROM_ADDR);
12662 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12664 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12665 EEPROM_ADDR_READ);
12666 tw32(GRC_EEPROM_ADDR, val |
12667 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12668 (addr & EEPROM_ADDR_ADDR_MASK) |
12669 EEPROM_ADDR_START |
12670 EEPROM_ADDR_WRITE);
12672 for (j = 0; j < 1000; j++) {
12673 val = tr32(GRC_EEPROM_ADDR);
12675 if (val & EEPROM_ADDR_COMPLETE)
12676 break;
12677 msleep(1);
12679 if (!(val & EEPROM_ADDR_COMPLETE)) {
12680 rc = -EBUSY;
12681 break;
12685 return rc;
12688 /* offset and length are dword aligned */
12689 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12690 u8 *buf)
12692 int ret = 0;
12693 u32 pagesize = tp->nvram_pagesize;
12694 u32 pagemask = pagesize - 1;
12695 u32 nvram_cmd;
12696 u8 *tmp;
12698 tmp = kmalloc(pagesize, GFP_KERNEL);
12699 if (tmp == NULL)
12700 return -ENOMEM;
12702 while (len) {
12703 int j;
12704 u32 phy_addr, page_off, size;
12706 phy_addr = offset & ~pagemask;
12708 for (j = 0; j < pagesize; j += 4) {
12709 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12710 (__be32 *) (tmp + j));
12711 if (ret)
12712 break;
12714 if (ret)
12715 break;
12717 page_off = offset & pagemask;
12718 size = pagesize;
12719 if (len < size)
12720 size = len;
12722 len -= size;
12724 memcpy(tmp + page_off, buf, size);
12726 offset = offset + (pagesize - page_off);
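/* A worked example, assuming a power-of-two pagesize of 256
 * (pagemask = 0xff): for offset = 0x1084, phy_addr = 0x1000 and
 * page_off = 0x84, so the whole page is read into tmp, patched at
 * 0x84, then erased and rewritten below; offset advances to 0x1100,
 * the start of the next page.
 */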
12728 tg3_enable_nvram_access(tp);
12731 * Before we can erase the flash page, we need
12732 * to issue a special "write enable" command.
12734 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12736 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12737 break;
12739 /* Erase the target page */
12740 tw32(NVRAM_ADDR, phy_addr);
12742 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12743 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12745 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12746 break;
12748 /* Issue another write enable to start the write. */
12749 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12751 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12752 break;
12754 for (j = 0; j < pagesize; j += 4) {
12755 __be32 data;
12757 data = *((__be32 *) (tmp + j));
12759 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12761 tw32(NVRAM_ADDR, phy_addr + j);
12763 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12764 NVRAM_CMD_WR;
12766 if (j == 0)
12767 nvram_cmd |= NVRAM_CMD_FIRST;
12768 else if (j == (pagesize - 4))
12769 nvram_cmd |= NVRAM_CMD_LAST;
12771 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12772 break;
12774 if (ret)
12775 break;
12778 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12779 tg3_nvram_exec_cmd(tp, nvram_cmd);
12781 kfree(tmp);
12783 return ret;
12786 /* offset and length are dword aligned */
12787 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12788 u8 *buf)
12790 int i, ret = 0;
12792 for (i = 0; i < len; i += 4, offset += 4) {
12793 u32 page_off, phy_addr, nvram_cmd;
12794 __be32 data;
12796 memcpy(&data, buf + i, 4);
12797 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12799 page_off = offset % tp->nvram_pagesize;
12801 phy_addr = tg3_nvram_phys_addr(tp, offset);
12803 tw32(NVRAM_ADDR, phy_addr);
12805 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12807 if (page_off == 0 || i == 0)
12808 nvram_cmd |= NVRAM_CMD_FIRST;
12809 if (page_off == (tp->nvram_pagesize - 4))
12810 nvram_cmd |= NVRAM_CMD_LAST;
12812 if (i == (len - 4))
12813 nvram_cmd |= NVRAM_CMD_LAST;
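/* NVRAM_CMD_FIRST opens a programming burst and NVRAM_CMD_LAST closes
 * it, so a burst never spans a flash page: it starts at each page
 * boundary (or at the first word) and ends at the last word of the
 * page or of the data, whichever comes first.
 */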
12815 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12816 !tg3_flag(tp, 5755_PLUS) &&
12817 (tp->nvram_jedecnum == JEDEC_ST) &&
12818 (nvram_cmd & NVRAM_CMD_FIRST)) {
12820 if ((ret = tg3_nvram_exec_cmd(tp,
12821 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12822 NVRAM_CMD_DONE)))
12824 break;
12826 if (!tg3_flag(tp, FLASH)) {
12827 /* We always do complete word writes to eeprom. */
12828 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12831 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12832 break;
12834 return ret;
12837 /* offset and length are dword aligned */
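/* Top-level dispatcher: parts without NVRAM support go through the
 * GRC SEEPROM interface; buffered flash and EEPROMs take the
 * word-at-a-time buffered path; unbuffered flash needs the page
 * read-erase-rewrite path above.
 */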
12838 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12840 int ret;
12842 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12843 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12844 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12845 udelay(40);
12848 if (!tg3_flag(tp, NVRAM)) {
12849 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12850 } else {
12851 u32 grc_mode;
12853 ret = tg3_nvram_lock(tp);
12854 if (ret)
12855 return ret;
12857 tg3_enable_nvram_access(tp);
12858 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12859 tw32(NVRAM_WRITE1, 0x406);
12861 grc_mode = tr32(GRC_MODE);
12862 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12864 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12865 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12866 buf);
12867 } else {
12868 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12869 buf);
12872 grc_mode = tr32(GRC_MODE);
12873 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12875 tg3_disable_nvram_access(tp);
12876 tg3_nvram_unlock(tp);
12879 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12880 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12881 udelay(40);
12884 return ret;
12887 struct subsys_tbl_ent {
12888 u16 subsys_vendor, subsys_devid;
12889 u32 phy_id;
12892 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12893 /* Broadcom boards. */
12894 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12895 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12896 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12897 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12898 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12899 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12900 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12901 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12902 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12903 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12904 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12905 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12906 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12907 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12908 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12909 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12910 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12911 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12912 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12913 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12914 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12915 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12917 /* 3com boards. */
12918 { TG3PCI_SUBVENDOR_ID_3COM,
12919 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12920 { TG3PCI_SUBVENDOR_ID_3COM,
12921 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12922 { TG3PCI_SUBVENDOR_ID_3COM,
12923 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12924 { TG3PCI_SUBVENDOR_ID_3COM,
12925 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12926 { TG3PCI_SUBVENDOR_ID_3COM,
12927 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12929 /* DELL boards. */
12930 { TG3PCI_SUBVENDOR_ID_DELL,
12931 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12932 { TG3PCI_SUBVENDOR_ID_DELL,
12933 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12934 { TG3PCI_SUBVENDOR_ID_DELL,
12935 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12936 { TG3PCI_SUBVENDOR_ID_DELL,
12937 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12939 /* Compaq boards. */
12940 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12941 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12942 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12943 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12944 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12945 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12946 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12947 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12948 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12949 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12951 /* IBM boards. */
12952 { TG3PCI_SUBVENDOR_ID_IBM,
12953 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12956 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12958 int i;
12960 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12961 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12962 tp->pdev->subsystem_vendor) &&
12963 (subsys_id_to_phy_id[i].subsys_devid ==
12964 tp->pdev->subsystem_device))
12965 return &subsys_id_to_phy_id[i];
12967 return NULL;
12970 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12972 u32 val;
12974 tp->phy_id = TG3_PHY_ID_INVALID;
12975 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12977 /* Assume an onboard device that is WOL-capable by default. */
12978 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12979 tg3_flag_set(tp, WOL_CAP);
12981 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12982 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12983 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12984 tg3_flag_set(tp, IS_NIC);
12986 val = tr32(VCPU_CFGSHDW);
12987 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12988 tg3_flag_set(tp, ASPM_WORKAROUND);
12989 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12990 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12991 tg3_flag_set(tp, WOL_ENABLE);
12992 device_set_wakeup_enable(&tp->pdev->dev, true);
12994 goto done;
12997 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12998 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12999 u32 nic_cfg, led_cfg;
13000 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13001 int eeprom_phy_serdes = 0;
13003 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13004 tp->nic_sram_data_cfg = nic_cfg;
13006 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13007 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13008 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13009 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13010 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13011 (ver > 0) && (ver < 0x100))
13012 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13014 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13015 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13017 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13018 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13019 eeprom_phy_serdes = 1;
13021 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13022 if (nic_phy_id != 0) {
13023 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13024 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13026 eeprom_phy_id = (id1 >> 16) << 10;
13027 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13028 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13029 } else
13030 eeprom_phy_id = 0;
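/* The two SRAM halves are packed into the same layout that
 * tg3_phy_probe() later assembles from MII_PHYSID1/2 (OUI bits in the
 * high part, the 10-bit model/revision field in the low bits); a zero
 * SRAM id means the bootcode recorded no PHY.
 */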
13032 tp->phy_id = eeprom_phy_id;
13033 if (eeprom_phy_serdes) {
13034 if (!tg3_flag(tp, 5705_PLUS))
13035 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13036 else
13037 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13040 if (tg3_flag(tp, 5750_PLUS))
13041 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13042 SHASTA_EXT_LED_MODE_MASK);
13043 else
13044 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13046 switch (led_cfg) {
13047 default:
13048 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13049 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13050 break;
13052 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13053 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13054 break;
13056 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13057 tp->led_ctrl = LED_CTRL_MODE_MAC;
13059 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13060 * read back from some older 5700/5701 bootcode.
13062 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13063 ASIC_REV_5700 ||
13064 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13065 ASIC_REV_5701)
13066 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13068 break;
13070 case SHASTA_EXT_LED_SHARED:
13071 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13072 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13073 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13074 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13075 LED_CTRL_MODE_PHY_2);
13076 break;
13078 case SHASTA_EXT_LED_MAC:
13079 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13080 break;
13082 case SHASTA_EXT_LED_COMBO:
13083 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13084 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13085 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13086 LED_CTRL_MODE_PHY_2);
13087 break;
13091 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13092 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13093 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13094 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13096 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13097 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13099 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13100 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13101 if ((tp->pdev->subsystem_vendor ==
13102 PCI_VENDOR_ID_ARIMA) &&
13103 (tp->pdev->subsystem_device == 0x205a ||
13104 tp->pdev->subsystem_device == 0x2063))
13105 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13106 } else {
13107 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13108 tg3_flag_set(tp, IS_NIC);
13111 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13112 tg3_flag_set(tp, ENABLE_ASF);
13113 if (tg3_flag(tp, 5750_PLUS))
13114 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13117 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13118 tg3_flag(tp, 5750_PLUS))
13119 tg3_flag_set(tp, ENABLE_APE);
13121 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13122 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13123 tg3_flag_clear(tp, WOL_CAP);
13125 if (tg3_flag(tp, WOL_CAP) &&
13126 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13127 tg3_flag_set(tp, WOL_ENABLE);
13128 device_set_wakeup_enable(&tp->pdev->dev, true);
13131 if (cfg2 & (1 << 17))
13132 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13134 /* Serdes signal pre-emphasis in register 0x590 is set by the */
13135 /* bootcode if bit 18 is set. */
13136 if (cfg2 & (1 << 18))
13137 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13139 if ((tg3_flag(tp, 57765_PLUS) ||
13140 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13141 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13142 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13143 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13145 if (tg3_flag(tp, PCI_EXPRESS) &&
13146 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13147 !tg3_flag(tp, 57765_PLUS)) {
13148 u32 cfg3;
13150 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13151 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13152 tg3_flag_set(tp, ASPM_WORKAROUND);
13155 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13156 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13157 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13158 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13159 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13160 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13162 done:
13163 if (tg3_flag(tp, WOL_CAP))
13164 device_set_wakeup_enable(&tp->pdev->dev,
13165 tg3_flag(tp, WOL_ENABLE));
13166 else
13167 device_set_wakeup_capable(&tp->pdev->dev, false);
13170 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13172 int i;
13173 u32 val;
13175 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13176 tw32(OTP_CTRL, cmd);
13178 /* Wait for up to 1 ms for the command to execute. */
13179 for (i = 0; i < 100; i++) {
13180 val = tr32(OTP_STATUS);
13181 if (val & OTP_STATUS_CMD_DONE)
13182 break;
13183 udelay(10);
13186 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13189 /* Read the gphy configuration from the OTP region of the chip. The gphy
13190 * configuration is a 32-bit value that straddles the alignment boundary.
13191 * We do two 32-bit reads and then shift and merge the results.
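 * For example, reads of 0xAAAABBBB at OTP_ADDRESS_MAGIC1 and
 * 0xCCCCDDDD at OTP_ADDRESS_MAGIC2 merge to
 * (0xBBBB << 16) | 0xCCCC = 0xBBBBCCCC.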
13193 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13195 u32 bhalf_otp, thalf_otp;
13197 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13199 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13200 return 0;
13202 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13204 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13205 return 0;
13207 thalf_otp = tr32(OTP_READ_DATA);
13209 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13211 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13212 return 0;
13214 bhalf_otp = tr32(OTP_READ_DATA);
13216 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13219 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13221 u32 adv = ADVERTISED_Autoneg |
13222 ADVERTISED_Pause;
13224 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13225 adv |= ADVERTISED_1000baseT_Half |
13226 ADVERTISED_1000baseT_Full;
13228 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13229 adv |= ADVERTISED_100baseT_Half |
13230 ADVERTISED_100baseT_Full |
13231 ADVERTISED_10baseT_Half |
13232 ADVERTISED_10baseT_Full |
13233 ADVERTISED_TP;
13234 else
13235 adv |= ADVERTISED_FIBRE;
13237 tp->link_config.advertising = adv;
13238 tp->link_config.speed = SPEED_INVALID;
13239 tp->link_config.duplex = DUPLEX_INVALID;
13240 tp->link_config.autoneg = AUTONEG_ENABLE;
13241 tp->link_config.active_speed = SPEED_INVALID;
13242 tp->link_config.active_duplex = DUPLEX_INVALID;
13243 tp->link_config.orig_speed = SPEED_INVALID;
13244 tp->link_config.orig_duplex = DUPLEX_INVALID;
13245 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13248 static int __devinit tg3_phy_probe(struct tg3 *tp)
13250 u32 hw_phy_id_1, hw_phy_id_2;
13251 u32 hw_phy_id, hw_phy_id_masked;
13252 int err;
13254 /* Flow control autonegotiation is the default behavior. */
13255 tg3_flag_set(tp, PAUSE_AUTONEG);
13256 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13258 if (tg3_flag(tp, USE_PHYLIB))
13259 return tg3_phy_init(tp);
13261 /* Reading the PHY ID register can conflict with ASF
13262 * firmware access to the PHY hardware.
13264 err = 0;
13265 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13266 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13267 } else {
13268 /* Now read the physical PHY_ID from the chip and verify
13269 * that it is sane. If it doesn't look good, we fall back
13270 * to either the hard-coded table-based PHY_ID or, failing
13271 * that, the value found in the eeprom area.
13273 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13274 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13276 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13277 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13278 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13280 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13283 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13284 tp->phy_id = hw_phy_id;
13285 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13286 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13287 else
13288 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13289 } else {
13290 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13291 /* Do nothing, phy ID already set up in
13292 * tg3_get_eeprom_hw_cfg().
13294 } else {
13295 struct subsys_tbl_ent *p;
13297 /* No eeprom signature? Try the hardcoded
13298 * subsys device table.
13300 p = tg3_lookup_by_subsys(tp);
13301 if (!p)
13302 return -ENODEV;
13304 tp->phy_id = p->phy_id;
13305 if (!tp->phy_id ||
13306 tp->phy_id == TG3_PHY_ID_BCM8002)
13307 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13311 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13312 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13313 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13314 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13315 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13316 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13317 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13318 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13320 tg3_phy_init_link_config(tp);
13322 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13323 !tg3_flag(tp, ENABLE_APE) &&
13324 !tg3_flag(tp, ENABLE_ASF)) {
13325 u32 bmsr, mask;
13327 tg3_readphy(tp, MII_BMSR, &bmsr);
13328 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13329 (bmsr & BMSR_LSTATUS))
13330 goto skip_phy_reset;
13332 err = tg3_phy_reset(tp);
13333 if (err)
13334 return err;
13336 tg3_phy_set_wirespeed(tp);
13338 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13339 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13340 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13341 if (!tg3_copper_is_advertising_all(tp, mask)) {
13342 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13343 tp->link_config.flowctrl);
13345 tg3_writephy(tp, MII_BMCR,
13346 BMCR_ANENABLE | BMCR_ANRESTART);
13350 skip_phy_reset:
13351 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13352 err = tg3_init_5401phy_dsp(tp);
13353 if (err)
13354 return err;
13356 err = tg3_init_5401phy_dsp(tp);
13359 return err;
13362 static void __devinit tg3_read_vpd(struct tg3 *tp)
13364 u8 *vpd_data;
13365 unsigned int block_end, rosize, len;
13366 u32 vpdlen;
13367 int j, i = 0;
13369 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13370 if (!vpd_data)
13371 goto out_no_vpd;
13373 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13374 if (i < 0)
13375 goto out_not_found;
13377 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13378 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13379 i += PCI_VPD_LRDT_TAG_SIZE;
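/* i now points at the first info field of the read-only section; per
 * the PCI VPD format each field is a 3-byte header (2-byte keyword
 * plus a length byte) followed by its data, which is the layout the
 * keyword searches below walk.
 */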
13381 if (block_end > vpdlen)
13382 goto out_not_found;
13384 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13385 PCI_VPD_RO_KEYWORD_MFR_ID);
13386 if (j > 0) {
13387 len = pci_vpd_info_field_size(&vpd_data[j]);
13389 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13390 if (j + len > block_end || len != 4 ||
13391 memcmp(&vpd_data[j], "1028", 4))
13392 goto partno;
13394 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13395 PCI_VPD_RO_KEYWORD_VENDOR0);
13396 if (j < 0)
13397 goto partno;
13399 len = pci_vpd_info_field_size(&vpd_data[j]);
13401 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13402 if (j + len > block_end)
13403 goto partno;
13405 memcpy(tp->fw_ver, &vpd_data[j], len);
13406 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13409 partno:
13410 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13411 PCI_VPD_RO_KEYWORD_PARTNO);
13412 if (i < 0)
13413 goto out_not_found;
13415 len = pci_vpd_info_field_size(&vpd_data[i]);
13417 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13418 if (len > TG3_BPN_SIZE ||
13419 (len + i) > vpdlen)
13420 goto out_not_found;
13422 memcpy(tp->board_part_number, &vpd_data[i], len);
13424 out_not_found:
13425 kfree(vpd_data);
13426 if (tp->board_part_number[0])
13427 return;
13429 out_no_vpd:
13430 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13431 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13432 strcpy(tp->board_part_number, "BCM5717");
13433 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13434 strcpy(tp->board_part_number, "BCM5718");
13435 else
13436 goto nomatch;
13437 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13438 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13439 strcpy(tp->board_part_number, "BCM57780");
13440 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13441 strcpy(tp->board_part_number, "BCM57760");
13442 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13443 strcpy(tp->board_part_number, "BCM57790");
13444 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13445 strcpy(tp->board_part_number, "BCM57788");
13446 else
13447 goto nomatch;
13448 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13449 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13450 strcpy(tp->board_part_number, "BCM57761");
13451 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13452 strcpy(tp->board_part_number, "BCM57765");
13453 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13454 strcpy(tp->board_part_number, "BCM57781");
13455 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13456 strcpy(tp->board_part_number, "BCM57785");
13457 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13458 strcpy(tp->board_part_number, "BCM57791");
13459 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13460 strcpy(tp->board_part_number, "BCM57795");
13461 else
13462 goto nomatch;
13463 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13464 strcpy(tp->board_part_number, "BCM95906");
13465 } else {
13466 nomatch:
13467 strcpy(tp->board_part_number, "none");
13471 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13473 u32 val;
13475 if (tg3_nvram_read(tp, offset, &val) ||
13476 (val & 0xfc000000) != 0x0c000000 ||
13477 tg3_nvram_read(tp, offset + 4, &val) ||
13478 val != 0)
13479 return 0;
13481 return 1;
13484 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13486 u32 val, offset, start, ver_offset;
13487 int i, dst_off;
13488 bool newver = false;
13490 if (tg3_nvram_read(tp, 0xc, &offset) ||
13491 tg3_nvram_read(tp, 0x4, &start))
13492 return;
13494 offset = tg3_nvram_logical_addr(tp, offset);
13496 if (tg3_nvram_read(tp, offset, &val))
13497 return;
13499 if ((val & 0xfc000000) == 0x0c000000) {
13500 if (tg3_nvram_read(tp, offset + 4, &val))
13501 return;
13503 if (val == 0)
13504 newver = true;
13507 dst_off = strlen(tp->fw_ver);
13509 if (newver) {
13510 if (TG3_VER_SIZE - dst_off < 16 ||
13511 tg3_nvram_read(tp, offset + 8, &ver_offset))
13512 return;
13514 offset = offset + ver_offset - start;
13515 for (i = 0; i < 16; i += 4) {
13516 __be32 v;
13517 if (tg3_nvram_read_be32(tp, offset + i, &v))
13518 return;
13520 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13522 } else {
13523 u32 major, minor;
13525 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13526 return;
13528 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13529 TG3_NVM_BCVER_MAJSFT;
13530 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13531 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13532 "v%d.%02d", major, minor);
13536 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13538 u32 val, major, minor;
13540 /* Use native endian representation */
13541 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13542 return;
13544 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13545 TG3_NVM_HWSB_CFG1_MAJSFT;
13546 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13547 TG3_NVM_HWSB_CFG1_MINSFT;
13549 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13552 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13554 u32 offset, major, minor, build;
13556 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13558 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13559 return;
13561 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13562 case TG3_EEPROM_SB_REVISION_0:
13563 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13564 break;
13565 case TG3_EEPROM_SB_REVISION_2:
13566 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13567 break;
13568 case TG3_EEPROM_SB_REVISION_3:
13569 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13570 break;
13571 case TG3_EEPROM_SB_REVISION_4:
13572 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13573 break;
13574 case TG3_EEPROM_SB_REVISION_5:
13575 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13576 break;
13577 case TG3_EEPROM_SB_REVISION_6:
13578 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13579 break;
13580 default:
13581 return;
13584 if (tg3_nvram_read(tp, offset, &val))
13585 return;
13587 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13588 TG3_EEPROM_SB_EDH_BLD_SHFT;
13589 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13590 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13591 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13593 if (minor > 99 || build > 26)
13594 return;
13596 offset = strlen(tp->fw_ver);
13597 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13598 " v%d.%02d", major, minor);
13600 if (build > 0) {
13601 offset = strlen(tp->fw_ver);
13602 if (offset < TG3_VER_SIZE - 1)
13603 tp->fw_ver[offset] = 'a' + build - 1;
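/* e.g. major = 1, minor = 2, build = 1 renders as " v1.02a"; build is
 * capped at 26 above, so the suffix stays within 'a'..'z'.
 */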
13607 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13609 u32 val, offset, start;
13610 int i, vlen;
13612 for (offset = TG3_NVM_DIR_START;
13613 offset < TG3_NVM_DIR_END;
13614 offset += TG3_NVM_DIRENT_SIZE) {
13615 if (tg3_nvram_read(tp, offset, &val))
13616 return;
13618 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13619 break;
13622 if (offset == TG3_NVM_DIR_END)
13623 return;
13625 if (!tg3_flag(tp, 5705_PLUS))
13626 start = 0x08000000;
13627 else if (tg3_nvram_read(tp, offset - 4, &start))
13628 return;
13630 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13631 !tg3_fw_img_is_valid(tp, offset) ||
13632 tg3_nvram_read(tp, offset + 8, &val))
13633 return;
13635 offset += val - start;
13637 vlen = strlen(tp->fw_ver);
13639 tp->fw_ver[vlen++] = ',';
13640 tp->fw_ver[vlen++] = ' ';
13642 for (i = 0; i < 4; i++) {
13643 __be32 v;
13644 if (tg3_nvram_read_be32(tp, offset, &v))
13645 return;
13647 offset += sizeof(v);
13649 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13650 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13651 break;
13654 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13655 vlen += sizeof(v);
13659 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13661 int vlen;
13662 u32 apedata;
13663 char *fwtype;
13665 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13666 return;
13668 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13669 if (apedata != APE_SEG_SIG_MAGIC)
13670 return;
13672 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13673 if (!(apedata & APE_FW_STATUS_READY))
13674 return;
13676 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13678 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13679 tg3_flag_set(tp, APE_HAS_NCSI);
13680 fwtype = "NCSI";
13681 } else {
13682 fwtype = "DASH";
13685 vlen = strlen(tp->fw_ver);
13687 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13688 fwtype,
13689 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13690 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13691 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13692 (apedata & APE_FW_VERSION_BLDMSK));
13695 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13697 u32 val;
13698 bool vpd_vers = false;
13700 if (tp->fw_ver[0] != 0)
13701 vpd_vers = true;
13703 if (tg3_flag(tp, NO_NVRAM)) {
13704 strcat(tp->fw_ver, "sb");
13705 return;
13708 if (tg3_nvram_read(tp, 0, &val))
13709 return;
13711 if (val == TG3_EEPROM_MAGIC)
13712 tg3_read_bc_ver(tp);
13713 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13714 tg3_read_sb_ver(tp, val);
13715 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13716 tg3_read_hwsb_ver(tp);
13717 else
13718 return;
13720 if (vpd_vers)
13721 goto done;
13723 if (tg3_flag(tp, ENABLE_APE)) {
13724 if (tg3_flag(tp, ENABLE_ASF))
13725 tg3_read_dash_ver(tp);
13726 } else if (tg3_flag(tp, ENABLE_ASF)) {
13727 tg3_read_mgmtfw_ver(tp);
13730 done:
13731 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
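/* The strcat/strncat/snprintf/memcpy chain above can leave the buffer
 * exactly full; forcing the final byte to NUL keeps fw_ver a valid
 * C string.
 */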
13734 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13736 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13738 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13739 return TG3_RX_RET_MAX_SIZE_5717;
13740 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13741 return TG3_RX_RET_MAX_SIZE_5700;
13742 else
13743 return TG3_RX_RET_MAX_SIZE_5705;
13746 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13747 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13748 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13749 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13750 { },
13753 static int __devinit tg3_get_invariants(struct tg3 *tp)
13755 u32 misc_ctrl_reg;
13756 u32 pci_state_reg, grc_misc_cfg;
13757 u32 val;
13758 u16 pci_cmd;
13759 int err;
13761 /* Force memory write invalidate off. If we leave it on,
13762 * then on 5700_BX chips we have to enable a workaround.
13763 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13764 * to match the cacheline size. The Broadcom driver has this
13765 * workaround but turns MWI off all the time, so it never uses
13766 * it. This seems to suggest that the workaround is insufficient.
13768 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13769 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13770 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13772 /* Important! -- Make sure register accesses are byteswapped
13773 * correctly. Also, for those chips that require it, make
13774 * sure that indirect register accesses are enabled before
13775 * the first operation.
13777 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13778 &misc_ctrl_reg);
13779 tp->misc_host_ctrl |= (misc_ctrl_reg &
13780 MISC_HOST_CTRL_CHIPREV);
13781 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13782 tp->misc_host_ctrl);
13784 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13785 MISC_HOST_CTRL_CHIPREV_SHIFT);
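/* GET_ASIC_REV() yields the chip family and GET_CHIP_REV() the family
 * plus revision; nearly every workaround decision in this function
 * keys off one of those two views of this id.
 */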
13786 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13787 u32 prod_id_asic_rev;
13789 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13790 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13791 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13792 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13793 pci_read_config_dword(tp->pdev,
13794 TG3PCI_GEN2_PRODID_ASICREV,
13795 &prod_id_asic_rev);
13796 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13797 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13798 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13799 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13800 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13801 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13802 pci_read_config_dword(tp->pdev,
13803 TG3PCI_GEN15_PRODID_ASICREV,
13804 &prod_id_asic_rev);
13805 else
13806 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13807 &prod_id_asic_rev);
13809 tp->pci_chip_rev_id = prod_id_asic_rev;
13812 /* Wrong chip ID in 5752 A0. This code can be removed later
13813 * as A0 is not in production.
13815 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13816 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13818 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13819 * we need to disable memory and use config. cycles
13820 * only to access all registers. The 5702/03 chips
13821 * can mistakenly decode the special cycles from the
13822 * ICH chipsets as memory write cycles, causing corruption
13823 * of register and memory space. Only certain ICH bridges
13824 * will drive special cycles with non-zero data during the
13825 * address phase which can fall within the 5703's address
13826 * range. This is not an ICH bug as the PCI spec allows
13827 * non-zero address during special cycles. However, only
13828 * these ICH bridges are known to drive non-zero addresses
13829 * during special cycles.
13831 * Since special cycles do not cross PCI bridges, we only
13832 * enable this workaround if the 5703 is on the secondary
13833 * bus of these ICH bridges.
13835 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13836 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13837 static struct tg3_dev_id {
13838 u32 vendor;
13839 u32 device;
13840 u32 rev;
13841 } ich_chipsets[] = {
13842 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13843 PCI_ANY_ID },
13844 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13845 PCI_ANY_ID },
13846 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13847 0xa },
13848 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13849 PCI_ANY_ID },
13850 { },
13852 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13853 struct pci_dev *bridge = NULL;
13855 while (pci_id->vendor != 0) {
13856 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13857 bridge);
13858 if (!bridge) {
13859 pci_id++;
13860 continue;
13862 if (pci_id->rev != PCI_ANY_ID) {
13863 if (bridge->revision > pci_id->rev)
13864 continue;
13866 if (bridge->subordinate &&
13867 (bridge->subordinate->number ==
13868 tp->pdev->bus->number)) {
13869 tg3_flag_set(tp, ICH_WORKAROUND);
13870 pci_dev_put(bridge);
13871 break;
13876 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13877 static struct tg3_dev_id {
13878 u32 vendor;
13879 u32 device;
13880 } bridge_chipsets[] = {
13881 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13882 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13883 { },
13885 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13886 struct pci_dev *bridge = NULL;
13888 while (pci_id->vendor != 0) {
13889 bridge = pci_get_device(pci_id->vendor,
13890 pci_id->device,
13891 bridge);
13892 if (!bridge) {
13893 pci_id++;
13894 continue;
13896 if (bridge->subordinate &&
13897 (bridge->subordinate->number <=
13898 tp->pdev->bus->number) &&
13899 (bridge->subordinate->subordinate >=
13900 tp->pdev->bus->number)) {
13901 tg3_flag_set(tp, 5701_DMA_BUG);
13902 pci_dev_put(bridge);
13903 break;
13908 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13909 * DMA addresses > 40-bit. This bridge may have additional
13910 * 57xx devices behind it in some 4-port NIC designs, for example.
13911 * Any tg3 device found behind the bridge will also need the 40-bit
13912 * DMA workaround.
13914 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13915 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13916 tg3_flag_set(tp, 5780_CLASS);
13917 tg3_flag_set(tp, 40BIT_DMA_BUG);
13918 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13919 } else {
13920 struct pci_dev *bridge = NULL;
13922 do {
13923 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13924 PCI_DEVICE_ID_SERVERWORKS_EPB,
13925 bridge);
13926 if (bridge && bridge->subordinate &&
13927 (bridge->subordinate->number <=
13928 tp->pdev->bus->number) &&
13929 (bridge->subordinate->subordinate >=
13930 tp->pdev->bus->number)) {
13931 tg3_flag_set(tp, 40BIT_DMA_BUG);
13932 pci_dev_put(bridge);
13933 break;
13935 } while (bridge);
13938 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13939 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13940 tp->pdev_peer = tg3_find_peer(tp);
13942 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13943 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13944 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13945 tg3_flag_set(tp, 5717_PLUS);
13947 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13948 tg3_flag(tp, 5717_PLUS))
13949 tg3_flag_set(tp, 57765_PLUS);
13951 /* Intentionally exclude ASIC_REV_5906 */
13952 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13953 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13954 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13955 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13956 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13957 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13958 tg3_flag(tp, 57765_PLUS))
13959 tg3_flag_set(tp, 5755_PLUS);
13961 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13962 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13963 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13964 tg3_flag(tp, 5755_PLUS) ||
13965 tg3_flag(tp, 5780_CLASS))
13966 tg3_flag_set(tp, 5750_PLUS);
13968 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13969 tg3_flag(tp, 5750_PLUS))
13970 tg3_flag_set(tp, 5705_PLUS);
13972 /* Determine TSO capabilities */
13973 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13974 ; /* Do nothing. HW bug. */
13975 else if (tg3_flag(tp, 57765_PLUS))
13976 tg3_flag_set(tp, HW_TSO_3);
13977 else if (tg3_flag(tp, 5755_PLUS) ||
13978 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13979 tg3_flag_set(tp, HW_TSO_2);
13980 else if (tg3_flag(tp, 5750_PLUS)) {
13981 tg3_flag_set(tp, HW_TSO_1);
13982 tg3_flag_set(tp, TSO_BUG);
13983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13984 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13985 tg3_flag_clear(tp, TSO_BUG);
13986 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13987 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13988 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13989 tg3_flag_set(tp, TSO_BUG);
13990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13991 tp->fw_needed = FIRMWARE_TG3TSO5;
13992 else
13993 tp->fw_needed = FIRMWARE_TG3TSO;
13996 /* Selectively allow TSO based on operating conditions */
13997 if (tg3_flag(tp, HW_TSO_1) ||
13998 tg3_flag(tp, HW_TSO_2) ||
13999 tg3_flag(tp, HW_TSO_3) ||
14000 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
14001 tg3_flag_set(tp, TSO_CAPABLE);
14002 else {
14003 tg3_flag_clear(tp, TSO_CAPABLE);
14004 tg3_flag_clear(tp, TSO_BUG);
14005 tp->fw_needed = NULL;
14008 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14009 tp->fw_needed = FIRMWARE_TG3;
14011 tp->irq_max = 1;
14013 if (tg3_flag(tp, 5750_PLUS)) {
14014 tg3_flag_set(tp, SUPPORT_MSI);
14015 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14016 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14017 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14018 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14019 tp->pdev_peer == tp->pdev))
14020 tg3_flag_clear(tp, SUPPORT_MSI);
14022 if (tg3_flag(tp, 5755_PLUS) ||
14023 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14024 tg3_flag_set(tp, 1SHOT_MSI);
14027 if (tg3_flag(tp, 57765_PLUS)) {
14028 tg3_flag_set(tp, SUPPORT_MSIX);
14029 tp->irq_max = TG3_IRQ_MAX_VECS;
14033 if (tg3_flag(tp, 5755_PLUS))
14034 tg3_flag_set(tp, SHORT_DMA_BUG);
14036 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14037 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14039 if (tg3_flag(tp, 5717_PLUS))
14040 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14042 if (tg3_flag(tp, 57765_PLUS) &&
14043 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14044 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14046 if (!tg3_flag(tp, 5705_PLUS) ||
14047 tg3_flag(tp, 5780_CLASS) ||
14048 tg3_flag(tp, USE_JUMBO_BDFLAG))
14049 tg3_flag_set(tp, JUMBO_CAPABLE);
14051 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14052 &pci_state_reg);
14054 if (pci_is_pcie(tp->pdev)) {
14055 u16 lnkctl;
14057 tg3_flag_set(tp, PCI_EXPRESS);
14059 tp->pcie_readrq = 4096;
14060 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14061 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14062 tp->pcie_readrq = 2048;
14064 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
14066 pci_read_config_word(tp->pdev,
14067 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14068 &lnkctl);
14069 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14070 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14071 ASIC_REV_5906) {
14072 tg3_flag_clear(tp, HW_TSO_2);
14073 tg3_flag_clear(tp, TSO_CAPABLE);
14075 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14076 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14077 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14078 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14079 tg3_flag_set(tp, CLKREQ_BUG);
14080 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14081 tg3_flag_set(tp, L1PLLPD_EN);
14083 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14084 /* BCM5785 devices are effectively PCIe devices, and should
14085 * follow PCIe codepaths, but do not have a PCIe capabilities
14086 * section.
14088 tg3_flag_set(tp, PCI_EXPRESS);
14089 } else if (!tg3_flag(tp, 5705_PLUS) ||
14090 tg3_flag(tp, 5780_CLASS)) {
14091 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14092 if (!tp->pcix_cap) {
14093 dev_err(&tp->pdev->dev,
14094 "Cannot find PCI-X capability, aborting\n");
14095 return -EIO;
14098 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14099 tg3_flag_set(tp, PCIX_MODE);
14102 /* If we have an AMD 762 or VIA K8T800 chipset, write
14103 * reordering to the mailbox registers done by the host
14104 * controller can cause major trouble. We read back from
14105 * every mailbox register write to force the writes to be
14106 * posted to the chip in order.
14108 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14109 !tg3_flag(tp, PCI_EXPRESS))
14110 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14112 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14113 &tp->pci_cacheline_sz);
14114 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14115 &tp->pci_lat_timer);
14116 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14117 tp->pci_lat_timer < 64) {
14118 tp->pci_lat_timer = 64;
14119 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14120 tp->pci_lat_timer);
14123 /* Important! -- It is critical that the PCI-X hw workaround
14124 * situation is decided before the first MMIO register access.
14126 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14127 /* 5700 BX chips need to have their TX producer index
14128 * mailboxes written twice to workaround a bug.
14130 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14132 /* If we are in PCI-X mode, enable the register write workaround.
14134 * The workaround is to use indirect register accesses
14135 * for all chip writes except those to mailbox registers.
14137 if (tg3_flag(tp, PCIX_MODE)) {
14138 u32 pm_reg;
14140 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14142 /* The chip can have its power management PCI config
14143 * space registers clobbered due to this bug.
14144 * So explicitly force the chip into D0 here.
14146 pci_read_config_dword(tp->pdev,
14147 tp->pm_cap + PCI_PM_CTRL,
14148 &pm_reg);
14149 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14150 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14151 pci_write_config_dword(tp->pdev,
14152 tp->pm_cap + PCI_PM_CTRL,
14153 pm_reg);
14155 /* Also, force SERR#/PERR# in PCI command. */
14156 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14157 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14158 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14162 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14163 tg3_flag_set(tp, PCI_HIGH_SPEED);
14164 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14165 tg3_flag_set(tp, PCI_32BIT);
14167 /* Chip-specific fixup from Broadcom driver */
14168 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14169 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14170 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14171 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14174 /* Default fast path register access methods */
14175 tp->read32 = tg3_read32;
14176 tp->write32 = tg3_write32;
14177 tp->read32_mbox = tg3_read32;
14178 tp->write32_mbox = tg3_write32;
14179 tp->write32_tx_mbox = tg3_write32;
14180 tp->write32_rx_mbox = tg3_write32;
14182 /* Various workaround register access methods */
14183 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14184 tp->write32 = tg3_write_indirect_reg32;
14185 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14186 (tg3_flag(tp, PCI_EXPRESS) &&
14187 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14188 /*
14189 * Back to back register writes can cause problems on these
14190 * chips; the workaround is to read back all reg writes
14191 * except those to mailbox regs.
14192 *
14193 * See tg3_write_indirect_reg32().
14194 */
14195 tp->write32 = tg3_write_flush_reg32;
14198 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14199 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14200 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14201 tp->write32_rx_mbox = tg3_write_flush_reg32;
14204 if (tg3_flag(tp, ICH_WORKAROUND)) {
14205 tp->read32 = tg3_read_indirect_reg32;
14206 tp->write32 = tg3_write_indirect_reg32;
14207 tp->read32_mbox = tg3_read_indirect_mbox;
14208 tp->write32_mbox = tg3_write_indirect_mbox;
14209 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14210 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14212 iounmap(tp->regs);
14213 tp->regs = NULL;
14215 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14216 pci_cmd &= ~PCI_COMMAND_MEMORY;
14217 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14219 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14220 tp->read32_mbox = tg3_read32_mbox_5906;
14221 tp->write32_mbox = tg3_write32_mbox_5906;
14222 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14223 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14226 if (tp->write32 == tg3_write_indirect_reg32 ||
14227 (tg3_flag(tp, PCIX_MODE) &&
14228 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14229 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14230 tg3_flag_set(tp, SRAM_USE_CONFIG);
14232 /* The memory arbiter has to be enabled in order for SRAM accesses
14233 * to succeed. Normally on powerup the tg3 chip firmware will make
14234 * sure it is enabled, but other entities such as system netboot
14235 * code might disable it.
14236 */
14237 val = tr32(MEMARB_MODE);
14238 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14240 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14241 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14242 tg3_flag(tp, 5780_CLASS)) {
14243 if (tg3_flag(tp, PCIX_MODE)) {
14244 pci_read_config_dword(tp->pdev,
14245 tp->pcix_cap + PCI_X_STATUS,
14246 &val);
14247 tp->pci_fn = val & 0x7;
14249 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14250 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14251 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14252 NIC_SRAM_CPMUSTAT_SIG) {
14253 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14254 tp->pci_fn = tp->pci_fn ? 1 : 0;
14256 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14257 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14258 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14259 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14260 NIC_SRAM_CPMUSTAT_SIG) {
14261 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14262 TG3_CPMU_STATUS_FSHFT_5719;
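/*
 * Unlike the 5717 case above, which collapses the CPMU status bit to
 * a 0/1 function number, the 5719/5720 carry the function number in a
 * multi-bit field, so it is masked and shifted out instead.
 */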
14266 /* Get eeprom hw config before calling tg3_set_power_state().
14267 * In particular, the TG3_FLAG_IS_NIC flag must be
14268 * determined before calling tg3_set_power_state() so that
14269 * we know whether or not to switch out of Vaux power.
14270 * When the flag is set, it means that GPIO1 is used for eeprom
14271 * write protect and also implies that it is a LOM where GPIOs
14272 * are not used to switch power.
14273 */
14274 tg3_get_eeprom_hw_cfg(tp);
14276 if (tg3_flag(tp, ENABLE_APE)) {
14277 /* Allow reads and writes to the
14278 * APE register and memory space.
14279 */
14280 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14281 PCISTATE_ALLOW_APE_SHMEM_WR |
14282 PCISTATE_ALLOW_APE_PSPACE_WR;
14283 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14284 pci_state_reg);
14286 tg3_ape_lock_init(tp);
14289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14290 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14291 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14292 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14293 tg3_flag(tp, 57765_PLUS))
14294 tg3_flag_set(tp, CPMU_PRESENT);
14296 /* Set up tp->grc_local_ctrl before calling
14297 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14298 * will bring 5700's external PHY out of reset.
14299 * It is also used as eeprom write protect on LOMs.
14300 */
14301 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14302 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14303 tg3_flag(tp, EEPROM_WRITE_PROT))
14304 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14305 GRC_LCLCTRL_GPIO_OUTPUT1);
14306 /* Unused GPIO3 must be driven as output on 5752 because there
14307 * are no pull-up resistors on unused GPIO pins.
14308 */
14309 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14310 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14312 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14313 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14314 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14315 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14317 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14318 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14319 /* Turn off the debug UART. */
14320 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14321 if (tg3_flag(tp, IS_NIC))
14322 /* Keep VMain power. */
14323 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14324 GRC_LCLCTRL_GPIO_OUTPUT0;
14327 /* Switch out of Vaux if it is a NIC */
14328 tg3_pwrsrc_switch_to_vmain(tp);
14330 /* Derive initial jumbo mode from MTU assigned in
14331 * ether_setup() via the alloc_etherdev() call
14332 */
14333 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14334 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14336 /* Determine WakeOnLan speed to use. */
14337 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14338 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14339 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14340 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14341 tg3_flag_clear(tp, WOL_SPEED_100MB);
14342 } else {
14343 tg3_flag_set(tp, WOL_SPEED_100MB);
14346 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14347 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14349 /* A few boards don't want Ethernet@WireSpeed phy feature */
14350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14351 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14352 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14353 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14354 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14355 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14356 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14358 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14359 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14360 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14361 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14362 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14364 if (tg3_flag(tp, 5705_PLUS) &&
14365 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14366 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14367 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14368 !tg3_flag(tp, 57765_PLUS)) {
14369 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14370 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14371 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14372 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14373 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14374 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14375 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14376 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14377 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14378 } else
14379 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14382 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14383 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14384 tp->phy_otp = tg3_read_otp_phycfg(tp);
14385 if (tp->phy_otp == 0)
14386 tp->phy_otp = TG3_OTP_DEFAULT;
14389 if (tg3_flag(tp, CPMU_PRESENT))
14390 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14391 else
14392 tp->mi_mode = MAC_MI_MODE_BASE;
14394 tp->coalesce_mode = 0;
14395 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14396 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14397 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14399 /* Set these bits to enable statistics workaround. */
14400 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14401 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14402 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14403 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14404 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14407 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14408 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14409 tg3_flag_set(tp, USE_PHYLIB);
14411 err = tg3_mdio_init(tp);
14412 if (err)
14413 return err;
14415 /* Initialize data/descriptor byte/word swapping. */
14416 val = tr32(GRC_MODE);
14417 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14418 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14419 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14420 GRC_MODE_B2HRX_ENABLE |
14421 GRC_MODE_HTX2B_ENABLE |
14422 GRC_MODE_HOST_STACKUP);
14423 else
14424 val &= GRC_MODE_HOST_STACKUP;
14426 tw32(GRC_MODE, val | tp->grc_mode);
14428 tg3_switch_clocks(tp);
14430 /* Clear this out for sanity. */
14431 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14433 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14434 &pci_state_reg);
14435 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14436 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14437 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14439 if (chiprevid == CHIPREV_ID_5701_A0 ||
14440 chiprevid == CHIPREV_ID_5701_B0 ||
14441 chiprevid == CHIPREV_ID_5701_B2 ||
14442 chiprevid == CHIPREV_ID_5701_B5) {
14443 void __iomem *sram_base;
14445 /* Write some dummy words into the SRAM status block
14446 * area, see if it reads back correctly. If the return
14447 * value is bad, force enable the PCIX workaround.
14448 */
14449 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14451 writel(0x00000000, sram_base);
14452 writel(0x00000000, sram_base + 4);
14453 writel(0xffffffff, sram_base + 4);
14454 if (readl(sram_base) != 0x00000000)
14455 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
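/*
 * If the readback of offset 0 is nonzero, the 0xffffffff intended for
 * offset 4 presumably corrupted its neighbor, so plain memory-mapped
 * writes cannot be trusted and the indirect-access workaround is
 * forced on.
 */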
14459 udelay(50);
14460 tg3_nvram_init(tp);
14462 grc_misc_cfg = tr32(GRC_MISC_CFG);
14463 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14466 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14467 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14468 tg3_flag_set(tp, IS_5788);
14470 if (!tg3_flag(tp, IS_5788) &&
14471 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14472 tg3_flag_set(tp, TAGGED_STATUS);
14473 if (tg3_flag(tp, TAGGED_STATUS)) {
14474 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14475 HOSTCC_MODE_CLRTICK_TXBD);
14477 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14478 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14479 tp->misc_host_ctrl);
14482 /* Preserve the APE MAC_MODE bits */
14483 if (tg3_flag(tp, ENABLE_APE))
14484 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14485 else
14486 tp->mac_mode = 0;
14488 /* these are limited to 10/100 only */
14489 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14490 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14491 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14492 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14493 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14494 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14495 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14496 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14497 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14498 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14499 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14500 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14501 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14502 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14503 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14504 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14506 err = tg3_phy_probe(tp);
14507 if (err) {
14508 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14509 /* ... but do not return immediately ... */
14510 tg3_mdio_fini(tp);
14513 tg3_read_vpd(tp);
14514 tg3_read_fw_ver(tp);
14516 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14517 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14518 } else {
14519 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14520 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14521 else
14522 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14525 /* 5700 {AX,BX} chips have a broken status block link
14526 * change bit implementation, so we must use the
14527 * status register in those cases.
14528 */
14529 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14530 tg3_flag_set(tp, USE_LINKCHG_REG);
14531 else
14532 tg3_flag_clear(tp, USE_LINKCHG_REG);
14534 /* The led_ctrl is set during tg3_phy_probe; here we might
14535 * have to force the link status polling mechanism based
14536 * upon subsystem IDs.
14537 */
14538 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14539 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14540 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14541 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14542 tg3_flag_set(tp, USE_LINKCHG_REG);
14545 /* For all SERDES we poll the MAC status register. */
14546 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14547 tg3_flag_set(tp, POLL_SERDES);
14548 else
14549 tg3_flag_clear(tp, POLL_SERDES);
14551 tp->rx_offset = NET_IP_ALIGN;
14552 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14553 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14554 tg3_flag(tp, PCIX_MODE)) {
14555 tp->rx_offset = 0;
14556 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14557 tp->rx_copy_thresh = ~(u16)0;
14558 #endif
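/*
 * With rx_offset forced to 0 the IP header lands unaligned, so on
 * platforms without efficient unaligned access the copy threshold is
 * maxed out, which should make the driver copy (and thereby realign)
 * every received packet.
 */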
14561 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14562 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14563 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14565 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14567 /* Increment the rx prod index on the rx std ring by at most
14568 * 8 for these chips to work around hw errata.
14569 */
14570 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14571 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14572 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14573 tp->rx_std_max_post = 8;
14575 if (tg3_flag(tp, ASPM_WORKAROUND))
14576 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14577 PCIE_PWR_MGMT_L1_THRESH_MSK;
14579 return err;
14582 #ifdef CONFIG_SPARC
14583 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14585 struct net_device *dev = tp->dev;
14586 struct pci_dev *pdev = tp->pdev;
14587 struct device_node *dp = pci_device_to_OF_node(pdev);
14588 const unsigned char *addr;
14589 int len;
14591 addr = of_get_property(dp, "local-mac-address", &len);
14592 if (addr && len == 6) {
14593 memcpy(dev->dev_addr, addr, 6);
14594 memcpy(dev->perm_addr, dev->dev_addr, 6);
14595 return 0;
14597 return -ENODEV;
14600 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14602 struct net_device *dev = tp->dev;
14604 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14605 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14606 return 0;
14608 #endif
14610 static int __devinit tg3_get_device_address(struct tg3 *tp)
14612 struct net_device *dev = tp->dev;
14613 u32 hi, lo, mac_offset;
14614 int addr_ok = 0;
14616 #ifdef CONFIG_SPARC
14617 if (!tg3_get_macaddr_sparc(tp))
14618 return 0;
14619 #endif
14621 mac_offset = 0x7c;
14622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14623 tg3_flag(tp, 5780_CLASS)) {
14624 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14625 mac_offset = 0xcc;
14626 if (tg3_nvram_lock(tp))
14627 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14628 else
14629 tg3_nvram_unlock(tp);
14630 } else if (tg3_flag(tp, 5717_PLUS)) {
14631 if (tp->pci_fn & 1)
14632 mac_offset = 0xcc;
14633 if (tp->pci_fn > 1)
14634 mac_offset += 0x18c;
14635 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14636 mac_offset = 0x10;
14638 /* First try to get it from MAC address mailbox. */
14639 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14640 if ((hi >> 16) == 0x484b) {
14641 dev->dev_addr[0] = (hi >> 8) & 0xff;
14642 dev->dev_addr[1] = (hi >> 0) & 0xff;
14644 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14645 dev->dev_addr[2] = (lo >> 24) & 0xff;
14646 dev->dev_addr[3] = (lo >> 16) & 0xff;
14647 dev->dev_addr[4] = (lo >> 8) & 0xff;
14648 dev->dev_addr[5] = (lo >> 0) & 0xff;
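/*
 * Worked example: hi = 0x484b0a1b, lo = 0x2c3d4e5f (the upper 16 bits
 * of hi being the 0x484b signature) decode to the MAC address
 * 0a:1b:2c:3d:4e:5f.
 */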
14650 /* Some old bootcode may report a 0 MAC address in SRAM */
14651 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14653 if (!addr_ok) {
14654 /* Next, try NVRAM. */
14655 if (!tg3_flag(tp, NO_NVRAM) &&
14656 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14657 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14658 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14659 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14661 /* Finally just fetch it out of the MAC control regs. */
14662 else {
14663 hi = tr32(MAC_ADDR_0_HIGH);
14664 lo = tr32(MAC_ADDR_0_LOW);
14666 dev->dev_addr[5] = lo & 0xff;
14667 dev->dev_addr[4] = (lo >> 8) & 0xff;
14668 dev->dev_addr[3] = (lo >> 16) & 0xff;
14669 dev->dev_addr[2] = (lo >> 24) & 0xff;
14670 dev->dev_addr[1] = hi & 0xff;
14671 dev->dev_addr[0] = (hi >> 8) & 0xff;
14675 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14676 #ifdef CONFIG_SPARC
14677 if (!tg3_get_default_macaddr_sparc(tp))
14678 return 0;
14679 #endif
14680 return -EINVAL;
14682 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14683 return 0;
14686 #define BOUNDARY_SINGLE_CACHELINE 1
14687 #define BOUNDARY_MULTI_CACHELINE 2
14689 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14691 int cacheline_size;
14692 u8 byte;
14693 int goal;
14695 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14696 if (byte == 0)
14697 cacheline_size = 1024;
14698 else
14699 cacheline_size = (int) byte * 4;
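/*
 * PCI_CACHE_LINE_SIZE is expressed in 32-bit words, so a raw value of
 * 0x10, for instance, means a 64-byte cache line; an unset register
 * (0) is pessimistically treated as 1024 bytes.
 */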
14701 /* On 5703 and later chips, the boundary bits have no
14702 * effect.
14703 */
14704 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14705 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14706 !tg3_flag(tp, PCI_EXPRESS))
14707 goto out;
14709 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14710 goal = BOUNDARY_MULTI_CACHELINE;
14711 #else
14712 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14713 goal = BOUNDARY_SINGLE_CACHELINE;
14714 #else
14715 goal = 0;
14716 #endif
14717 #endif
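/*
 * 57765 and newer parts appear to expose only a single
 * cache-alignment disable bit instead of the per-direction boundary
 * fields programmed below, so they are handled up front.
 */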
14719 if (tg3_flag(tp, 57765_PLUS)) {
14720 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14721 goto out;
14724 if (!goal)
14725 goto out;
14727 /* PCI controllers on most RISC systems tend to disconnect
14728 * when a device tries to burst across a cache-line boundary.
14729 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14731 * Unfortunately, for PCI-E there are only limited
14732 * write-side controls for this, and thus for reads
14733 * we will still get the disconnects. We'll also waste
14734 * these PCI cycles for both read and write for chips
14735 * other than 5700 and 5701 which do not implement the
14736 * boundary bits.
14737 */
14738 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14739 switch (cacheline_size) {
14740 case 16:
14741 case 32:
14742 case 64:
14743 case 128:
14744 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14745 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14746 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14747 } else {
14748 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14749 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14751 break;
14753 case 256:
14754 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14755 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14756 break;
14758 default:
14759 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14760 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14761 break;
14763 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14764 switch (cacheline_size) {
14765 case 16:
14766 case 32:
14767 case 64:
14768 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14769 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14770 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14771 break;
14773 /* fallthrough */
14774 case 128:
14775 default:
14776 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14777 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14778 break;
14780 } else {
14781 switch (cacheline_size) {
14782 case 16:
14783 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14784 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14785 DMA_RWCTRL_WRITE_BNDRY_16);
14786 break;
14788 /* fallthrough */
14789 case 32:
14790 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14791 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14792 DMA_RWCTRL_WRITE_BNDRY_32);
14793 break;
14795 /* fallthrough */
14796 case 64:
14797 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14798 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14799 DMA_RWCTRL_WRITE_BNDRY_64);
14800 break;
14802 /* fallthrough */
14803 case 128:
14804 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14805 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14806 DMA_RWCTRL_WRITE_BNDRY_128);
14807 break;
14809 /* fallthrough */
14810 case 256:
14811 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14812 DMA_RWCTRL_WRITE_BNDRY_256);
14813 break;
14814 case 512:
14815 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14816 DMA_RWCTRL_WRITE_BNDRY_512);
14817 break;
14818 case 1024:
14819 default:
14820 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14821 DMA_RWCTRL_WRITE_BNDRY_1024);
14822 break;
14826 out:
14827 return val;
14830 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14832 struct tg3_internal_buffer_desc test_desc;
14833 u32 sram_dma_descs;
14834 int i, ret;
14836 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14838 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14839 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14840 tw32(RDMAC_STATUS, 0);
14841 tw32(WDMAC_STATUS, 0);
14843 tw32(BUFMGR_MODE, 0);
14844 tw32(FTQ_RESET, 0);
14846 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14847 test_desc.addr_lo = buf_dma & 0xffffffff;
14848 test_desc.nic_mbuf = 0x00002100;
14849 test_desc.len = size;
14851 /*
14852 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14853 * the *second* time the tg3 driver was getting loaded after an
14854 * initial scan.
14855 *
14856 * Broadcom tells me:
14857 * ...the DMA engine is connected to the GRC block and a DMA
14858 * reset may affect the GRC block in some unpredictable way...
14859 * The behavior of resets to individual blocks has not been tested.
14860 *
14861 * Broadcom noted the GRC reset will also reset all sub-components.
14862 */
14863 if (to_device) {
14864 test_desc.cqid_sqid = (13 << 8) | 2;
14866 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14867 udelay(40);
14868 } else {
14869 test_desc.cqid_sqid = (16 << 8) | 7;
14871 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14872 udelay(40);
14874 test_desc.flags = 0x00000005;
14876 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14877 u32 val;
14879 val = *(((u32 *)&test_desc) + i);
14880 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14881 sram_dma_descs + (i * sizeof(u32)));
14882 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14884 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
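/*
 * Each 32-bit word of the descriptor is pushed into NIC SRAM through
 * the PCI config-space memory window: point the window base at the
 * target SRAM address, write the data register, then park the window
 * back at 0.
 */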
14886 if (to_device)
14887 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14888 else
14889 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14891 ret = -ENODEV;
14892 for (i = 0; i < 40; i++) {
14893 u32 val;
14895 if (to_device)
14896 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14897 else
14898 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14899 if ((val & 0xffff) == sram_dma_descs) {
14900 ret = 0;
14901 break;
14904 udelay(100);
14907 return ret;
14910 #define TEST_BUFFER_SIZE 0x2000
14912 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14913 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14914 { },
14917 static int __devinit tg3_test_dma(struct tg3 *tp)
14919 dma_addr_t buf_dma;
14920 u32 *buf, saved_dma_rwctrl;
14921 int ret = 0;
14923 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14924 &buf_dma, GFP_KERNEL);
14925 if (!buf) {
14926 ret = -ENOMEM;
14927 goto out_nofree;
14930 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14931 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14933 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14935 if (tg3_flag(tp, 57765_PLUS))
14936 goto out;
14938 if (tg3_flag(tp, PCI_EXPRESS)) {
14939 /* DMA read watermark not used on PCIE */
14940 tp->dma_rwctrl |= 0x00180000;
14941 } else if (!tg3_flag(tp, PCIX_MODE)) {
14942 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14943 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14944 tp->dma_rwctrl |= 0x003f0000;
14945 else
14946 tp->dma_rwctrl |= 0x003f000f;
14947 } else {
14948 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14949 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14950 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14951 u32 read_water = 0x7;
14953 /* If the 5704 is behind the EPB bridge, we can
14954 * do the less restrictive ONE_DMA workaround for
14955 * better performance.
14956 */
14957 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14958 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14959 tp->dma_rwctrl |= 0x8000;
14960 else if (ccval == 0x6 || ccval == 0x7)
14961 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14963 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14964 read_water = 4;
14965 /* Set bit 23 to enable PCIX hw bug fix */
14966 tp->dma_rwctrl |=
14967 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14968 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14969 (1 << 23);
14970 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14971 /* 5780 always in PCIX mode */
14972 tp->dma_rwctrl |= 0x00144000;
14973 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14974 /* 5714 always in PCIX mode */
14975 tp->dma_rwctrl |= 0x00148000;
14976 } else {
14977 tp->dma_rwctrl |= 0x001b000f;
14981 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14982 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14983 tp->dma_rwctrl &= 0xfffffff0;
14985 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14986 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14987 /* Remove this if it causes problems for some boards. */
14988 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14990 /* On 5700/5701 chips, we need to set this bit.
14991 * Otherwise the chip will issue cacheline transactions
14992 * to streamable DMA memory with not all the byte
14993 * enables turned on. This is an error on several
14994 * RISC PCI controllers, in particular sparc64.
14996 * On 5703/5704 chips, this bit has been reassigned
14997 * a different meaning. In particular, it is used
14998 * on those chips to enable a PCI-X workaround.
14999 */
15000 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15003 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15005 #if 0
15006 /* Unneeded, already done by tg3_get_invariants. */
15007 tg3_switch_clocks(tp);
15008 #endif
15010 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15011 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15012 goto out;
15014 /* It is best to perform DMA test with maximum write burst size
15015 * to expose the 5700/5701 write DMA bug.
15016 */
15017 saved_dma_rwctrl = tp->dma_rwctrl;
15018 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15019 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
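/*
 * Test strategy: fill the buffer with a known pattern, DMA it to the
 * chip and back again, then verify it. On corruption, retry once with
 * the write boundary clamped to 16 bytes before declaring the DMA
 * engine broken.
 */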
15021 while (1) {
15022 u32 *p = buf, i;
15024 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15025 p[i] = i;
15027 /* Send the buffer to the chip. */
15028 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15029 if (ret) {
15030 dev_err(&tp->pdev->dev,
15031 "%s: Buffer write failed. err = %d\n",
15032 __func__, ret);
15033 break;
15036 #if 0
15037 /* validate data reached card RAM correctly. */
15038 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15039 u32 val;
15040 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15041 if (le32_to_cpu(val) != p[i]) {
15042 dev_err(&tp->pdev->dev,
15043 "%s: Buffer corrupted on device! "
15044 "(%d != %d)\n", __func__, val, i);
15045 /* ret = -ENODEV here? */
15047 p[i] = 0;
15049 #endif
15050 /* Now read it back. */
15051 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15052 if (ret) {
15053 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15054 "err = %d\n", __func__, ret);
15055 break;
15058 /* Verify it. */
15059 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15060 if (p[i] == i)
15061 continue;
15063 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15064 DMA_RWCTRL_WRITE_BNDRY_16) {
15065 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15066 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15067 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15068 break;
15069 } else {
15070 dev_err(&tp->pdev->dev,
15071 "%s: Buffer corrupted on read back! "
15072 "(%d != %d)\n", __func__, p[i], i);
15073 ret = -ENODEV;
15074 goto out;
15078 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15079 /* Success. */
15080 ret = 0;
15081 break;
15084 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15085 DMA_RWCTRL_WRITE_BNDRY_16) {
15086 /* DMA test passed without adjusting DMA boundary,
15087 * now look for chipsets that are known to expose the
15088 * DMA bug without failing the test.
15089 */
15090 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15091 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15092 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15093 } else {
15094 /* Safe to use the calculated DMA boundary. */
15095 tp->dma_rwctrl = saved_dma_rwctrl;
15098 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15101 out:
15102 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15103 out_nofree:
15104 return ret;
15107 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15109 if (tg3_flag(tp, 57765_PLUS)) {
15110 tp->bufmgr_config.mbuf_read_dma_low_water =
15111 DEFAULT_MB_RDMA_LOW_WATER_5705;
15112 tp->bufmgr_config.mbuf_mac_rx_low_water =
15113 DEFAULT_MB_MACRX_LOW_WATER_57765;
15114 tp->bufmgr_config.mbuf_high_water =
15115 DEFAULT_MB_HIGH_WATER_57765;
15117 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15118 DEFAULT_MB_RDMA_LOW_WATER_5705;
15119 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15120 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15121 tp->bufmgr_config.mbuf_high_water_jumbo =
15122 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15123 } else if (tg3_flag(tp, 5705_PLUS)) {
15124 tp->bufmgr_config.mbuf_read_dma_low_water =
15125 DEFAULT_MB_RDMA_LOW_WATER_5705;
15126 tp->bufmgr_config.mbuf_mac_rx_low_water =
15127 DEFAULT_MB_MACRX_LOW_WATER_5705;
15128 tp->bufmgr_config.mbuf_high_water =
15129 DEFAULT_MB_HIGH_WATER_5705;
15130 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15131 tp->bufmgr_config.mbuf_mac_rx_low_water =
15132 DEFAULT_MB_MACRX_LOW_WATER_5906;
15133 tp->bufmgr_config.mbuf_high_water =
15134 DEFAULT_MB_HIGH_WATER_5906;
15137 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15138 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15139 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15140 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15141 tp->bufmgr_config.mbuf_high_water_jumbo =
15142 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15143 } else {
15144 tp->bufmgr_config.mbuf_read_dma_low_water =
15145 DEFAULT_MB_RDMA_LOW_WATER;
15146 tp->bufmgr_config.mbuf_mac_rx_low_water =
15147 DEFAULT_MB_MACRX_LOW_WATER;
15148 tp->bufmgr_config.mbuf_high_water =
15149 DEFAULT_MB_HIGH_WATER;
15151 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15152 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15153 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15154 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15155 tp->bufmgr_config.mbuf_high_water_jumbo =
15156 DEFAULT_MB_HIGH_WATER_JUMBO;
15159 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15160 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15163 static char * __devinit tg3_phy_string(struct tg3 *tp)
15165 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15166 case TG3_PHY_ID_BCM5400: return "5400";
15167 case TG3_PHY_ID_BCM5401: return "5401";
15168 case TG3_PHY_ID_BCM5411: return "5411";
15169 case TG3_PHY_ID_BCM5701: return "5701";
15170 case TG3_PHY_ID_BCM5703: return "5703";
15171 case TG3_PHY_ID_BCM5704: return "5704";
15172 case TG3_PHY_ID_BCM5705: return "5705";
15173 case TG3_PHY_ID_BCM5750: return "5750";
15174 case TG3_PHY_ID_BCM5752: return "5752";
15175 case TG3_PHY_ID_BCM5714: return "5714";
15176 case TG3_PHY_ID_BCM5780: return "5780";
15177 case TG3_PHY_ID_BCM5755: return "5755";
15178 case TG3_PHY_ID_BCM5787: return "5787";
15179 case TG3_PHY_ID_BCM5784: return "5784";
15180 case TG3_PHY_ID_BCM5756: return "5722/5756";
15181 case TG3_PHY_ID_BCM5906: return "5906";
15182 case TG3_PHY_ID_BCM5761: return "5761";
15183 case TG3_PHY_ID_BCM5718C: return "5718C";
15184 case TG3_PHY_ID_BCM5718S: return "5718S";
15185 case TG3_PHY_ID_BCM57765: return "57765";
15186 case TG3_PHY_ID_BCM5719C: return "5719C";
15187 case TG3_PHY_ID_BCM5720C: return "5720C";
15188 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15189 case 0: return "serdes";
15190 default: return "unknown";
15194 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15196 if (tg3_flag(tp, PCI_EXPRESS)) {
15197 strcpy(str, "PCI Express");
15198 return str;
15199 } else if (tg3_flag(tp, PCIX_MODE)) {
15200 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15202 strcpy(str, "PCIX:");
15204 if ((clock_ctrl == 7) ||
15205 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15206 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15207 strcat(str, "133MHz");
15208 else if (clock_ctrl == 0)
15209 strcat(str, "33MHz");
15210 else if (clock_ctrl == 2)
15211 strcat(str, "50MHz");
15212 else if (clock_ctrl == 4)
15213 strcat(str, "66MHz");
15214 else if (clock_ctrl == 6)
15215 strcat(str, "100MHz");
15216 } else {
15217 strcpy(str, "PCI:");
15218 if (tg3_flag(tp, PCI_HIGH_SPEED))
15219 strcat(str, "66MHz");
15220 else
15221 strcat(str, "33MHz");
15223 if (tg3_flag(tp, PCI_32BIT))
15224 strcat(str, ":32-bit");
15225 else
15226 strcat(str, ":64-bit");
15227 return str;
15230 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15232 struct pci_dev *peer;
15233 unsigned int func, devnr = tp->pdev->devfn & ~7;
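/*
 * devfn & ~7 clears the three function bits, yielding function 0 of
 * this slot; the loop below then probes all eight functions looking
 * for the other port of a dual-port device.
 */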
15235 for (func = 0; func < 8; func++) {
15236 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15237 if (peer && peer != tp->pdev)
15238 break;
15239 pci_dev_put(peer);
15241 /* 5704 can be configured in single-port mode; set peer to
15242 * tp->pdev in that case.
15243 */
15244 if (!peer) {
15245 peer = tp->pdev;
15246 return peer;
15249 /*
15250 * We don't need to keep the refcount elevated; there's no way
15251 * to remove one half of this device without removing the other.
15252 */
15253 pci_dev_put(peer);
15255 return peer;
15258 static void __devinit tg3_init_coal(struct tg3 *tp)
15260 struct ethtool_coalesce *ec = &tp->coal;
15262 memset(ec, 0, sizeof(*ec));
15263 ec->cmd = ETHTOOL_GCOALESCE;
15264 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15265 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15266 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15267 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15268 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15269 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15270 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15271 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15272 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15274 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15275 HOSTCC_MODE_CLRTICK_TXBD)) {
15276 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15277 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15278 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15279 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15282 if (tg3_flag(tp, 5705_PLUS)) {
15283 ec->rx_coalesce_usecs_irq = 0;
15284 ec->tx_coalesce_usecs_irq = 0;
15285 ec->stats_block_coalesce_usecs = 0;
15289 static const struct net_device_ops tg3_netdev_ops = {
15290 .ndo_open = tg3_open,
15291 .ndo_stop = tg3_close,
15292 .ndo_start_xmit = tg3_start_xmit,
15293 .ndo_get_stats64 = tg3_get_stats64,
15294 .ndo_validate_addr = eth_validate_addr,
15295 .ndo_set_rx_mode = tg3_set_rx_mode,
15296 .ndo_set_mac_address = tg3_set_mac_addr,
15297 .ndo_do_ioctl = tg3_ioctl,
15298 .ndo_tx_timeout = tg3_tx_timeout,
15299 .ndo_change_mtu = tg3_change_mtu,
15300 .ndo_fix_features = tg3_fix_features,
15301 .ndo_set_features = tg3_set_features,
15302 #ifdef CONFIG_NET_POLL_CONTROLLER
15303 .ndo_poll_controller = tg3_poll_controller,
15304 #endif
15307 static int __devinit tg3_init_one(struct pci_dev *pdev,
15308 const struct pci_device_id *ent)
15310 struct net_device *dev;
15311 struct tg3 *tp;
15312 int i, err, pm_cap;
15313 u32 sndmbx, rcvmbx, intmbx;
15314 char str[40];
15315 u64 dma_mask, persist_dma_mask;
15316 u32 features = 0;
15318 printk_once(KERN_INFO "%s\n", version);
15320 err = pci_enable_device(pdev);
15321 if (err) {
15322 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15323 return err;
15326 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15327 if (err) {
15328 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15329 goto err_out_disable_pdev;
15332 pci_set_master(pdev);
15334 /* Find power-management capability. */
15335 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15336 if (pm_cap == 0) {
15337 dev_err(&pdev->dev,
15338 "Cannot find Power Management capability, aborting\n");
15339 err = -EIO;
15340 goto err_out_free_res;
15343 err = pci_set_power_state(pdev, PCI_D0);
15344 if (err) {
15345 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15346 goto err_out_free_res;
15349 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15350 if (!dev) {
15351 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15352 err = -ENOMEM;
15353 goto err_out_power_down;
15356 SET_NETDEV_DEV(dev, &pdev->dev);
15358 tp = netdev_priv(dev);
15359 tp->pdev = pdev;
15360 tp->dev = dev;
15361 tp->pm_cap = pm_cap;
15362 tp->rx_mode = TG3_DEF_RX_MODE;
15363 tp->tx_mode = TG3_DEF_TX_MODE;
15365 if (tg3_debug > 0)
15366 tp->msg_enable = tg3_debug;
15367 else
15368 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15370 /* The word/byte swap controls here control register access byte
15371 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15372 * setting below.
15373 */
15374 tp->misc_host_ctrl =
15375 MISC_HOST_CTRL_MASK_PCI_INT |
15376 MISC_HOST_CTRL_WORD_SWAP |
15377 MISC_HOST_CTRL_INDIR_ACCESS |
15378 MISC_HOST_CTRL_PCISTATE_RW;
15380 /* The NONFRM (non-frame) byte/word swap controls take effect
15381 * on descriptor entries, anything which isn't packet data.
15383 * The StrongARM chips on the board (one for tx, one for rx)
15384 * are running in big-endian mode.
15385 */
15386 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15387 GRC_MODE_WSWAP_NONFRM_DATA);
15388 #ifdef __BIG_ENDIAN
15389 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15390 #endif
15391 spin_lock_init(&tp->lock);
15392 spin_lock_init(&tp->indirect_lock);
15393 INIT_WORK(&tp->reset_task, tg3_reset_task);
15395 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15396 if (!tp->regs) {
15397 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15398 err = -ENOMEM;
15399 goto err_out_free_dev;
15402 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15403 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15404 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15405 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15406 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15407 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15408 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15409 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15410 tg3_flag_set(tp, ENABLE_APE);
15411 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15412 if (!tp->aperegs) {
15413 dev_err(&pdev->dev,
15414 "Cannot map APE registers, aborting\n");
15415 err = -ENOMEM;
15416 goto err_out_iounmap;
15420 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15421 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15423 dev->ethtool_ops = &tg3_ethtool_ops;
15424 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15425 dev->netdev_ops = &tg3_netdev_ops;
15426 dev->irq = pdev->irq;
15428 err = tg3_get_invariants(tp);
15429 if (err) {
15430 dev_err(&pdev->dev,
15431 "Problem fetching invariants of chip, aborting\n");
15432 goto err_out_apeunmap;
15435 /* The EPB bridge inside 5714, 5715, and 5780 and any
15436 * device behind the EPB cannot support DMA addresses > 40-bit.
15437 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15438 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15439 * do DMA address check in tg3_start_xmit().
15440 */
15441 if (tg3_flag(tp, IS_5788))
15442 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15443 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15444 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15445 #ifdef CONFIG_HIGHMEM
15446 dma_mask = DMA_BIT_MASK(64);
15447 #endif
15448 } else
15449 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15451 /* Configure DMA attributes. */
15452 if (dma_mask > DMA_BIT_MASK(32)) {
15453 err = pci_set_dma_mask(pdev, dma_mask);
15454 if (!err) {
15455 features |= NETIF_F_HIGHDMA;
15456 err = pci_set_consistent_dma_mask(pdev,
15457 persist_dma_mask);
15458 if (err < 0) {
15459 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15460 "DMA for consistent allocations\n");
15461 goto err_out_apeunmap;
15465 if (err || dma_mask == DMA_BIT_MASK(32)) {
15466 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15467 if (err) {
15468 dev_err(&pdev->dev,
15469 "No usable DMA configuration, aborting\n");
15470 goto err_out_apeunmap;
15474 tg3_init_bufmgr_config(tp);
15476 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15478 /* 5700 B0 chips do not support checksumming correctly due
15479 * to hardware bugs.
15480 */
15481 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15482 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15484 if (tg3_flag(tp, 5755_PLUS))
15485 features |= NETIF_F_IPV6_CSUM;
15488 /* TSO is on by default on chips that support hardware TSO.
15489 * Firmware TSO on older chips gives lower performance, so it
15490 * is off by default, but can be enabled using ethtool.
15491 */
15492 if ((tg3_flag(tp, HW_TSO_1) ||
15493 tg3_flag(tp, HW_TSO_2) ||
15494 tg3_flag(tp, HW_TSO_3)) &&
15495 (features & NETIF_F_IP_CSUM))
15496 features |= NETIF_F_TSO;
15497 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15498 if (features & NETIF_F_IPV6_CSUM)
15499 features |= NETIF_F_TSO6;
15500 if (tg3_flag(tp, HW_TSO_3) ||
15501 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15502 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15503 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15504 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15505 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15506 features |= NETIF_F_TSO_ECN;
15509 dev->features |= features;
15510 dev->vlan_features |= features;
15512 /*
15513 * Add loopback capability only for a subset of devices that support
15514 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15515 * loopback for the remaining devices.
15516 */
15517 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15518 !tg3_flag(tp, CPMU_PRESENT))
15519 /* Add the loopback capability */
15520 features |= NETIF_F_LOOPBACK;
15522 dev->hw_features |= features;
15524 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15525 !tg3_flag(tp, TSO_CAPABLE) &&
15526 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15527 tg3_flag_set(tp, MAX_RXPEND_64);
15528 tp->rx_pending = 63;
15531 err = tg3_get_device_address(tp);
15532 if (err) {
15533 dev_err(&pdev->dev,
15534 "Could not obtain valid ethernet address, aborting\n");
15535 goto err_out_apeunmap;
15538 /*
15539 * Reset the chip in case an UNDI or EFI driver did not shut it down.
15540 * The DMA self test will enable WDMAC and we'll see (spurious)
15541 * pending DMA on the PCI bus at that point.
15542 */
15543 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15544 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15545 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15546 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15549 err = tg3_test_dma(tp);
15550 if (err) {
15551 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15552 goto err_out_apeunmap;
15555 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15556 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15557 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15558 for (i = 0; i < tp->irq_max; i++) {
15559 struct tg3_napi *tnapi = &tp->napi[i];
15561 tnapi->tp = tp;
15562 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15564 tnapi->int_mbox = intmbx;
15565 if (i <= 4)
15566 intmbx += 0x8;
15567 else
15568 intmbx += 0x4;
15570 tnapi->consmbox = rcvmbx;
15571 tnapi->prodmbox = sndmbx;
15573 if (i)
15574 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15575 else
15576 tnapi->coal_now = HOSTCC_MODE_NOW;
15578 if (!tg3_flag(tp, SUPPORT_MSIX))
15579 break;
15581 /*
15582 * If we support MSIX, we'll be using RSS. If we're using
15583 * RSS, the first vector only handles link interrupts and the
15584 * remaining vectors handle rx and tx interrupts. Reuse the
15585 * mailbox values for the next iteration. The values we set up
15586 * above are still useful for the single vectored mode.
15587 */
15588 if (!i)
15589 continue;
15591 rcvmbx += 0x8;
15593 if (sndmbx & 0x4)
15594 sndmbx -= 0x4;
15595 else
15596 sndmbx += 0xc;
15599 tg3_init_coal(tp);
15601 pci_set_drvdata(pdev, dev);
15603 if (tg3_flag(tp, 5717_PLUS)) {
15604 /* Resume a low-power mode */
15605 tg3_frob_aux_power(tp, false);
15608 err = register_netdev(dev);
15609 if (err) {
15610 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15611 goto err_out_apeunmap;
15614 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15615 tp->board_part_number,
15616 tp->pci_chip_rev_id,
15617 tg3_bus_string(tp, str),
15618 dev->dev_addr);
15620 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15621 struct phy_device *phydev;
15622 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15623 netdev_info(dev,
15624 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15625 phydev->drv->name, dev_name(&phydev->dev));
15626 } else {
15627 char *ethtype;
15629 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15630 ethtype = "10/100Base-TX";
15631 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15632 ethtype = "1000Base-SX";
15633 else
15634 ethtype = "10/100/1000Base-T";
15636 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15637 "(WireSpeed[%d], EEE[%d])\n",
15638 tg3_phy_string(tp), ethtype,
15639 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15640 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15643 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15644 (dev->features & NETIF_F_RXCSUM) != 0,
15645 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15646 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15647 tg3_flag(tp, ENABLE_ASF) != 0,
15648 tg3_flag(tp, TSO_CAPABLE) != 0);
15649 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15650 tp->dma_rwctrl,
15651 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15652 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15654 pci_save_state(pdev);
15656 return 0;
15658 err_out_apeunmap:
15659 if (tp->aperegs) {
15660 iounmap(tp->aperegs);
15661 tp->aperegs = NULL;
15664 err_out_iounmap:
15665 if (tp->regs) {
15666 iounmap(tp->regs);
15667 tp->regs = NULL;
15670 err_out_free_dev:
15671 free_netdev(dev);
15673 err_out_power_down:
15674 pci_set_power_state(pdev, PCI_D3hot);
15676 err_out_free_res:
15677 pci_release_regions(pdev);
15679 err_out_disable_pdev:
15680 pci_disable_device(pdev);
15681 pci_set_drvdata(pdev, NULL);
15682 return err;
15685 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15687 struct net_device *dev = pci_get_drvdata(pdev);
15689 if (dev) {
15690 struct tg3 *tp = netdev_priv(dev);
15692 if (tp->fw)
15693 release_firmware(tp->fw);
15695 tg3_reset_task_cancel(tp);
15697 if (tg3_flag(tp, USE_PHYLIB)) {
15698 tg3_phy_fini(tp);
15699 tg3_mdio_fini(tp);
15702 unregister_netdev(dev);
15703 if (tp->aperegs) {
15704 iounmap(tp->aperegs);
15705 tp->aperegs = NULL;
15707 if (tp->regs) {
15708 iounmap(tp->regs);
15709 tp->regs = NULL;
15711 free_netdev(dev);
15712 pci_release_regions(pdev);
15713 pci_disable_device(pdev);
15714 pci_set_drvdata(pdev, NULL);
15718 #ifdef CONFIG_PM_SLEEP
15719 static int tg3_suspend(struct device *device)
15721 struct pci_dev *pdev = to_pci_dev(device);
15722 struct net_device *dev = pci_get_drvdata(pdev);
15723 struct tg3 *tp = netdev_priv(dev);
15724 int err;
15726 if (!netif_running(dev))
15727 return 0;
15729 tg3_reset_task_cancel(tp);
15730 tg3_phy_stop(tp);
15731 tg3_netif_stop(tp);
15733 del_timer_sync(&tp->timer);
15735 tg3_full_lock(tp, 1);
15736 tg3_disable_ints(tp);
15737 tg3_full_unlock(tp);
15739 netif_device_detach(dev);
15741 tg3_full_lock(tp, 0);
15742 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15743 tg3_flag_clear(tp, INIT_COMPLETE);
15744 tg3_full_unlock(tp);
15746 err = tg3_power_down_prepare(tp);
15747 if (err) {
15748 int err2;
15750 tg3_full_lock(tp, 0);
15752 tg3_flag_set(tp, INIT_COMPLETE);
15753 err2 = tg3_restart_hw(tp, 1);
15754 if (err2)
15755 goto out;
15757 tp->timer.expires = jiffies + tp->timer_offset;
15758 add_timer(&tp->timer);
15760 netif_device_attach(dev);
15761 tg3_netif_start(tp);
15763 out:
15764 tg3_full_unlock(tp);
15766 if (!err2)
15767 tg3_phy_start(tp);
15770 return err;
15773 static int tg3_resume(struct device *device)
15775 struct pci_dev *pdev = to_pci_dev(device);
15776 struct net_device *dev = pci_get_drvdata(pdev);
15777 struct tg3 *tp = netdev_priv(dev);
15778 int err;
15780 if (!netif_running(dev))
15781 return 0;
15783 netif_device_attach(dev);
15785 tg3_full_lock(tp, 0);
15787 tg3_flag_set(tp, INIT_COMPLETE);
15788 err = tg3_restart_hw(tp, 1);
15789 if (err)
15790 goto out;
15792 tp->timer.expires = jiffies + tp->timer_offset;
15793 add_timer(&tp->timer);
15795 tg3_netif_start(tp);
15797 out:
15798 tg3_full_unlock(tp);
15800 if (!err)
15801 tg3_phy_start(tp);
15803 return err;
15806 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15807 #define TG3_PM_OPS (&tg3_pm_ops)
15809 #else
15811 #define TG3_PM_OPS NULL
15813 #endif /* CONFIG_PM_SLEEP */
15815 /**
15816 * tg3_io_error_detected - called when PCI error is detected
15817 * @pdev: Pointer to PCI device
15818 * @state: The current pci connection state
15819 *
15820 * This function is called after a PCI bus error affecting
15821 * this device has been detected.
15822 */
15823 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15824 pci_channel_state_t state)
15826 struct net_device *netdev = pci_get_drvdata(pdev);
15827 struct tg3 *tp = netdev_priv(netdev);
15828 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15830 netdev_info(netdev, "PCI I/O error detected\n");
15832 rtnl_lock();
15834 if (!netif_running(netdev))
15835 goto done;
15837 tg3_phy_stop(tp);
15839 tg3_netif_stop(tp);
15841 del_timer_sync(&tp->timer);
15843 /* Want to make sure that the reset task doesn't run */
15844 tg3_reset_task_cancel(tp);
15845 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15847 netif_device_detach(netdev);
15849 /* Clean up software state, even if MMIO is blocked */
15850 tg3_full_lock(tp, 0);
15851 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15852 tg3_full_unlock(tp);
15854 done:
15855 if (state == pci_channel_io_perm_failure)
15856 err = PCI_ERS_RESULT_DISCONNECT;
15857 else
15858 pci_disable_device(pdev);
15860 rtnl_unlock();
15862 return err;
15865 /**
15866 * tg3_io_slot_reset - called after the pci bus has been reset.
15867 * @pdev: Pointer to PCI device
15868 *
15869 * Restart the card from scratch, as if from a cold-boot.
15870 * At this point, the card has experienced a hard reset,
15871 * followed by fixups by BIOS, and has its config space
15872 * set up identically to what it was at cold boot.
15873 */
15874 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15876 struct net_device *netdev = pci_get_drvdata(pdev);
15877 struct tg3 *tp = netdev_priv(netdev);
15878 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15879 int err;
15881 rtnl_lock();
15883 if (pci_enable_device(pdev)) {
15884 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15885 goto done;
15888 pci_set_master(pdev);
15889 pci_restore_state(pdev);
15890 pci_save_state(pdev);
15892 if (!netif_running(netdev)) {
15893 rc = PCI_ERS_RESULT_RECOVERED;
15894 goto done;
15897 err = tg3_power_up(tp);
15898 if (err)
15899 goto done;
15901 rc = PCI_ERS_RESULT_RECOVERED;
15903 done:
15904 rtnl_unlock();
15906 return rc;
15909 /**
15910 * tg3_io_resume - called when traffic can start flowing again.
15911 * @pdev: Pointer to PCI device
15912 *
15913 * This callback is called when the error recovery driver tells
15914 * us that it's OK to resume normal operation.
15915 */
15916 static void tg3_io_resume(struct pci_dev *pdev)
15918 struct net_device *netdev = pci_get_drvdata(pdev);
15919 struct tg3 *tp = netdev_priv(netdev);
15920 int err;
15922 rtnl_lock();
15924 if (!netif_running(netdev))
15925 goto done;
15927 tg3_full_lock(tp, 0);
15928 tg3_flag_set(tp, INIT_COMPLETE);
15929 err = tg3_restart_hw(tp, 1);
15930 tg3_full_unlock(tp);
15931 if (err) {
15932 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15933 goto done;
15936 netif_device_attach(netdev);
15938 tp->timer.expires = jiffies + tp->timer_offset;
15939 add_timer(&tp->timer);
15941 tg3_netif_start(tp);
15943 tg3_phy_start(tp);
15945 done:
15946 rtnl_unlock();
15949 static struct pci_error_handlers tg3_err_handler = {
15950 .error_detected = tg3_io_error_detected,
15951 .slot_reset = tg3_io_slot_reset,
15952 .resume = tg3_io_resume
15955 static struct pci_driver tg3_driver = {
15956 .name = DRV_MODULE_NAME,
15957 .id_table = tg3_pci_tbl,
15958 .probe = tg3_init_one,
15959 .remove = __devexit_p(tg3_remove_one),
15960 .err_handler = &tg3_err_handler,
15961 .driver.pm = TG3_PM_OPS,
15964 static int __init tg3_init(void)
15966 return pci_register_driver(&tg3_driver);
15969 static void __exit tg3_cleanup(void)
15971 pci_unregister_driver(&tg3_driver);
15974 module_init(tg3_init);
15975 module_exit(tg3_cleanup);