/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
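
/*
 * Illustrative expansion (not part of the driver): a call such as
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		...;
 *
 * expands to _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an
 * atomic test_bit() on the flag bitmap, so flag tests and updates need
 * no extra locking.
 */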
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		120
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"August 18, 2011"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
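
/*
 * Worked example (illustrative): with TG3_TX_RING_SIZE == 512,
 * NEXT_TX(511) == (511 + 1) & 511 == 0, so the producer index wraps to
 * the start of the ring with a single AND instead of a hardware modulo.
 */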
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	0
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX		4096

#define TG3_RAW_IP_ALIGN		2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
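
/*
 * Usage note (illustrative): the mask can be set at module load time,
 * e.g. "modprobe tg3 tg3_debug=0x7" to enable only NETIF_MSG_DRV,
 * NETIF_MSG_PROBE and NETIF_MSG_LINK; the default of -1 selects
 * TG3_DEF_MSG_ENABLE above.
 */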
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
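
/*
 * Usage sketch (illustrative): tw32() may leave the write posted in a
 * PCI bridge; tw32_f() flushes it by reading the register back, and
 * tw32_wait_f() additionally enforces a delay, e.g.
 *
 *	tw32_f(MAC_MODE, tp->mac_mode);
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 */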
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't have any stale locks. */
	for (i = 0; i < 8; i++) {
		if (i == TG3_APE_LOCK_GPIO)
			continue;
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
	}

	/* Clear the correct bit of the GPIO lock too. */
	if (!tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_REQ_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
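
/*
 * Typical call pattern (illustrative): callers bracket accesses to
 * APE-shared resources with the lock pair and must handle -EBUSY, e.g.
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;
 *	...touch APE shared memory...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */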
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state. Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
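
/*
 * Usage sketch (illustrative): a successful read returns 0 and fills
 * *val, so callers conventionally test the inverted return value:
 *
 *	u32 reg;
 *	if (!tg3_readphy(tp, MII_BMSR, &reg))
 *		...use reg...;
 */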
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
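
/*
 * Illustrative note: DSP registers are reached indirectly; the register
 * number goes through MII_TG3_DSP_ADDRESS and the payload through
 * MII_TG3_DSP_RW_PORT, so tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy)
 * issues the two tg3_writephy() operations above in that order.
 */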
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers. A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
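
/*
 * Worked example (illustrative): with TG3_FW_EVENT_TIMEOUT_USEC == 2500
 * the loop above runs at most (2500 >> 3) + 1 == 313 iterations of
 * udelay(8), i.e. roughly the full 2.5 ms firmware event timeout.
 */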
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware. Some Sun onboard
	 * parts are configured like that. So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
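
/*
 * Example mapping (illustrative), following IEEE 802.3 annex 28B:
 * TX|RX -> ADVERTISE_PAUSE_CAP (symmetric pause), TX only ->
 * ADVERTISE_PAUSE_ASYM, RX only -> ADVERTISE_PAUSE_CAP |
 * ADVERTISE_PAUSE_ASYM.
 */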
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
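
/*
 * Resolution table (illustrative), local advertisement vs. link partner:
 *
 *	lcladv PAUSE+ASYM, rmtadv PAUSE      -> TX and RX pause
 *	lcladv PAUSE+ASYM, rmtadv ASYM only  -> RX pause only
 *	lcladv PAUSE only, rmtadv PAUSE      -> TX and RX pause
 *	lcladv ASYM only,  rmtadv PAUSE+ASYM -> TX pause only
 *	otherwise                            -> no pause
 */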
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2135 TG3_CL45_D7_EEERES_STAT, &val);
2137 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2138 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2139 tp->setlpicnt = 2;
2142 if (!tp->setlpicnt) {
2143 if (current_link_up == 1 &&
2144 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2145 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2146 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2149 val = tr32(TG3_CPMU_EEE_MODE);
2150 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2154 static void tg3_phy_eee_enable(struct tg3 *tp)
2156 u32 val;
2158 if (tp->link_config.active_speed == SPEED_1000 &&
2159 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2160 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2161 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
2162 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2163 val = MII_TG3_DSP_TAP26_ALNOKO |
2164 MII_TG3_DSP_TAP26_RMRXSTO;
2165 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2166 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2169 val = tr32(TG3_CPMU_EEE_MODE);
2170 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2173 static int tg3_wait_macro_done(struct tg3 *tp)
2175 int limit = 100;
2177 while (limit--) {
2178 u32 tmp32;
2180 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2181 if ((tmp32 & 0x1000) == 0)
2182 break;
2185 if (limit < 0)
2186 return -EBUSY;
2188 return 0;
2191 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2193 static const u32 test_pat[4][6] = {
2194 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2195 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2196 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2197 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2199 int chan;
2201 for (chan = 0; chan < 4; chan++) {
2202 int i;
2204 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2205 (chan * 0x2000) | 0x0200);
2206 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2208 for (i = 0; i < 6; i++)
2209 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2210 test_pat[chan][i]);
2212 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2213 if (tg3_wait_macro_done(tp)) {
2214 *resetp = 1;
2215 return -EBUSY;
2218 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2219 (chan * 0x2000) | 0x0200);
2220 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2221 if (tg3_wait_macro_done(tp)) {
2222 *resetp = 1;
2223 return -EBUSY;
2226 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2227 if (tg3_wait_macro_done(tp)) {
2228 *resetp = 1;
2229 return -EBUSY;
2232 for (i = 0; i < 6; i += 2) {
2233 u32 low, high;
2235 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2236 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2237 tg3_wait_macro_done(tp)) {
2238 *resetp = 1;
2239 return -EBUSY;
2241 low &= 0x7fff;
2242 high &= 0x000f;
2243 if (low != test_pat[chan][i] ||
2244 high != test_pat[chan][i+1]) {
2245 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2246 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2247 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2249 return -EBUSY;
2254 return 0;
2257 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2259 int chan;
2261 for (chan = 0; chan < 4; chan++) {
2262 int i;
2264 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2265 (chan * 0x2000) | 0x0200);
2266 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2267 for (i = 0; i < 6; i++)
2268 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2269 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2270 if (tg3_wait_macro_done(tp))
2271 return -EBUSY;
2274 return 0;
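/* Editor's note -- illustrative sketch, not part of the driver: both
 * routines above use the same DSP macro-access idiom.  Select a
 * channel-relative address (each channel's bank is 0x2000 apart), stream
 * six words through the read/write port, then kick DSP_CONTROL and poll
 * for completion:
 *
 *	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, (chan * 0x2000) | 0x0200);
 *	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
 *	for (i = 0; i < 6; i++)
 *		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, word[i]);
 *	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
 *	if (tg3_wait_macro_done(tp))
 *		return -EBUSY;
 *
 * "word[]" is a placeholder for either test_pat[chan] or the all-zero
 * channel pattern.
 */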
2277 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2279 u32 reg32, phy9_orig;
2280 int retries, do_phy_reset, err;
2282 retries = 10;
2283 do_phy_reset = 1;
2284 do {
2285 if (do_phy_reset) {
2286 err = tg3_bmcr_reset(tp);
2287 if (err)
2288 return err;
2289 do_phy_reset = 0;
2292 /* Disable transmitter and interrupt. */
2293 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2294 continue;
2296 reg32 |= 0x3000;
2297 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2299 /* Set full-duplex, 1000 Mbps. */
2300 tg3_writephy(tp, MII_BMCR,
2301 BMCR_FULLDPLX | BMCR_SPEED1000);
2303 /* Set to master mode. */
2304 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2305 continue;
2307 tg3_writephy(tp, MII_CTRL1000,
2308 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2310 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2311 if (err)
2312 return err;
2314 /* Block the PHY control access. */
2315 tg3_phydsp_write(tp, 0x8005, 0x0800);
2317 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2318 if (!err)
2319 break;
2320 } while (--retries);
2322 err = tg3_phy_reset_chanpat(tp);
2323 if (err)
2324 return err;
2326 tg3_phydsp_write(tp, 0x8005, 0x0000);
2328 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2329 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2331 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2333 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2335 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2336 reg32 &= ~0x3000;
2337 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2338 } else if (!err)
2339 err = -EBUSY;
2341 return err;
2344 /* Reset the tigon3 PHY and reapply chip-specific workarounds;
2345 * callers decide when a reset is required.
2347 static int tg3_phy_reset(struct tg3 *tp)
2349 u32 val, cpmuctrl;
2350 int err;
2352 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2353 val = tr32(GRC_MISC_CFG);
2354 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2355 udelay(40);
2357 err = tg3_readphy(tp, MII_BMSR, &val);
2358 err |= tg3_readphy(tp, MII_BMSR, &val);
2359 if (err != 0)
2360 return -EBUSY;
2362 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2363 netif_carrier_off(tp->dev);
2364 tg3_link_report(tp);
2367 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2368 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2369 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2370 err = tg3_phy_reset_5703_4_5(tp);
2371 if (err)
2372 return err;
2373 goto out;
2376 cpmuctrl = 0;
2377 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2378 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2379 cpmuctrl = tr32(TG3_CPMU_CTRL);
2380 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2381 tw32(TG3_CPMU_CTRL,
2382 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2385 err = tg3_bmcr_reset(tp);
2386 if (err)
2387 return err;
2389 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2390 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2391 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2393 tw32(TG3_CPMU_CTRL, cpmuctrl);
2396 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2397 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2398 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2399 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2400 CPMU_LSPD_1000MB_MACCLK_12_5) {
2401 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2402 udelay(40);
2403 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2407 if (tg3_flag(tp, 5717_PLUS) &&
2408 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2409 return 0;
2411 tg3_phy_apply_otp(tp);
2413 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2414 tg3_phy_toggle_apd(tp, true);
2415 else
2416 tg3_phy_toggle_apd(tp, false);
2418 out:
2419 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2420 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2421 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2422 tg3_phydsp_write(tp, 0x000a, 0x0323);
2423 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2426 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2427 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2428 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2431 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2432 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2433 tg3_phydsp_write(tp, 0x000a, 0x310b);
2434 tg3_phydsp_write(tp, 0x201f, 0x9506);
2435 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2436 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2438 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2439 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2440 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2441 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2442 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2443 tg3_writephy(tp, MII_TG3_TEST1,
2444 MII_TG3_TEST1_TRIM_EN | 0x4);
2445 } else
2446 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2448 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2452 /* Set the extended packet length bit (bit 14) on all chips
2453 * that support jumbo frames. */
2454 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2455 /* Cannot do read-modify-write on 5401 */
2456 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2457 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2458 /* Set bit 14 with read-modify-write to preserve other bits */
2459 err = tg3_phy_auxctl_read(tp,
2460 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2461 if (!err)
2462 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2463 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2466 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2467 * jumbo frames transmission.
2469 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2470 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2471 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2472 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2475 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2476 /* adjust output voltage */
2477 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2480 tg3_phy_toggle_automdix(tp, 1);
2481 tg3_phy_set_wirespeed(tp);
2482 return 0;
2485 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2486 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2487 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2488 TG3_GPIO_MSG_NEED_VAUX)
2489 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2490 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2491 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2492 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2493 (TG3_GPIO_MSG_DRVR_PRES << 12))
2495 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2496 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2497 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2498 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2499 (TG3_GPIO_MSG_NEED_VAUX << 12))
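/* Editor's note -- illustrative, not part of the driver: each PCI function
 * owns one 4-bit nibble of the GPIO message word, which is why the masks
 * above repeat the two message bits at nibble offsets 0, 4, 8 and 12.
 * Assuming pci_fn == 2, tg3_set_function_status() below effectively does:
 *
 *	shift  = TG3_APE_GPIO_MSG_SHIFT + 4 * 2;
 *	status = (status & ~(TG3_GPIO_MSG_MASK << shift)) |
 *		 (TG3_GPIO_MSG_NEED_VAUX << shift);
 *
 * updating only function 2's nibble while leaving the other functions'
 * status bits intact.
 */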
2501 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2503 u32 status, shift;
2505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2506 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2507 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2508 else
2509 status = tr32(TG3_CPMU_DRV_STATUS);
2511 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2512 status &= ~(TG3_GPIO_MSG_MASK << shift);
2513 status |= (newstat << shift);
2515 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2516 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2517 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2518 else
2519 tw32(TG3_CPMU_DRV_STATUS, status);
2521 return status >> TG3_APE_GPIO_MSG_SHIFT;
2524 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2526 if (!tg3_flag(tp, IS_NIC))
2527 return 0;
2529 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2530 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2531 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2532 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2533 return -EIO;
2535 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2537 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2538 TG3_GRC_LCLCTL_PWRSW_DELAY);
2540 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2541 } else {
2542 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2543 TG3_GRC_LCLCTL_PWRSW_DELAY);
2546 return 0;
2549 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2551 u32 grc_local_ctrl;
2553 if (!tg3_flag(tp, IS_NIC) ||
2554 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2555 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2556 return;
2558 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2560 tw32_wait_f(GRC_LOCAL_CTRL,
2561 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2562 TG3_GRC_LCLCTL_PWRSW_DELAY);
2564 tw32_wait_f(GRC_LOCAL_CTRL,
2565 grc_local_ctrl,
2566 TG3_GRC_LCLCTL_PWRSW_DELAY);
2568 tw32_wait_f(GRC_LOCAL_CTRL,
2569 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2570 TG3_GRC_LCLCTL_PWRSW_DELAY);
2573 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2575 if (!tg3_flag(tp, IS_NIC))
2576 return;
2578 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2579 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2580 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2581 (GRC_LCLCTRL_GPIO_OE0 |
2582 GRC_LCLCTRL_GPIO_OE1 |
2583 GRC_LCLCTRL_GPIO_OE2 |
2584 GRC_LCLCTRL_GPIO_OUTPUT0 |
2585 GRC_LCLCTRL_GPIO_OUTPUT1),
2586 TG3_GRC_LCLCTL_PWRSW_DELAY);
2587 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2588 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2589 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2590 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2591 GRC_LCLCTRL_GPIO_OE1 |
2592 GRC_LCLCTRL_GPIO_OE2 |
2593 GRC_LCLCTRL_GPIO_OUTPUT0 |
2594 GRC_LCLCTRL_GPIO_OUTPUT1 |
2595 tp->grc_local_ctrl;
2596 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2597 TG3_GRC_LCLCTL_PWRSW_DELAY);
2599 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2600 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2601 TG3_GRC_LCLCTL_PWRSW_DELAY);
2603 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2604 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2605 TG3_GRC_LCLCTL_PWRSW_DELAY);
2606 } else {
2607 u32 no_gpio2;
2608 u32 grc_local_ctrl = 0;
2610 /* Workaround to prevent the card from drawing too much current. */
2611 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2612 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2613 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2614 grc_local_ctrl,
2615 TG3_GRC_LCLCTL_PWRSW_DELAY);
2618 /* On 5753 and variants, GPIO2 cannot be used. */
2619 no_gpio2 = tp->nic_sram_data_cfg &
2620 NIC_SRAM_DATA_CFG_NO_GPIO2;
2622 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2623 GRC_LCLCTRL_GPIO_OE1 |
2624 GRC_LCLCTRL_GPIO_OE2 |
2625 GRC_LCLCTRL_GPIO_OUTPUT1 |
2626 GRC_LCLCTRL_GPIO_OUTPUT2;
2627 if (no_gpio2) {
2628 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2629 GRC_LCLCTRL_GPIO_OUTPUT2);
2631 tw32_wait_f(GRC_LOCAL_CTRL,
2632 tp->grc_local_ctrl | grc_local_ctrl,
2633 TG3_GRC_LCLCTL_PWRSW_DELAY);
2635 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2637 tw32_wait_f(GRC_LOCAL_CTRL,
2638 tp->grc_local_ctrl | grc_local_ctrl,
2639 TG3_GRC_LCLCTL_PWRSW_DELAY);
2641 if (!no_gpio2) {
2642 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2643 tw32_wait_f(GRC_LOCAL_CTRL,
2644 tp->grc_local_ctrl | grc_local_ctrl,
2645 TG3_GRC_LCLCTL_PWRSW_DELAY);
2650 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2652 u32 msg = 0;
2654 /* Serialize power state transitions */
2655 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2656 return;
2658 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2659 msg = TG3_GPIO_MSG_NEED_VAUX;
2661 msg = tg3_set_function_status(tp, msg);
2663 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2664 goto done;
2666 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2667 tg3_pwrsrc_switch_to_vaux(tp);
2668 else
2669 tg3_pwrsrc_die_with_vmain(tp);
2671 done:
2672 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2675 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2677 bool need_vaux = false;
2679 /* The GPIOs serve an entirely different purpose on the 57765, so leave them untouched here. */
2680 if (!tg3_flag(tp, IS_NIC) ||
2681 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2682 return;
2684 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2685 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2686 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2687 tg3_frob_aux_power_5717(tp, include_wol ?
2688 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2689 return;
2692 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2693 struct net_device *dev_peer;
2695 dev_peer = pci_get_drvdata(tp->pdev_peer);
2697 /* remove_one() may have been run on the peer. */
2698 if (dev_peer) {
2699 struct tg3 *tp_peer = netdev_priv(dev_peer);
2701 if (tg3_flag(tp_peer, INIT_COMPLETE))
2702 return;
2704 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2705 tg3_flag(tp_peer, ENABLE_ASF))
2706 need_vaux = true;
2710 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2711 tg3_flag(tp, ENABLE_ASF))
2712 need_vaux = true;
2714 if (need_vaux)
2715 tg3_pwrsrc_switch_to_vaux(tp);
2716 else
2717 tg3_pwrsrc_die_with_vmain(tp);
2720 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2722 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2723 return 1;
2724 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2725 if (speed != SPEED_10)
2726 return 1;
2727 } else if (speed == SPEED_10)
2728 return 1;
2730 return 0;
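/* Editor's note -- summary of the rule implemented above: LED_CTRL_MODE_PHY_2
 * always reports link polarity; otherwise a BCM5411 PHY needs it at every
 * speed except 10 Mbps, while any other PHY needs it only at 10 Mbps.
 */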
2733 static int tg3_setup_phy(struct tg3 *, int);
2734 static int tg3_halt_cpu(struct tg3 *, u32);
2736 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2738 u32 val;
2740 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2741 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2742 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2743 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2745 sg_dig_ctrl |=
2746 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2747 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2748 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2750 return;
2753 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2754 tg3_bmcr_reset(tp);
2755 val = tr32(GRC_MISC_CFG);
2756 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2757 udelay(40);
2758 return;
2759 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2760 u32 phytest;
2761 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2762 u32 phy;
2764 tg3_writephy(tp, MII_ADVERTISE, 0);
2765 tg3_writephy(tp, MII_BMCR,
2766 BMCR_ANENABLE | BMCR_ANRESTART);
2768 tg3_writephy(tp, MII_TG3_FET_TEST,
2769 phytest | MII_TG3_FET_SHADOW_EN);
2770 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2771 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2772 tg3_writephy(tp,
2773 MII_TG3_FET_SHDW_AUXMODE4,
2774 phy);
2776 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2778 return;
2779 } else if (do_low_power) {
2780 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2781 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2783 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2784 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2785 MII_TG3_AUXCTL_PCTL_VREG_11V;
2786 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2789 /* The PHY should not be powered down on some chips because
2790 * of bugs.
2792 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2793 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2794 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2795 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2796 return;
2798 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2799 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2800 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2801 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2802 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2803 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2806 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2809 /* tp->lock is held. */
2810 static int tg3_nvram_lock(struct tg3 *tp)
2812 if (tg3_flag(tp, NVRAM)) {
2813 int i;
2815 if (tp->nvram_lock_cnt == 0) {
2816 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2817 for (i = 0; i < 8000; i++) {
2818 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2819 break;
2820 udelay(20);
2822 if (i == 8000) {
2823 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2824 return -ENODEV;
2827 tp->nvram_lock_cnt++;
2829 return 0;
2832 /* tp->lock is held. */
2833 static void tg3_nvram_unlock(struct tg3 *tp)
2835 if (tg3_flag(tp, NVRAM)) {
2836 if (tp->nvram_lock_cnt > 0)
2837 tp->nvram_lock_cnt--;
2838 if (tp->nvram_lock_cnt == 0)
2839 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
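/* Editor's note -- illustrative, not part of the driver: the SWARB lock is
 * reference counted via tp->nvram_lock_cnt, so every tg3_nvram_lock() must
 * be balanced by a tg3_nvram_unlock(); only the final unlock issues
 * SWARB_REQ_CLR1.  tg3_nvram_read() below shows the canonical sequence:
 *
 *	tg3_nvram_lock(tp);
 *	tg3_enable_nvram_access(tp);
 *	...transfer...
 *	tg3_disable_nvram_access(tp);
 *	tg3_nvram_unlock(tp);
 */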
2843 /* tp->lock is held. */
2844 static void tg3_enable_nvram_access(struct tg3 *tp)
2846 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2847 u32 nvaccess = tr32(NVRAM_ACCESS);
2849 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2853 /* tp->lock is held. */
2854 static void tg3_disable_nvram_access(struct tg3 *tp)
2856 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2857 u32 nvaccess = tr32(NVRAM_ACCESS);
2859 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2863 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2864 u32 offset, u32 *val)
2866 u32 tmp;
2867 int i;
2869 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2870 return -EINVAL;
2872 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2873 EEPROM_ADDR_DEVID_MASK |
2874 EEPROM_ADDR_READ);
2875 tw32(GRC_EEPROM_ADDR,
2876 tmp |
2877 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2878 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2879 EEPROM_ADDR_ADDR_MASK) |
2880 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2882 for (i = 0; i < 1000; i++) {
2883 tmp = tr32(GRC_EEPROM_ADDR);
2885 if (tmp & EEPROM_ADDR_COMPLETE)
2886 break;
2887 msleep(1);
2889 if (!(tmp & EEPROM_ADDR_COMPLETE))
2890 return -EBUSY;
2892 tmp = tr32(GRC_EEPROM_DATA);
2895 * The EEPROM always returns data in the opposite of the native
2896 * endian format. Perform a blind byteswap to compensate.
2898 *val = swab32(tmp);
2900 return 0;
2903 #define NVRAM_CMD_TIMEOUT 10000
2905 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2907 int i;
2909 tw32(NVRAM_CMD, nvram_cmd);
2910 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2911 udelay(10);
2912 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2913 udelay(10);
2914 break;
2918 if (i == NVRAM_CMD_TIMEOUT)
2919 return -EBUSY;
2921 return 0;
2924 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2926 if (tg3_flag(tp, NVRAM) &&
2927 tg3_flag(tp, NVRAM_BUFFERED) &&
2928 tg3_flag(tp, FLASH) &&
2929 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2930 (tp->nvram_jedecnum == JEDEC_ATMEL))
2932 addr = ((addr / tp->nvram_pagesize) <<
2933 ATMEL_AT45DB0X1B_PAGE_POS) +
2934 (addr % tp->nvram_pagesize);
2936 return addr;
2939 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2941 if (tg3_flag(tp, NVRAM) &&
2942 tg3_flag(tp, NVRAM_BUFFERED) &&
2943 tg3_flag(tp, FLASH) &&
2944 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2945 (tp->nvram_jedecnum == JEDEC_ATMEL))
2947 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2948 tp->nvram_pagesize) +
2949 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2951 return addr;
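/* Editor's note -- worked example, assuming an Atmel AT45DB0X1B part with
 * a 264-byte page and ATMEL_AT45DB0X1B_PAGE_POS == 9 (both assumptions,
 * taken from the usual tg3.h values): a linear offset of 1000 translates
 * in tg3_nvram_phys_addr() as
 *
 *	phys = ((1000 / 264) << 9) + (1000 % 264)
 *	     = (3 << 9) + 208 = 1744
 *
 * and tg3_nvram_logical_addr() inverts it exactly:
 *
 *	log  = ((1744 >> 9) * 264) + (1744 & 511)
 *	     = 792 + 208 = 1000
 */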
2954 /* NOTE: Data read in from NVRAM is byteswapped according to
2955 * the byteswapping settings for all other register accesses.
2956 * tg3 devices are BE devices, so on a BE machine, the data
2957 * returned will be exactly as it is seen in NVRAM. On a LE
2958 * machine, the 32-bit value will be byteswapped.
2960 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2962 int ret;
2964 if (!tg3_flag(tp, NVRAM))
2965 return tg3_nvram_read_using_eeprom(tp, offset, val);
2967 offset = tg3_nvram_phys_addr(tp, offset);
2969 if (offset > NVRAM_ADDR_MSK)
2970 return -EINVAL;
2972 ret = tg3_nvram_lock(tp);
2973 if (ret)
2974 return ret;
2976 tg3_enable_nvram_access(tp);
2978 tw32(NVRAM_ADDR, offset);
2979 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2980 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2982 if (ret == 0)
2983 *val = tr32(NVRAM_RDDATA);
2985 tg3_disable_nvram_access(tp);
2987 tg3_nvram_unlock(tp);
2989 return ret;
2992 /* Ensures NVRAM data is in bytestream format. */
2993 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2995 u32 v;
2996 int res = tg3_nvram_read(tp, offset, &v);
2997 if (!res)
2998 *val = cpu_to_be32(v);
2999 return res;
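/* Editor's note -- illustrative example of the NOTE above tg3_nvram_read():
 * if NVRAM holds the bytes 01 02 03 04, a big-endian host sees
 * tg3_nvram_read() return 0x01020304 while a little-endian host sees
 * 0x04030201.  tg3_nvram_read_be32() applies cpu_to_be32() so that on
 * either host the bytes of *val land in memory in NVRAM (bytestream)
 * order.
 */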
3002 #define RX_CPU_SCRATCH_BASE 0x30000
3003 #define RX_CPU_SCRATCH_SIZE 0x04000
3004 #define TX_CPU_SCRATCH_BASE 0x34000
3005 #define TX_CPU_SCRATCH_SIZE 0x04000
3007 /* tp->lock is held. */
3008 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3010 int i;
3012 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3014 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3015 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3017 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3018 return 0;
3020 if (offset == RX_CPU_BASE) {
3021 for (i = 0; i < 10000; i++) {
3022 tw32(offset + CPU_STATE, 0xffffffff);
3023 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3024 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3025 break;
3028 tw32(offset + CPU_STATE, 0xffffffff);
3029 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3030 udelay(10);
3031 } else {
3032 for (i = 0; i < 10000; i++) {
3033 tw32(offset + CPU_STATE, 0xffffffff);
3034 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3035 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3036 break;
3040 if (i >= 10000) {
3041 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3042 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3043 return -ENODEV;
3046 /* Clear firmware's nvram arbitration. */
3047 if (tg3_flag(tp, NVRAM))
3048 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3049 return 0;
3052 struct fw_info {
3053 unsigned int fw_base;
3054 unsigned int fw_len;
3055 const __be32 *fw_data;
3058 /* tp->lock is held. */
3059 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3060 u32 cpu_scratch_base, int cpu_scratch_size,
3061 struct fw_info *info)
3063 int err, lock_err, i;
3064 void (*write_op)(struct tg3 *, u32, u32);
3066 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3067 netdev_err(tp->dev,
3068 "%s: Trying to load TX cpu firmware which is 5705\n",
3069 __func__);
3070 return -EINVAL;
3073 if (tg3_flag(tp, 5705_PLUS))
3074 write_op = tg3_write_mem;
3075 else
3076 write_op = tg3_write_indirect_reg32;
3078 /* It is possible that bootcode is still loading at this point.
3079 * Get the nvram lock first before halting the cpu.
3081 lock_err = tg3_nvram_lock(tp);
3082 err = tg3_halt_cpu(tp, cpu_base);
3083 if (!lock_err)
3084 tg3_nvram_unlock(tp);
3085 if (err)
3086 goto out;
3088 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3089 write_op(tp, cpu_scratch_base + i, 0);
3090 tw32(cpu_base + CPU_STATE, 0xffffffff);
3091 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3092 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3093 write_op(tp, (cpu_scratch_base +
3094 (info->fw_base & 0xffff) +
3095 (i * sizeof(u32))),
3096 be32_to_cpu(info->fw_data[i]));
3098 err = 0;
3100 out:
3101 return err;
3104 /* tp->lock is held. */
3105 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3107 struct fw_info info;
3108 const __be32 *fw_data;
3109 int err, i;
3111 fw_data = (void *)tp->fw->data;
3113 /* Firmware blob starts with version numbers, followed by
3114 start address and length. We are setting complete length.
3115 length = end_address_of_bss - start_address_of_text.
3116 Remainder is the blob to be loaded contiguously
3117 from start address. */
3119 info.fw_base = be32_to_cpu(fw_data[1]);
3120 info.fw_len = tp->fw->size - 12;
3121 info.fw_data = &fw_data[3];
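	/* Editor's note -- illustrative layout of tp->fw->data as parsed above:
	 *
	 *	fw_data[0]	firmware version numbers
	 *	fw_data[1]	fw_base, the load/start address
	 *	fw_data[2]	declared length (recomputed here as
	 *			tp->fw->size - 12, i.e. everything past
	 *			this three-word header)
	 *	fw_data[3..]	the blob, loaded contiguously from fw_base
	 */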
3123 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3124 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3125 &info);
3126 if (err)
3127 return err;
3129 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3130 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3131 &info);
3132 if (err)
3133 return err;
3135 /* Now start up only the RX CPU. */
3136 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3137 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3139 for (i = 0; i < 5; i++) {
3140 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3141 break;
3142 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3143 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3144 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3145 udelay(1000);
3147 if (i >= 5) {
3148 netdev_err(tp->dev, "%s failed to set RX CPU PC (is %08x, "
3149 "should be %08x)\n", __func__,
3150 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3151 return -ENODEV;
3153 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3154 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3156 return 0;
3159 /* tp->lock is held. */
3160 static int tg3_load_tso_firmware(struct tg3 *tp)
3162 struct fw_info info;
3163 const __be32 *fw_data;
3164 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3165 int err, i;
3167 if (tg3_flag(tp, HW_TSO_1) ||
3168 tg3_flag(tp, HW_TSO_2) ||
3169 tg3_flag(tp, HW_TSO_3))
3170 return 0;
3172 fw_data = (void *)tp->fw->data;
3174 /* Firmware blob starts with version numbers, followed by
3175 start address and length. We are setting complete length.
3176 length = end_address_of_bss - start_address_of_text.
3177 Remainder is the blob to be loaded contiguously
3178 from start address. */
3180 info.fw_base = be32_to_cpu(fw_data[1]);
3181 cpu_scratch_size = tp->fw_len;
3182 info.fw_len = tp->fw->size - 12;
3183 info.fw_data = &fw_data[3];
3185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3186 cpu_base = RX_CPU_BASE;
3187 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3188 } else {
3189 cpu_base = TX_CPU_BASE;
3190 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3191 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3194 err = tg3_load_firmware_cpu(tp, cpu_base,
3195 cpu_scratch_base, cpu_scratch_size,
3196 &info);
3197 if (err)
3198 return err;
3200 /* Now start up the CPU. */
3201 tw32(cpu_base + CPU_STATE, 0xffffffff);
3202 tw32_f(cpu_base + CPU_PC, info.fw_base);
3204 for (i = 0; i < 5; i++) {
3205 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3206 break;
3207 tw32(cpu_base + CPU_STATE, 0xffffffff);
3208 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3209 tw32_f(cpu_base + CPU_PC, info.fw_base);
3210 udelay(1000);
3212 if (i >= 5) {
3213 netdev_err(tp->dev,
3214 "%s fails to set CPU PC, is %08x should be %08x\n",
3215 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3216 return -ENODEV;
3218 tw32(cpu_base + CPU_STATE, 0xffffffff);
3219 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3220 return 0;
3224 /* tp->lock is held. */
3225 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3227 u32 addr_high, addr_low;
3228 int i;
3230 addr_high = ((tp->dev->dev_addr[0] << 8) |
3231 tp->dev->dev_addr[1]);
3232 addr_low = ((tp->dev->dev_addr[2] << 24) |
3233 (tp->dev->dev_addr[3] << 16) |
3234 (tp->dev->dev_addr[4] << 8) |
3235 (tp->dev->dev_addr[5] << 0));
3236 for (i = 0; i < 4; i++) {
3237 if (i == 1 && skip_mac_1)
3238 continue;
3239 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3240 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3243 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3244 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3245 for (i = 0; i < 12; i++) {
3246 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3247 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3251 addr_high = (tp->dev->dev_addr[0] +
3252 tp->dev->dev_addr[1] +
3253 tp->dev->dev_addr[2] +
3254 tp->dev->dev_addr[3] +
3255 tp->dev->dev_addr[4] +
3256 tp->dev->dev_addr[5]) &
3257 TX_BACKOFF_SEED_MASK;
3258 tw32(MAC_TX_BACKOFF_SEED, addr_high);
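/* Editor's note -- worked example, using the hypothetical address
 * 00:10:18:aa:bb:cc: the code above packs
 *
 *	addr_high = 0x00000010		(bytes 0-1)
 *	addr_low  = 0x18aabbcc		(bytes 2-5)
 *
 * and mirrors the pair into MAC_ADDR_{0..3} (plus the twelve extended
 * slots on 5703/5704).  The TX backoff seed is simply the byte sum,
 * 0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc, masked with
 * TX_BACKOFF_SEED_MASK.
 */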
3261 static void tg3_enable_register_access(struct tg3 *tp)
3264 * Make sure register accesses (indirect or otherwise) will function
3265 * correctly.
3267 pci_write_config_dword(tp->pdev,
3268 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3271 static int tg3_power_up(struct tg3 *tp)
3273 int err;
3275 tg3_enable_register_access(tp);
3277 err = pci_set_power_state(tp->pdev, PCI_D0);
3278 if (!err) {
3279 /* Switch out of Vaux if it is a NIC */
3280 tg3_pwrsrc_switch_to_vmain(tp);
3281 } else {
3282 netdev_err(tp->dev, "Transition to D0 failed\n");
3285 return err;
3288 static int tg3_power_down_prepare(struct tg3 *tp)
3290 u32 misc_host_ctrl;
3291 bool device_should_wake, do_low_power;
3293 tg3_enable_register_access(tp);
3295 /* Restore the CLKREQ setting. */
3296 if (tg3_flag(tp, CLKREQ_BUG)) {
3297 u16 lnkctl;
3299 pci_read_config_word(tp->pdev,
3300 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3301 &lnkctl);
3302 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3303 pci_write_config_word(tp->pdev,
3304 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3305 lnkctl);
3308 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3309 tw32(TG3PCI_MISC_HOST_CTRL,
3310 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3312 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3313 tg3_flag(tp, WOL_ENABLE);
3315 if (tg3_flag(tp, USE_PHYLIB)) {
3316 do_low_power = false;
3317 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3318 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3319 struct phy_device *phydev;
3320 u32 phyid, advertising;
3322 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3324 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3326 tp->link_config.orig_speed = phydev->speed;
3327 tp->link_config.orig_duplex = phydev->duplex;
3328 tp->link_config.orig_autoneg = phydev->autoneg;
3329 tp->link_config.orig_advertising = phydev->advertising;
3331 advertising = ADVERTISED_TP |
3332 ADVERTISED_Pause |
3333 ADVERTISED_Autoneg |
3334 ADVERTISED_10baseT_Half;
3336 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3337 if (tg3_flag(tp, WOL_SPEED_100MB))
3338 advertising |=
3339 ADVERTISED_100baseT_Half |
3340 ADVERTISED_100baseT_Full |
3341 ADVERTISED_10baseT_Full;
3342 else
3343 advertising |= ADVERTISED_10baseT_Full;
3346 phydev->advertising = advertising;
3348 phy_start_aneg(phydev);
3350 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3351 if (phyid != PHY_ID_BCMAC131) {
3352 phyid &= PHY_BCM_OUI_MASK;
3353 if (phyid == PHY_BCM_OUI_1 ||
3354 phyid == PHY_BCM_OUI_2 ||
3355 phyid == PHY_BCM_OUI_3)
3356 do_low_power = true;
3359 } else {
3360 do_low_power = true;
3362 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3363 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3364 tp->link_config.orig_speed = tp->link_config.speed;
3365 tp->link_config.orig_duplex = tp->link_config.duplex;
3366 tp->link_config.orig_autoneg = tp->link_config.autoneg;
3369 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3370 tp->link_config.speed = SPEED_10;
3371 tp->link_config.duplex = DUPLEX_HALF;
3372 tp->link_config.autoneg = AUTONEG_ENABLE;
3373 tg3_setup_phy(tp, 0);
3377 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3378 u32 val;
3380 val = tr32(GRC_VCPU_EXT_CTRL);
3381 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3382 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3383 int i;
3384 u32 val;
3386 for (i = 0; i < 200; i++) {
3387 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3388 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3389 break;
3390 msleep(1);
3393 if (tg3_flag(tp, WOL_CAP))
3394 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3395 WOL_DRV_STATE_SHUTDOWN |
3396 WOL_DRV_WOL |
3397 WOL_SET_MAGIC_PKT);
3399 if (device_should_wake) {
3400 u32 mac_mode;
3402 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3403 if (do_low_power &&
3404 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3405 tg3_phy_auxctl_write(tp,
3406 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3407 MII_TG3_AUXCTL_PCTL_WOL_EN |
3408 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3409 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3410 udelay(40);
3413 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3414 mac_mode = MAC_MODE_PORT_MODE_GMII;
3415 else
3416 mac_mode = MAC_MODE_PORT_MODE_MII;
3418 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3419 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3420 ASIC_REV_5700) {
3421 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3422 SPEED_100 : SPEED_10;
3423 if (tg3_5700_link_polarity(tp, speed))
3424 mac_mode |= MAC_MODE_LINK_POLARITY;
3425 else
3426 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3428 } else {
3429 mac_mode = MAC_MODE_PORT_MODE_TBI;
3432 if (!tg3_flag(tp, 5750_PLUS))
3433 tw32(MAC_LED_CTRL, tp->led_ctrl);
3435 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3436 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3437 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3438 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3440 if (tg3_flag(tp, ENABLE_APE))
3441 mac_mode |= MAC_MODE_APE_TX_EN |
3442 MAC_MODE_APE_RX_EN |
3443 MAC_MODE_TDE_ENABLE;
3445 tw32_f(MAC_MODE, mac_mode);
3446 udelay(100);
3448 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3449 udelay(10);
3452 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3453 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3454 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3455 u32 base_val;
3457 base_val = tp->pci_clock_ctrl;
3458 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3459 CLOCK_CTRL_TXCLK_DISABLE);
3461 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3462 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3463 } else if (tg3_flag(tp, 5780_CLASS) ||
3464 tg3_flag(tp, CPMU_PRESENT) ||
3465 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3466 /* do nothing */
3467 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3468 u32 newbits1, newbits2;
3470 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3471 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3472 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3473 CLOCK_CTRL_TXCLK_DISABLE |
3474 CLOCK_CTRL_ALTCLK);
3475 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3476 } else if (tg3_flag(tp, 5705_PLUS)) {
3477 newbits1 = CLOCK_CTRL_625_CORE;
3478 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3479 } else {
3480 newbits1 = CLOCK_CTRL_ALTCLK;
3481 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3484 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3485 40);
3487 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3488 40);
3490 if (!tg3_flag(tp, 5705_PLUS)) {
3491 u32 newbits3;
3493 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3494 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3495 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3496 CLOCK_CTRL_TXCLK_DISABLE |
3497 CLOCK_CTRL_44MHZ_CORE);
3498 } else {
3499 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3502 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3503 tp->pci_clock_ctrl | newbits3, 40);
3507 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3508 tg3_power_down_phy(tp, do_low_power);
3510 tg3_frob_aux_power(tp, true);
3512 /* Workaround for unstable PLL clock */
3513 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3514 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3515 u32 val = tr32(0x7d00);
3517 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3518 tw32(0x7d00, val);
3519 if (!tg3_flag(tp, ENABLE_ASF)) {
3520 int err;
3522 err = tg3_nvram_lock(tp);
3523 tg3_halt_cpu(tp, RX_CPU_BASE);
3524 if (!err)
3525 tg3_nvram_unlock(tp);
3529 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3531 return 0;
3534 static void tg3_power_down(struct tg3 *tp)
3536 tg3_power_down_prepare(tp);
3538 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3539 pci_set_power_state(tp->pdev, PCI_D3hot);
3542 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3544 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3545 case MII_TG3_AUX_STAT_10HALF:
3546 *speed = SPEED_10;
3547 *duplex = DUPLEX_HALF;
3548 break;
3550 case MII_TG3_AUX_STAT_10FULL:
3551 *speed = SPEED_10;
3552 *duplex = DUPLEX_FULL;
3553 break;
3555 case MII_TG3_AUX_STAT_100HALF:
3556 *speed = SPEED_100;
3557 *duplex = DUPLEX_HALF;
3558 break;
3560 case MII_TG3_AUX_STAT_100FULL:
3561 *speed = SPEED_100;
3562 *duplex = DUPLEX_FULL;
3563 break;
3565 case MII_TG3_AUX_STAT_1000HALF:
3566 *speed = SPEED_1000;
3567 *duplex = DUPLEX_HALF;
3568 break;
3570 case MII_TG3_AUX_STAT_1000FULL:
3571 *speed = SPEED_1000;
3572 *duplex = DUPLEX_FULL;
3573 break;
3575 default:
3576 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3577 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3578 SPEED_10;
3579 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3580 DUPLEX_HALF;
3581 break;
3583 *speed = SPEED_INVALID;
3584 *duplex = DUPLEX_INVALID;
3585 break;
3589 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3591 int err = 0;
3592 u32 val, new_adv;
3594 new_adv = ADVERTISE_CSMA;
3595 if (advertise & ADVERTISED_10baseT_Half)
3596 new_adv |= ADVERTISE_10HALF;
3597 if (advertise & ADVERTISED_10baseT_Full)
3598 new_adv |= ADVERTISE_10FULL;
3599 if (advertise & ADVERTISED_100baseT_Half)
3600 new_adv |= ADVERTISE_100HALF;
3601 if (advertise & ADVERTISED_100baseT_Full)
3602 new_adv |= ADVERTISE_100FULL;
3604 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3606 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3607 if (err)
3608 goto done;
3610 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3611 goto done;
3613 new_adv = 0;
3614 if (advertise & ADVERTISED_1000baseT_Half)
3615 new_adv |= ADVERTISE_1000HALF;
3616 if (advertise & ADVERTISED_1000baseT_Full)
3617 new_adv |= ADVERTISE_1000FULL;
3619 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3620 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3621 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3623 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3624 if (err)
3625 goto done;
3627 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3628 goto done;
3630 tw32(TG3_CPMU_EEE_MODE,
3631 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3633 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3634 if (!err) {
3635 u32 err2;
3637 val = 0;
3638 /* Advertise 100BASE-TX EEE ability */
3639 if (advertise & ADVERTISED_100baseT_Full)
3640 val |= MDIO_AN_EEE_ADV_100TX;
3641 /* Advertise 1000BASE-T EEE ability */
3642 if (advertise & ADVERTISED_1000baseT_Full)
3643 val |= MDIO_AN_EEE_ADV_1000T;
3644 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3645 if (err)
3646 val = 0;
3648 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3649 case ASIC_REV_5717:
3650 case ASIC_REV_57765:
3651 case ASIC_REV_5719:
3652 /* If any EEE abilities were advertised above... */
3653 if (val)
3654 val = MII_TG3_DSP_TAP26_ALNOKO |
3655 MII_TG3_DSP_TAP26_RMRXSTO |
3656 MII_TG3_DSP_TAP26_OPCSINPT;
3657 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3658 /* Fall through */
3659 case ASIC_REV_5720:
3660 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3661 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3662 MII_TG3_DSP_CH34TP2_HIBW01);
3665 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3666 if (!err)
3667 err = err2;
3670 done:
3671 return err;
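/* Editor's note -- illustrative, not part of the driver:
 * tg3_phy_autoneg_cfg() translates ethtool ADVERTISED_* bits into the MII
 * advertisement registers, e.g.
 *
 *	ADVERTISED_100baseT_Full  -> ADVERTISE_100FULL   (MII_ADVERTISE)
 *	ADVERTISED_1000baseT_Full -> ADVERTISE_1000FULL  (MII_CTRL1000)
 *
 * with flow control and, where supported, EEE advertisement layered on
 * top.  The typical call advertises everything the configuration allows:
 *
 *	tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
 *			    tp->link_config.flowctrl);
 */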
3674 static void tg3_phy_copper_begin(struct tg3 *tp)
3676 u32 new_adv;
3677 int i;
3679 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3680 new_adv = ADVERTISED_10baseT_Half |
3681 ADVERTISED_10baseT_Full;
3682 if (tg3_flag(tp, WOL_SPEED_100MB))
3683 new_adv |= ADVERTISED_100baseT_Half |
3684 ADVERTISED_100baseT_Full;
3686 tg3_phy_autoneg_cfg(tp, new_adv,
3687 FLOW_CTRL_TX | FLOW_CTRL_RX);
3688 } else if (tp->link_config.speed == SPEED_INVALID) {
3689 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3690 tp->link_config.advertising &=
3691 ~(ADVERTISED_1000baseT_Half |
3692 ADVERTISED_1000baseT_Full);
3694 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3695 tp->link_config.flowctrl);
3696 } else {
3697 /* Asking for a specific link mode. */
3698 if (tp->link_config.speed == SPEED_1000) {
3699 if (tp->link_config.duplex == DUPLEX_FULL)
3700 new_adv = ADVERTISED_1000baseT_Full;
3701 else
3702 new_adv = ADVERTISED_1000baseT_Half;
3703 } else if (tp->link_config.speed == SPEED_100) {
3704 if (tp->link_config.duplex == DUPLEX_FULL)
3705 new_adv = ADVERTISED_100baseT_Full;
3706 else
3707 new_adv = ADVERTISED_100baseT_Half;
3708 } else {
3709 if (tp->link_config.duplex == DUPLEX_FULL)
3710 new_adv = ADVERTISED_10baseT_Full;
3711 else
3712 new_adv = ADVERTISED_10baseT_Half;
3715 tg3_phy_autoneg_cfg(tp, new_adv,
3716 tp->link_config.flowctrl);
3719 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3720 tp->link_config.speed != SPEED_INVALID) {
3721 u32 bmcr, orig_bmcr;
3723 tp->link_config.active_speed = tp->link_config.speed;
3724 tp->link_config.active_duplex = tp->link_config.duplex;
3726 bmcr = 0;
3727 switch (tp->link_config.speed) {
3728 default:
3729 case SPEED_10:
3730 break;
3732 case SPEED_100:
3733 bmcr |= BMCR_SPEED100;
3734 break;
3736 case SPEED_1000:
3737 bmcr |= BMCR_SPEED1000;
3738 break;
3741 if (tp->link_config.duplex == DUPLEX_FULL)
3742 bmcr |= BMCR_FULLDPLX;
3744 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3745 (bmcr != orig_bmcr)) {
3746 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3747 for (i = 0; i < 1500; i++) {
3748 u32 tmp;
3750 udelay(10);
3751 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3752 tg3_readphy(tp, MII_BMSR, &tmp))
3753 continue;
3754 if (!(tmp & BMSR_LSTATUS)) {
3755 udelay(40);
3756 break;
3759 tg3_writephy(tp, MII_BMCR, bmcr);
3760 udelay(40);
3762 } else {
3763 tg3_writephy(tp, MII_BMCR,
3764 BMCR_ANENABLE | BMCR_ANRESTART);
3768 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3770 int err;
3772 /* Turn off tap power management and set the extended
3773 * packet length bit. */
3774 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3776 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3777 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3778 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3779 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3780 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3782 udelay(40);
3784 return err;
3787 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3789 u32 adv_reg, all_mask = 0;
3791 if (mask & ADVERTISED_10baseT_Half)
3792 all_mask |= ADVERTISE_10HALF;
3793 if (mask & ADVERTISED_10baseT_Full)
3794 all_mask |= ADVERTISE_10FULL;
3795 if (mask & ADVERTISED_100baseT_Half)
3796 all_mask |= ADVERTISE_100HALF;
3797 if (mask & ADVERTISED_100baseT_Full)
3798 all_mask |= ADVERTISE_100FULL;
3800 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3801 return 0;
3803 if ((adv_reg & ADVERTISE_ALL) != all_mask)
3804 return 0;
3806 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3807 u32 tg3_ctrl;
3809 all_mask = 0;
3810 if (mask & ADVERTISED_1000baseT_Half)
3811 all_mask |= ADVERTISE_1000HALF;
3812 if (mask & ADVERTISED_1000baseT_Full)
3813 all_mask |= ADVERTISE_1000FULL;
3815 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3816 return 0;
3818 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3819 if (tg3_ctrl != all_mask)
3820 return 0;
3823 return 1;
3826 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3828 u32 curadv, reqadv;
3830 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3831 return 1;
3833 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3834 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3836 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3837 if (curadv != reqadv)
3838 return 0;
3840 if (tg3_flag(tp, PAUSE_AUTONEG))
3841 tg3_readphy(tp, MII_LPA, rmtadv);
3842 } else {
3843 /* Reprogram the advertisement register, even if it
3844 * does not affect the current link. If the link
3845 * gets renegotiated in the future, we can save an
3846 * additional renegotiation cycle by advertising
3847 * it correctly in the first place.
3849 if (curadv != reqadv) {
3850 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3851 ADVERTISE_PAUSE_ASYM);
3852 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3856 return 1;
3859 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3861 int current_link_up;
3862 u32 bmsr, val;
3863 u32 lcl_adv, rmt_adv;
3864 u16 current_speed;
3865 u8 current_duplex;
3866 int i, err;
3868 tw32(MAC_EVENT, 0);
3870 tw32_f(MAC_STATUS,
3871 (MAC_STATUS_SYNC_CHANGED |
3872 MAC_STATUS_CFG_CHANGED |
3873 MAC_STATUS_MI_COMPLETION |
3874 MAC_STATUS_LNKSTATE_CHANGED));
3875 udelay(40);
3877 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3878 tw32_f(MAC_MI_MODE,
3879 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3880 udelay(80);
3883 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3885 /* Some third-party PHYs need to be reset on link going
3886 * down.
3888 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3889 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3890 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3891 netif_carrier_ok(tp->dev)) {
3892 tg3_readphy(tp, MII_BMSR, &bmsr);
3893 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3894 !(bmsr & BMSR_LSTATUS))
3895 force_reset = 1;
3897 if (force_reset)
3898 tg3_phy_reset(tp);
3900 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3901 tg3_readphy(tp, MII_BMSR, &bmsr);
3902 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3903 !tg3_flag(tp, INIT_COMPLETE))
3904 bmsr = 0;
3906 if (!(bmsr & BMSR_LSTATUS)) {
3907 err = tg3_init_5401phy_dsp(tp);
3908 if (err)
3909 return err;
3911 tg3_readphy(tp, MII_BMSR, &bmsr);
3912 for (i = 0; i < 1000; i++) {
3913 udelay(10);
3914 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3915 (bmsr & BMSR_LSTATUS)) {
3916 udelay(40);
3917 break;
3921 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3922 TG3_PHY_REV_BCM5401_B0 &&
3923 !(bmsr & BMSR_LSTATUS) &&
3924 tp->link_config.active_speed == SPEED_1000) {
3925 err = tg3_phy_reset(tp);
3926 if (!err)
3927 err = tg3_init_5401phy_dsp(tp);
3928 if (err)
3929 return err;
3932 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3933 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3934 /* 5701 {A0,B0} CRC bug workaround */
3935 tg3_writephy(tp, 0x15, 0x0a75);
3936 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3937 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3938 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3941 /* Clear pending interrupts... */
3942 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3943 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3945 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3946 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3947 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3948 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3950 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3951 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3952 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3953 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3954 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3955 else
3956 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3959 current_link_up = 0;
3960 current_speed = SPEED_INVALID;
3961 current_duplex = DUPLEX_INVALID;
3963 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3964 err = tg3_phy_auxctl_read(tp,
3965 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3966 &val);
3967 if (!err && !(val & (1 << 10))) {
3968 tg3_phy_auxctl_write(tp,
3969 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3970 val | (1 << 10));
3971 goto relink;
3975 bmsr = 0;
3976 for (i = 0; i < 100; i++) {
3977 tg3_readphy(tp, MII_BMSR, &bmsr);
3978 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3979 (bmsr & BMSR_LSTATUS))
3980 break;
3981 udelay(40);
3984 if (bmsr & BMSR_LSTATUS) {
3985 u32 aux_stat, bmcr;
3987 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3988 for (i = 0; i < 2000; i++) {
3989 udelay(10);
3990 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3991 aux_stat)
3992 break;
3995 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3996 &current_speed,
3997 &current_duplex);
3999 bmcr = 0;
4000 for (i = 0; i < 200; i++) {
4001 tg3_readphy(tp, MII_BMCR, &bmcr);
4002 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4003 continue;
4004 if (bmcr && bmcr != 0x7fff)
4005 break;
4006 udelay(10);
4009 lcl_adv = 0;
4010 rmt_adv = 0;
4012 tp->link_config.active_speed = current_speed;
4013 tp->link_config.active_duplex = current_duplex;
4015 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4016 if ((bmcr & BMCR_ANENABLE) &&
4017 tg3_copper_is_advertising_all(tp,
4018 tp->link_config.advertising)) {
4019 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
4020 &rmt_adv))
4021 current_link_up = 1;
4023 } else {
4024 if (!(bmcr & BMCR_ANENABLE) &&
4025 tp->link_config.speed == current_speed &&
4026 tp->link_config.duplex == current_duplex &&
4027 tp->link_config.flowctrl ==
4028 tp->link_config.active_flowctrl) {
4029 current_link_up = 1;
4033 if (current_link_up == 1 &&
4034 tp->link_config.active_duplex == DUPLEX_FULL)
4035 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4038 relink:
4039 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4040 tg3_phy_copper_begin(tp);
4042 tg3_readphy(tp, MII_BMSR, &bmsr);
4043 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4044 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4045 current_link_up = 1;
4048 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4049 if (current_link_up == 1) {
4050 if (tp->link_config.active_speed == SPEED_100 ||
4051 tp->link_config.active_speed == SPEED_10)
4052 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4053 else
4054 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4055 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4056 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4057 else
4058 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4060 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4061 if (tp->link_config.active_duplex == DUPLEX_HALF)
4062 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4064 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4065 if (current_link_up == 1 &&
4066 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4067 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4068 else
4069 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4072 /* ??? Without this setting Netgear GA302T PHY does not
4073 * ??? send/receive packets...
4075 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4076 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4077 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4078 tw32_f(MAC_MI_MODE, tp->mi_mode);
4079 udelay(80);
4082 tw32_f(MAC_MODE, tp->mac_mode);
4083 udelay(40);
4085 tg3_phy_eee_adjust(tp, current_link_up);
4087 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4088 /* Polled via timer. */
4089 tw32_f(MAC_EVENT, 0);
4090 } else {
4091 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4093 udelay(40);
4095 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4096 current_link_up == 1 &&
4097 tp->link_config.active_speed == SPEED_1000 &&
4098 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4099 udelay(120);
4100 tw32_f(MAC_STATUS,
4101 (MAC_STATUS_SYNC_CHANGED |
4102 MAC_STATUS_CFG_CHANGED));
4103 udelay(40);
4104 tg3_write_mem(tp,
4105 NIC_SRAM_FIRMWARE_MBOX,
4106 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4109 /* Prevent send BD corruption. */
4110 if (tg3_flag(tp, CLKREQ_BUG)) {
4111 u16 oldlnkctl, newlnkctl;
4113 pci_read_config_word(tp->pdev,
4114 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4115 &oldlnkctl);
4116 if (tp->link_config.active_speed == SPEED_100 ||
4117 tp->link_config.active_speed == SPEED_10)
4118 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4119 else
4120 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4121 if (newlnkctl != oldlnkctl)
4122 pci_write_config_word(tp->pdev,
4123 pci_pcie_cap(tp->pdev) +
4124 PCI_EXP_LNKCTL, newlnkctl);
4127 if (current_link_up != netif_carrier_ok(tp->dev)) {
4128 if (current_link_up)
4129 netif_carrier_on(tp->dev);
4130 else
4131 netif_carrier_off(tp->dev);
4132 tg3_link_report(tp);
4135 return 0;
4138 struct tg3_fiber_aneginfo {
4139 int state;
4140 #define ANEG_STATE_UNKNOWN 0
4141 #define ANEG_STATE_AN_ENABLE 1
4142 #define ANEG_STATE_RESTART_INIT 2
4143 #define ANEG_STATE_RESTART 3
4144 #define ANEG_STATE_DISABLE_LINK_OK 4
4145 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4146 #define ANEG_STATE_ABILITY_DETECT 6
4147 #define ANEG_STATE_ACK_DETECT_INIT 7
4148 #define ANEG_STATE_ACK_DETECT 8
4149 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4150 #define ANEG_STATE_COMPLETE_ACK 10
4151 #define ANEG_STATE_IDLE_DETECT_INIT 11
4152 #define ANEG_STATE_IDLE_DETECT 12
4153 #define ANEG_STATE_LINK_OK 13
4154 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4155 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4157 u32 flags;
4158 #define MR_AN_ENABLE 0x00000001
4159 #define MR_RESTART_AN 0x00000002
4160 #define MR_AN_COMPLETE 0x00000004
4161 #define MR_PAGE_RX 0x00000008
4162 #define MR_NP_LOADED 0x00000010
4163 #define MR_TOGGLE_TX 0x00000020
4164 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4165 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4166 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4167 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4168 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4169 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4170 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4171 #define MR_TOGGLE_RX 0x00002000
4172 #define MR_NP_RX 0x00004000
4174 #define MR_LINK_OK 0x80000000
4176 unsigned long link_time, cur_time;
4178 u32 ability_match_cfg;
4179 int ability_match_count;
4181 char ability_match, idle_match, ack_match;
4183 u32 txconfig, rxconfig;
4184 #define ANEG_CFG_NP 0x00000080
4185 #define ANEG_CFG_ACK 0x00000040
4186 #define ANEG_CFG_RF2 0x00000020
4187 #define ANEG_CFG_RF1 0x00000010
4188 #define ANEG_CFG_PS2 0x00000001
4189 #define ANEG_CFG_PS1 0x00008000
4190 #define ANEG_CFG_HD 0x00004000
4191 #define ANEG_CFG_FD 0x00002000
4192 #define ANEG_CFG_INVAL 0x00001f06
4193 };
4195 #define ANEG_OK 0
4196 #define ANEG_DONE 1
4197 #define ANEG_TIMER_ENAB 2
4198 #define ANEG_FAILED -1
4200 #define ANEG_STATE_SETTLE_TIME 10000
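/* Editor's note (illustrative): ap->cur_time is incremented once per
 * invocation of tg3_fiber_aneg_smachine(), and fiber_autoneg() below
 * drives the state machine with udelay(1) between calls, so an
 * ANEG_STATE_SETTLE_TIME of 10000 ticks corresponds to roughly 10 ms
 * of link settle time.
 */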
4202 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4203 struct tg3_fiber_aneginfo *ap)
4205 u16 flowctrl;
4206 unsigned long delta;
4207 u32 rx_cfg_reg;
4208 int ret;
4210 if (ap->state == ANEG_STATE_UNKNOWN) {
4211 ap->rxconfig = 0;
4212 ap->link_time = 0;
4213 ap->cur_time = 0;
4214 ap->ability_match_cfg = 0;
4215 ap->ability_match_count = 0;
4216 ap->ability_match = 0;
4217 ap->idle_match = 0;
4218 ap->ack_match = 0;
4220 ap->cur_time++;
4222 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4223 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4225 if (rx_cfg_reg != ap->ability_match_cfg) {
4226 ap->ability_match_cfg = rx_cfg_reg;
4227 ap->ability_match = 0;
4228 ap->ability_match_count = 0;
4229 } else {
4230 if (++ap->ability_match_count > 1) {
4231 ap->ability_match = 1;
4232 ap->ability_match_cfg = rx_cfg_reg;
4235 if (rx_cfg_reg & ANEG_CFG_ACK)
4236 ap->ack_match = 1;
4237 else
4238 ap->ack_match = 0;
4240 ap->idle_match = 0;
4241 } else {
4242 ap->idle_match = 1;
4243 ap->ability_match_cfg = 0;
4244 ap->ability_match_count = 0;
4245 ap->ability_match = 0;
4246 ap->ack_match = 0;
4248 rx_cfg_reg = 0;
4251 ap->rxconfig = rx_cfg_reg;
4252 ret = ANEG_OK;
4254 switch (ap->state) {
4255 case ANEG_STATE_UNKNOWN:
4256 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4257 ap->state = ANEG_STATE_AN_ENABLE;
4259 /* fallthru */
4260 case ANEG_STATE_AN_ENABLE:
4261 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4262 if (ap->flags & MR_AN_ENABLE) {
4263 ap->link_time = 0;
4264 ap->cur_time = 0;
4265 ap->ability_match_cfg = 0;
4266 ap->ability_match_count = 0;
4267 ap->ability_match = 0;
4268 ap->idle_match = 0;
4269 ap->ack_match = 0;
4271 ap->state = ANEG_STATE_RESTART_INIT;
4272 } else {
4273 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4275 break;
4277 case ANEG_STATE_RESTART_INIT:
4278 ap->link_time = ap->cur_time;
4279 ap->flags &= ~(MR_NP_LOADED);
4280 ap->txconfig = 0;
4281 tw32(MAC_TX_AUTO_NEG, 0);
4282 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4283 tw32_f(MAC_MODE, tp->mac_mode);
4284 udelay(40);
4286 ret = ANEG_TIMER_ENAB;
4287 ap->state = ANEG_STATE_RESTART;
4289 /* fallthru */
4290 case ANEG_STATE_RESTART:
4291 delta = ap->cur_time - ap->link_time;
4292 if (delta > ANEG_STATE_SETTLE_TIME)
4293 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4294 else
4295 ret = ANEG_TIMER_ENAB;
4296 break;
4298 case ANEG_STATE_DISABLE_LINK_OK:
4299 ret = ANEG_DONE;
4300 break;
4302 case ANEG_STATE_ABILITY_DETECT_INIT:
4303 ap->flags &= ~(MR_TOGGLE_TX);
4304 ap->txconfig = ANEG_CFG_FD;
4305 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4306 if (flowctrl & ADVERTISE_1000XPAUSE)
4307 ap->txconfig |= ANEG_CFG_PS1;
4308 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4309 ap->txconfig |= ANEG_CFG_PS2;
4310 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4311 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4312 tw32_f(MAC_MODE, tp->mac_mode);
4313 udelay(40);
4315 ap->state = ANEG_STATE_ABILITY_DETECT;
4316 break;
4318 case ANEG_STATE_ABILITY_DETECT:
4319 if (ap->ability_match != 0 && ap->rxconfig != 0)
4320 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4321 break;
4323 case ANEG_STATE_ACK_DETECT_INIT:
4324 ap->txconfig |= ANEG_CFG_ACK;
4325 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4326 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4327 tw32_f(MAC_MODE, tp->mac_mode);
4328 udelay(40);
4330 ap->state = ANEG_STATE_ACK_DETECT;
4332 /* fallthru */
4333 case ANEG_STATE_ACK_DETECT:
4334 if (ap->ack_match != 0) {
4335 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4336 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4337 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4338 } else {
4339 ap->state = ANEG_STATE_AN_ENABLE;
4341 } else if (ap->ability_match != 0 &&
4342 ap->rxconfig == 0) {
4343 ap->state = ANEG_STATE_AN_ENABLE;
4345 break;
4347 case ANEG_STATE_COMPLETE_ACK_INIT:
4348 if (ap->rxconfig & ANEG_CFG_INVAL) {
4349 ret = ANEG_FAILED;
4350 break;
4352 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4353 MR_LP_ADV_HALF_DUPLEX |
4354 MR_LP_ADV_SYM_PAUSE |
4355 MR_LP_ADV_ASYM_PAUSE |
4356 MR_LP_ADV_REMOTE_FAULT1 |
4357 MR_LP_ADV_REMOTE_FAULT2 |
4358 MR_LP_ADV_NEXT_PAGE |
4359 MR_TOGGLE_RX |
4360 MR_NP_RX);
4361 if (ap->rxconfig & ANEG_CFG_FD)
4362 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4363 if (ap->rxconfig & ANEG_CFG_HD)
4364 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4365 if (ap->rxconfig & ANEG_CFG_PS1)
4366 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4367 if (ap->rxconfig & ANEG_CFG_PS2)
4368 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4369 if (ap->rxconfig & ANEG_CFG_RF1)
4370 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4371 if (ap->rxconfig & ANEG_CFG_RF2)
4372 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4373 if (ap->rxconfig & ANEG_CFG_NP)
4374 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4376 ap->link_time = ap->cur_time;
4378 ap->flags ^= (MR_TOGGLE_TX);
4379 if (ap->rxconfig & 0x0008)
4380 ap->flags |= MR_TOGGLE_RX;
4381 if (ap->rxconfig & ANEG_CFG_NP)
4382 ap->flags |= MR_NP_RX;
4383 ap->flags |= MR_PAGE_RX;
4385 ap->state = ANEG_STATE_COMPLETE_ACK;
4386 ret = ANEG_TIMER_ENAB;
4387 break;
4389 case ANEG_STATE_COMPLETE_ACK:
4390 if (ap->ability_match != 0 &&
4391 ap->rxconfig == 0) {
4392 ap->state = ANEG_STATE_AN_ENABLE;
4393 break;
4395 delta = ap->cur_time - ap->link_time;
4396 if (delta > ANEG_STATE_SETTLE_TIME) {
4397 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4398 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4399 } else {
4400 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4401 !(ap->flags & MR_NP_RX)) {
4402 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4403 } else {
4404 ret = ANEG_FAILED;
4408 break;
4410 case ANEG_STATE_IDLE_DETECT_INIT:
4411 ap->link_time = ap->cur_time;
4412 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4413 tw32_f(MAC_MODE, tp->mac_mode);
4414 udelay(40);
4416 ap->state = ANEG_STATE_IDLE_DETECT;
4417 ret = ANEG_TIMER_ENAB;
4418 break;
4420 case ANEG_STATE_IDLE_DETECT:
4421 if (ap->ability_match != 0 &&
4422 ap->rxconfig == 0) {
4423 ap->state = ANEG_STATE_AN_ENABLE;
4424 break;
4426 delta = ap->cur_time - ap->link_time;
4427 if (delta > ANEG_STATE_SETTLE_TIME) {
4428 /* XXX another gem from the Broadcom driver :( */
4429 ap->state = ANEG_STATE_LINK_OK;
4431 break;
4433 case ANEG_STATE_LINK_OK:
4434 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4435 ret = ANEG_DONE;
4436 break;
4438 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4439 /* ??? unimplemented */
4440 break;
4442 case ANEG_STATE_NEXT_PAGE_WAIT:
4443 /* ??? unimplemented */
4444 break;
4446 default:
4447 ret = ANEG_FAILED;
4448 break;
4451 return ret;
4454 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4456 int res = 0;
4457 struct tg3_fiber_aneginfo aninfo;
4458 int status = ANEG_FAILED;
4459 unsigned int tick;
4460 u32 tmp;
4462 tw32_f(MAC_TX_AUTO_NEG, 0);
4464 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4465 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4466 udelay(40);
4468 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4469 udelay(40);
4471 memset(&aninfo, 0, sizeof(aninfo));
4472 aninfo.flags |= MR_AN_ENABLE;
4473 aninfo.state = ANEG_STATE_UNKNOWN;
4474 aninfo.cur_time = 0;
4475 tick = 0;
4476 while (++tick < 195000) {
4477 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4478 if (status == ANEG_DONE || status == ANEG_FAILED)
4479 break;
4481 udelay(1);
4484 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4485 tw32_f(MAC_MODE, tp->mac_mode);
4486 udelay(40);
4488 *txflags = aninfo.txconfig;
4489 *rxflags = aninfo.flags;
4491 if (status == ANEG_DONE &&
4492 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4493 MR_LP_ADV_FULL_DUPLEX)))
4494 res = 1;
4496 return res;
4497 }
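/* Editor's note (sketch of the timing contract): the polling loop
 * above budgets roughly 195 ms (195000 iterations at udelay(1) each,
 * ignoring the cost of the state machine itself) for autoneg to reach
 * ANEG_DONE; callers such as tg3_setup_fiber_by_hand() fall back to a
 * PCS-sync check when this returns 0.
 */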
4499 static void tg3_init_bcm8002(struct tg3 *tp)
4501 u32 mac_status = tr32(MAC_STATUS);
4502 int i;
4504 /* Reset when initting first time or we have a link. */
4505 if (tg3_flag(tp, INIT_COMPLETE) &&
4506 !(mac_status & MAC_STATUS_PCS_SYNCED))
4507 return;
4509 /* Set PLL lock range. */
4510 tg3_writephy(tp, 0x16, 0x8007);
4512 /* SW reset */
4513 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4515 /* Wait for reset to complete. */
4516 /* XXX schedule_timeout() ... */
4517 for (i = 0; i < 500; i++)
4518 udelay(10);
4520 /* Config mode; select PMA/Ch 1 regs. */
4521 tg3_writephy(tp, 0x10, 0x8411);
4523 /* Enable auto-lock and comdet, select txclk for tx. */
4524 tg3_writephy(tp, 0x11, 0x0a10);
4526 tg3_writephy(tp, 0x18, 0x00a0);
4527 tg3_writephy(tp, 0x16, 0x41ff);
4529 /* Assert and deassert POR. */
4530 tg3_writephy(tp, 0x13, 0x0400);
4531 udelay(40);
4532 tg3_writephy(tp, 0x13, 0x0000);
4534 tg3_writephy(tp, 0x11, 0x0a50);
4535 udelay(40);
4536 tg3_writephy(tp, 0x11, 0x0a10);
4538 /* Wait for signal to stabilize */
4539 /* XXX schedule_timeout() ... */
4540 for (i = 0; i < 15000; i++)
4541 udelay(10);
4543 /* Deselect the channel register so we can read the PHYID
4544 * later.
4545 */
4546 tg3_writephy(tp, 0x10, 0x8011);
4549 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4551 u16 flowctrl;
4552 u32 sg_dig_ctrl, sg_dig_status;
4553 u32 serdes_cfg, expected_sg_dig_ctrl;
4554 int workaround, port_a;
4555 int current_link_up;
4557 serdes_cfg = 0;
4558 expected_sg_dig_ctrl = 0;
4559 workaround = 0;
4560 port_a = 1;
4561 current_link_up = 0;
4563 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4564 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4565 workaround = 1;
4566 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4567 port_a = 0;
4569 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4570 /* preserve bits 20-23 for voltage regulator */
4571 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4574 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4576 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4577 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4578 if (workaround) {
4579 u32 val = serdes_cfg;
4581 if (port_a)
4582 val |= 0xc010000;
4583 else
4584 val |= 0x4010000;
4585 tw32_f(MAC_SERDES_CFG, val);
4588 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4590 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4591 tg3_setup_flow_control(tp, 0, 0);
4592 current_link_up = 1;
4594 goto out;
4597 /* Want auto-negotiation. */
4598 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4600 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4601 if (flowctrl & ADVERTISE_1000XPAUSE)
4602 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4603 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4604 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4606 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4607 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4608 tp->serdes_counter &&
4609 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4610 MAC_STATUS_RCVD_CFG)) ==
4611 MAC_STATUS_PCS_SYNCED)) {
4612 tp->serdes_counter--;
4613 current_link_up = 1;
4614 goto out;
4616 restart_autoneg:
4617 if (workaround)
4618 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4619 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4620 udelay(5);
4621 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4623 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4624 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4625 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4626 MAC_STATUS_SIGNAL_DET)) {
4627 sg_dig_status = tr32(SG_DIG_STATUS);
4628 mac_status = tr32(MAC_STATUS);
4630 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4631 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4632 u32 local_adv = 0, remote_adv = 0;
4634 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4635 local_adv |= ADVERTISE_1000XPAUSE;
4636 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4637 local_adv |= ADVERTISE_1000XPSE_ASYM;
4639 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4640 remote_adv |= LPA_1000XPAUSE;
4641 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4642 remote_adv |= LPA_1000XPAUSE_ASYM;
4644 tg3_setup_flow_control(tp, local_adv, remote_adv);
4645 current_link_up = 1;
4646 tp->serdes_counter = 0;
4647 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4648 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4649 if (tp->serdes_counter)
4650 tp->serdes_counter--;
4651 else {
4652 if (workaround) {
4653 u32 val = serdes_cfg;
4655 if (port_a)
4656 val |= 0xc010000;
4657 else
4658 val |= 0x4010000;
4660 tw32_f(MAC_SERDES_CFG, val);
4663 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4664 udelay(40);
4666 /* Link parallel detection - link is up */
4667 /* only if we have PCS_SYNC and not */
4668 /* receiving config code words */
4669 mac_status = tr32(MAC_STATUS);
4670 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4671 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4672 tg3_setup_flow_control(tp, 0, 0);
4673 current_link_up = 1;
4674 tp->phy_flags |=
4675 TG3_PHYFLG_PARALLEL_DETECT;
4676 tp->serdes_counter =
4677 SERDES_PARALLEL_DET_TIMEOUT;
4678 } else
4679 goto restart_autoneg;
4682 } else {
4683 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4684 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4687 out:
4688 return current_link_up;
4691 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4693 int current_link_up = 0;
4695 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4696 goto out;
4698 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4699 u32 txflags, rxflags;
4700 int i;
4702 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4703 u32 local_adv = 0, remote_adv = 0;
4705 if (txflags & ANEG_CFG_PS1)
4706 local_adv |= ADVERTISE_1000XPAUSE;
4707 if (txflags & ANEG_CFG_PS2)
4708 local_adv |= ADVERTISE_1000XPSE_ASYM;
4710 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4711 remote_adv |= LPA_1000XPAUSE;
4712 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4713 remote_adv |= LPA_1000XPAUSE_ASYM;
4715 tg3_setup_flow_control(tp, local_adv, remote_adv);
4717 current_link_up = 1;
4719 for (i = 0; i < 30; i++) {
4720 udelay(20);
4721 tw32_f(MAC_STATUS,
4722 (MAC_STATUS_SYNC_CHANGED |
4723 MAC_STATUS_CFG_CHANGED));
4724 udelay(40);
4725 if ((tr32(MAC_STATUS) &
4726 (MAC_STATUS_SYNC_CHANGED |
4727 MAC_STATUS_CFG_CHANGED)) == 0)
4728 break;
4731 mac_status = tr32(MAC_STATUS);
4732 if (current_link_up == 0 &&
4733 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4734 !(mac_status & MAC_STATUS_RCVD_CFG))
4735 current_link_up = 1;
4736 } else {
4737 tg3_setup_flow_control(tp, 0, 0);
4739 /* Forcing 1000FD link up. */
4740 current_link_up = 1;
4742 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4743 udelay(40);
4745 tw32_f(MAC_MODE, tp->mac_mode);
4746 udelay(40);
4749 out:
4750 return current_link_up;
4753 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4755 u32 orig_pause_cfg;
4756 u16 orig_active_speed;
4757 u8 orig_active_duplex;
4758 u32 mac_status;
4759 int current_link_up;
4760 int i;
4762 orig_pause_cfg = tp->link_config.active_flowctrl;
4763 orig_active_speed = tp->link_config.active_speed;
4764 orig_active_duplex = tp->link_config.active_duplex;
4766 if (!tg3_flag(tp, HW_AUTONEG) &&
4767 netif_carrier_ok(tp->dev) &&
4768 tg3_flag(tp, INIT_COMPLETE)) {
4769 mac_status = tr32(MAC_STATUS);
4770 mac_status &= (MAC_STATUS_PCS_SYNCED |
4771 MAC_STATUS_SIGNAL_DET |
4772 MAC_STATUS_CFG_CHANGED |
4773 MAC_STATUS_RCVD_CFG);
4774 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4775 MAC_STATUS_SIGNAL_DET)) {
4776 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4777 MAC_STATUS_CFG_CHANGED));
4778 return 0;
4782 tw32_f(MAC_TX_AUTO_NEG, 0);
4784 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4785 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4786 tw32_f(MAC_MODE, tp->mac_mode);
4787 udelay(40);
4789 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4790 tg3_init_bcm8002(tp);
4792 /* Enable link change event even when serdes polling. */
4793 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4794 udelay(40);
4796 current_link_up = 0;
4797 mac_status = tr32(MAC_STATUS);
4799 if (tg3_flag(tp, HW_AUTONEG))
4800 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4801 else
4802 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4804 tp->napi[0].hw_status->status =
4805 (SD_STATUS_UPDATED |
4806 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4808 for (i = 0; i < 100; i++) {
4809 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4810 MAC_STATUS_CFG_CHANGED));
4811 udelay(5);
4812 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4813 MAC_STATUS_CFG_CHANGED |
4814 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4815 break;
4818 mac_status = tr32(MAC_STATUS);
4819 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4820 current_link_up = 0;
4821 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4822 tp->serdes_counter == 0) {
4823 tw32_f(MAC_MODE, (tp->mac_mode |
4824 MAC_MODE_SEND_CONFIGS));
4825 udelay(1);
4826 tw32_f(MAC_MODE, tp->mac_mode);
4830 if (current_link_up == 1) {
4831 tp->link_config.active_speed = SPEED_1000;
4832 tp->link_config.active_duplex = DUPLEX_FULL;
4833 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4834 LED_CTRL_LNKLED_OVERRIDE |
4835 LED_CTRL_1000MBPS_ON));
4836 } else {
4837 tp->link_config.active_speed = SPEED_INVALID;
4838 tp->link_config.active_duplex = DUPLEX_INVALID;
4839 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4840 LED_CTRL_LNKLED_OVERRIDE |
4841 LED_CTRL_TRAFFIC_OVERRIDE));
4844 if (current_link_up != netif_carrier_ok(tp->dev)) {
4845 if (current_link_up)
4846 netif_carrier_on(tp->dev);
4847 else
4848 netif_carrier_off(tp->dev);
4849 tg3_link_report(tp);
4850 } else {
4851 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4852 if (orig_pause_cfg != now_pause_cfg ||
4853 orig_active_speed != tp->link_config.active_speed ||
4854 orig_active_duplex != tp->link_config.active_duplex)
4855 tg3_link_report(tp);
4858 return 0;
4861 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4863 int current_link_up, err = 0;
4864 u32 bmsr, bmcr;
4865 u16 current_speed;
4866 u8 current_duplex;
4867 u32 local_adv, remote_adv;
4869 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4870 tw32_f(MAC_MODE, tp->mac_mode);
4871 udelay(40);
4873 tw32(MAC_EVENT, 0);
4875 tw32_f(MAC_STATUS,
4876 (MAC_STATUS_SYNC_CHANGED |
4877 MAC_STATUS_CFG_CHANGED |
4878 MAC_STATUS_MI_COMPLETION |
4879 MAC_STATUS_LNKSTATE_CHANGED));
4880 udelay(40);
4882 if (force_reset)
4883 tg3_phy_reset(tp);
4885 current_link_up = 0;
4886 current_speed = SPEED_INVALID;
4887 current_duplex = DUPLEX_INVALID;
4889 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4890 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4891 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4892 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4893 bmsr |= BMSR_LSTATUS;
4894 else
4895 bmsr &= ~BMSR_LSTATUS;
4898 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4900 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4901 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4902 /* do nothing, just check for link up at the end */
4903 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4904 u32 adv, new_adv;
4906 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4907 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4908 ADVERTISE_1000XPAUSE |
4909 ADVERTISE_1000XPSE_ASYM |
4910 ADVERTISE_SLCT);
4912 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4914 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4915 new_adv |= ADVERTISE_1000XHALF;
4916 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4917 new_adv |= ADVERTISE_1000XFULL;
4919 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4920 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4921 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4922 tg3_writephy(tp, MII_BMCR, bmcr);
4924 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4925 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4926 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4928 return err;
4930 } else {
4931 u32 new_bmcr;
4933 bmcr &= ~BMCR_SPEED1000;
4934 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4936 if (tp->link_config.duplex == DUPLEX_FULL)
4937 new_bmcr |= BMCR_FULLDPLX;
4939 if (new_bmcr != bmcr) {
4940 /* BMCR_SPEED1000 is a reserved bit that needs
4941 * to be set on write.
4942 */
4943 new_bmcr |= BMCR_SPEED1000;
4945 /* Force a linkdown */
4946 if (netif_carrier_ok(tp->dev)) {
4947 u32 adv;
4949 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4950 adv &= ~(ADVERTISE_1000XFULL |
4951 ADVERTISE_1000XHALF |
4952 ADVERTISE_SLCT);
4953 tg3_writephy(tp, MII_ADVERTISE, adv);
4954 tg3_writephy(tp, MII_BMCR, bmcr |
4955 BMCR_ANRESTART |
4956 BMCR_ANENABLE);
4957 udelay(10);
4958 netif_carrier_off(tp->dev);
4960 tg3_writephy(tp, MII_BMCR, new_bmcr);
4961 bmcr = new_bmcr;
4962 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4963 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4964 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4965 ASIC_REV_5714) {
4966 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4967 bmsr |= BMSR_LSTATUS;
4968 else
4969 bmsr &= ~BMSR_LSTATUS;
4971 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4975 if (bmsr & BMSR_LSTATUS) {
4976 current_speed = SPEED_1000;
4977 current_link_up = 1;
4978 if (bmcr & BMCR_FULLDPLX)
4979 current_duplex = DUPLEX_FULL;
4980 else
4981 current_duplex = DUPLEX_HALF;
4983 local_adv = 0;
4984 remote_adv = 0;
4986 if (bmcr & BMCR_ANENABLE) {
4987 u32 common;
4989 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4990 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4991 common = local_adv & remote_adv;
4992 if (common & (ADVERTISE_1000XHALF |
4993 ADVERTISE_1000XFULL)) {
4994 if (common & ADVERTISE_1000XFULL)
4995 current_duplex = DUPLEX_FULL;
4996 else
4997 current_duplex = DUPLEX_HALF;
4998 } else if (!tg3_flag(tp, 5780_CLASS)) {
4999 /* Link is up via parallel detect */
5000 } else {
5001 current_link_up = 0;
5006 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5007 tg3_setup_flow_control(tp, local_adv, remote_adv);
5009 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5010 if (tp->link_config.active_duplex == DUPLEX_HALF)
5011 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5013 tw32_f(MAC_MODE, tp->mac_mode);
5014 udelay(40);
5016 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5018 tp->link_config.active_speed = current_speed;
5019 tp->link_config.active_duplex = current_duplex;
5021 if (current_link_up != netif_carrier_ok(tp->dev)) {
5022 if (current_link_up)
5023 netif_carrier_on(tp->dev);
5024 else {
5025 netif_carrier_off(tp->dev);
5026 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5028 tg3_link_report(tp);
5030 return err;
5033 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5035 if (tp->serdes_counter) {
5036 /* Give autoneg time to complete. */
5037 tp->serdes_counter--;
5038 return;
5041 if (!netif_carrier_ok(tp->dev) &&
5042 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5043 u32 bmcr;
5045 tg3_readphy(tp, MII_BMCR, &bmcr);
5046 if (bmcr & BMCR_ANENABLE) {
5047 u32 phy1, phy2;
5049 /* Select shadow register 0x1f */
5050 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5051 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5053 /* Select expansion interrupt status register */
5054 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5055 MII_TG3_DSP_EXP1_INT_STAT);
5056 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5057 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5059 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5060 /* We have signal detect and not receiving
5061 * config code words, link is up by parallel
5062 * detection.
5063 */
5065 bmcr &= ~BMCR_ANENABLE;
5066 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5067 tg3_writephy(tp, MII_BMCR, bmcr);
5068 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5071 } else if (netif_carrier_ok(tp->dev) &&
5072 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5073 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5074 u32 phy2;
5076 /* Select expansion interrupt status register */
5077 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5078 MII_TG3_DSP_EXP1_INT_STAT);
5079 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5080 if (phy2 & 0x20) {
5081 u32 bmcr;
5083 /* Config code words received, turn on autoneg. */
5084 tg3_readphy(tp, MII_BMCR, &bmcr);
5085 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5087 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5093 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5095 u32 val;
5096 int err;
5098 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5099 err = tg3_setup_fiber_phy(tp, force_reset);
5100 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5101 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5102 else
5103 err = tg3_setup_copper_phy(tp, force_reset);
5105 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5106 u32 scale;
5108 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5109 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5110 scale = 65;
5111 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5112 scale = 6;
5113 else
5114 scale = 12;
5116 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5117 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5118 tw32(GRC_MISC_CFG, val);
5121 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5122 (6 << TX_LENGTHS_IPG_SHIFT);
5123 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5124 val |= tr32(MAC_TX_LENGTHS) &
5125 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5126 TX_LENGTHS_CNT_DWN_VAL_MSK);
5128 if (tp->link_config.active_speed == SPEED_1000 &&
5129 tp->link_config.active_duplex == DUPLEX_HALF)
5130 tw32(MAC_TX_LENGTHS, val |
5131 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5132 else
5133 tw32(MAC_TX_LENGTHS, val |
5134 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5136 if (!tg3_flag(tp, 5705_PLUS)) {
5137 if (netif_carrier_ok(tp->dev)) {
5138 tw32(HOSTCC_STAT_COAL_TICKS,
5139 tp->coal.stats_block_coalesce_usecs);
5140 } else {
5141 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5145 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5146 val = tr32(PCIE_PWR_MGMT_THRESH);
5147 if (!netif_carrier_ok(tp->dev))
5148 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5149 tp->pwrmgmt_thresh;
5150 else
5151 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5152 tw32(PCIE_PWR_MGMT_THRESH, val);
5155 return err;
5158 static inline int tg3_irq_sync(struct tg3 *tp)
5160 return tp->irq_sync;
5163 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5165 int i;
5167 dst = (u32 *)((u8 *)dst + off);
5168 for (i = 0; i < len; i += sizeof(u32))
5169 *dst++ = tr32(off + i);
5170 }
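/* Editor's note: biasing 'dst' by 'off' above stores each register
 * value at the same byte offset in the dump buffer as its MMIO
 * offset, so the addresses printed by tg3_dump_state() line up with
 * the register map.
 */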
5172 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5174 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5175 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5176 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5177 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5178 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5179 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5180 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5181 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5182 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5183 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5184 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5185 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5186 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5187 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5188 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5189 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5190 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5191 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5192 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5194 if (tg3_flag(tp, SUPPORT_MSIX))
5195 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5197 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5198 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5199 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5200 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5201 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5202 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5203 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5204 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5206 if (!tg3_flag(tp, 5705_PLUS)) {
5207 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5208 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5209 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5212 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5213 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5214 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5215 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5216 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5218 if (tg3_flag(tp, NVRAM))
5219 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5222 static void tg3_dump_state(struct tg3 *tp)
5224 int i;
5225 u32 *regs;
5227 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5228 if (!regs) {
5229 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5230 return;
5233 if (tg3_flag(tp, PCI_EXPRESS)) {
5234 /* Read up to but not including private PCI registers */
5235 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5236 regs[i / sizeof(u32)] = tr32(i);
5237 } else
5238 tg3_dump_legacy_regs(tp, regs);
5240 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5241 if (!regs[i + 0] && !regs[i + 1] &&
5242 !regs[i + 2] && !regs[i + 3])
5243 continue;
5245 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5246 i * 4,
5247 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5250 kfree(regs);
5252 for (i = 0; i < tp->irq_cnt; i++) {
5253 struct tg3_napi *tnapi = &tp->napi[i];
5255 /* SW status block */
5256 netdev_err(tp->dev,
5257 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5259 tnapi->hw_status->status,
5260 tnapi->hw_status->status_tag,
5261 tnapi->hw_status->rx_jumbo_consumer,
5262 tnapi->hw_status->rx_consumer,
5263 tnapi->hw_status->rx_mini_consumer,
5264 tnapi->hw_status->idx[0].rx_producer,
5265 tnapi->hw_status->idx[0].tx_consumer);
5267 netdev_err(tp->dev,
5268 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5270 tnapi->last_tag, tnapi->last_irq_tag,
5271 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5272 tnapi->rx_rcb_ptr,
5273 tnapi->prodring.rx_std_prod_idx,
5274 tnapi->prodring.rx_std_cons_idx,
5275 tnapi->prodring.rx_jmb_prod_idx,
5276 tnapi->prodring.rx_jmb_cons_idx);
5280 /* This is called whenever we suspect that the system chipset is re-
5281 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5282 * is bogus tx completions. We try to recover by setting the
5283 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5284 * in the workqueue.
5285 */
5286 static void tg3_tx_recover(struct tg3 *tp)
5288 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5289 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5291 netdev_warn(tp->dev,
5292 "The system may be re-ordering memory-mapped I/O "
5293 "cycles to the network device, attempting to recover. "
5294 "Please report the problem to the driver maintainer "
5295 "and include system chipset information.\n");
5297 spin_lock(&tp->lock);
5298 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5299 spin_unlock(&tp->lock);
5302 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5304 /* Tell compiler to fetch tx indices from memory. */
5305 barrier();
5306 return tnapi->tx_pending -
5307 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5308 }
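/* Illustrative example (assuming the usual TG3_TX_RING_SIZE of 512):
 * with tx_prod == 5 and tx_cons == 510, (5 - 510) & 511 == 7
 * descriptors are in flight, leaving tx_pending - 7 slots free. The
 * barrier() above keeps the compiler from using cached indices.
 */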
5310 /* Tigon3 never reports partial packet sends. So we do not
5311 * need special logic to handle SKBs that have not had all
5312 * of their frags sent yet, like SunGEM does.
5313 */
5314 static void tg3_tx(struct tg3_napi *tnapi)
5316 struct tg3 *tp = tnapi->tp;
5317 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5318 u32 sw_idx = tnapi->tx_cons;
5319 struct netdev_queue *txq;
5320 int index = tnapi - tp->napi;
5322 if (tg3_flag(tp, ENABLE_TSS))
5323 index--;
5325 txq = netdev_get_tx_queue(tp->dev, index);
5327 while (sw_idx != hw_idx) {
5328 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5329 struct sk_buff *skb = ri->skb;
5330 int i, tx_bug = 0;
5332 if (unlikely(skb == NULL)) {
5333 tg3_tx_recover(tp);
5334 return;
5337 pci_unmap_single(tp->pdev,
5338 dma_unmap_addr(ri, mapping),
5339 skb_headlen(skb),
5340 PCI_DMA_TODEVICE);
5342 ri->skb = NULL;
5344 while (ri->fragmented) {
5345 ri->fragmented = false;
5346 sw_idx = NEXT_TX(sw_idx);
5347 ri = &tnapi->tx_buffers[sw_idx];
5350 sw_idx = NEXT_TX(sw_idx);
5352 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5353 ri = &tnapi->tx_buffers[sw_idx];
5354 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5355 tx_bug = 1;
5357 pci_unmap_page(tp->pdev,
5358 dma_unmap_addr(ri, mapping),
5359 skb_shinfo(skb)->frags[i].size,
5360 PCI_DMA_TODEVICE);
5362 while (ri->fragmented) {
5363 ri->fragmented = false;
5364 sw_idx = NEXT_TX(sw_idx);
5365 ri = &tnapi->tx_buffers[sw_idx];
5368 sw_idx = NEXT_TX(sw_idx);
5371 dev_kfree_skb(skb);
5373 if (unlikely(tx_bug)) {
5374 tg3_tx_recover(tp);
5375 return;
5379 tnapi->tx_cons = sw_idx;
5381 /* Need to make the tx_cons update visible to tg3_start_xmit()
5382 * before checking for netif_queue_stopped(). Without the
5383 * memory barrier, there is a small possibility that tg3_start_xmit()
5384 * will miss it and cause the queue to be stopped forever.
5385 */
5386 smp_mb();
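/* Editor's note (assumption): this barrier presumably pairs with a
 * corresponding barrier on the tg3_start_xmit() side (not part of
 * this hunk) so that a producer that just stopped the queue observes
 * the updated tx_cons before deciding to stay stopped.
 */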
5388 if (unlikely(netif_tx_queue_stopped(txq) &&
5389 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5390 __netif_tx_lock(txq, smp_processor_id());
5391 if (netif_tx_queue_stopped(txq) &&
5392 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5393 netif_tx_wake_queue(txq);
5394 __netif_tx_unlock(txq);
5398 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5400 if (!ri->skb)
5401 return;
5403 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5404 map_sz, PCI_DMA_FROMDEVICE);
5405 dev_kfree_skb_any(ri->skb);
5406 ri->skb = NULL;
5409 /* Returns size of skb allocated or < 0 on error.
5411 * We only need to fill in the address because the other members
5412 * of the RX descriptor are invariant, see tg3_init_rings.
5414 * Note the purposeful asymmetry of cpu vs. chip accesses. For
5415 * posting buffers we only dirty the first cache line of the RX
5416 * descriptor (containing the address). Whereas for the RX status
5417 * buffers the cpu only reads the last cacheline of the RX descriptor
5418 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5419 */
5420 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5421 u32 opaque_key, u32 dest_idx_unmasked)
5423 struct tg3_rx_buffer_desc *desc;
5424 struct ring_info *map;
5425 struct sk_buff *skb;
5426 dma_addr_t mapping;
5427 int skb_size, dest_idx;
5429 switch (opaque_key) {
5430 case RXD_OPAQUE_RING_STD:
5431 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5432 desc = &tpr->rx_std[dest_idx];
5433 map = &tpr->rx_std_buffers[dest_idx];
5434 skb_size = tp->rx_pkt_map_sz;
5435 break;
5437 case RXD_OPAQUE_RING_JUMBO:
5438 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5439 desc = &tpr->rx_jmb[dest_idx].std;
5440 map = &tpr->rx_jmb_buffers[dest_idx];
5441 skb_size = TG3_RX_JMB_MAP_SZ;
5442 break;
5444 default:
5445 return -EINVAL;
5448 /* Do not overwrite any of the map or rp information
5449 * until we are sure we can commit to a new buffer.
5451 * Callers depend upon this behavior and assume that
5452 * we leave everything unchanged if we fail.
5453 */
5454 skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
5455 if (skb == NULL)
5456 return -ENOMEM;
5458 skb_reserve(skb, TG3_RX_OFFSET(tp));
5460 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
5461 PCI_DMA_FROMDEVICE);
5462 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5463 dev_kfree_skb(skb);
5464 return -EIO;
5467 map->skb = skb;
5468 dma_unmap_addr_set(map, mapping, mapping);
5470 desc->addr_hi = ((u64)mapping >> 32);
5471 desc->addr_lo = ((u64)mapping & 0xffffffff);
5473 return skb_size;
5474 }
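/* Usage sketch: tg3_rx() below treats a negative return as "keep the
 * old buffer" (the drop_it path), which relies on this function
 * leaving the map and descriptor untouched on failure, exactly as
 * the comment above promises.
 */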
5476 /* We only need to move over in the address because the other
5477 * members of the RX descriptor are invariant. See notes above
5478 * tg3_alloc_rx_skb for full details.
5479 */
5480 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5481 struct tg3_rx_prodring_set *dpr,
5482 u32 opaque_key, int src_idx,
5483 u32 dest_idx_unmasked)
5485 struct tg3 *tp = tnapi->tp;
5486 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5487 struct ring_info *src_map, *dest_map;
5488 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5489 int dest_idx;
5491 switch (opaque_key) {
5492 case RXD_OPAQUE_RING_STD:
5493 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5494 dest_desc = &dpr->rx_std[dest_idx];
5495 dest_map = &dpr->rx_std_buffers[dest_idx];
5496 src_desc = &spr->rx_std[src_idx];
5497 src_map = &spr->rx_std_buffers[src_idx];
5498 break;
5500 case RXD_OPAQUE_RING_JUMBO:
5501 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5502 dest_desc = &dpr->rx_jmb[dest_idx].std;
5503 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5504 src_desc = &spr->rx_jmb[src_idx].std;
5505 src_map = &spr->rx_jmb_buffers[src_idx];
5506 break;
5508 default:
5509 return;
5512 dest_map->skb = src_map->skb;
5513 dma_unmap_addr_set(dest_map, mapping,
5514 dma_unmap_addr(src_map, mapping));
5515 dest_desc->addr_hi = src_desc->addr_hi;
5516 dest_desc->addr_lo = src_desc->addr_lo;
5518 /* Ensure that the update to the skb happens after the physical
5519 * addresses have been transferred to the new BD location.
5520 */
5521 smp_wmb();
5523 src_map->skb = NULL;
5524 }
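/* Editor's note: the smp_wmb()/skb-clear sequence above appears to
 * pair with the smp_rmb() in tg3_rx_prodring_xfer(), which tests the
 * destination skb pointers before copying descriptor addresses
 * across rings.
 */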
5526 /* The RX ring scheme is composed of multiple rings which post fresh
5527 * buffers to the chip, and one special ring the chip uses to report
5528 * status back to the host.
5530 * The special ring reports the status of received packets to the
5531 * host. The chip does not write into the original descriptor the
5532 * RX buffer was obtained from. The chip simply takes the original
5533 * descriptor as provided by the host, updates the status and length
5534 * field, then writes this into the next status ring entry.
5536 * Each ring the host uses to post buffers to the chip is described
5537 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
5538 * it is first placed into the on-chip ram. When the packet's length
5539 * is known, it walks down the TG3_BDINFO entries to select the ring.
5540 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5541 * which is within the range of the new packet's length is chosen.
5543 * The "separate ring for rx status" scheme may sound queer, but it makes
5544 * sense from a cache coherency perspective. If only the host writes
5545 * to the buffer post rings, and only the chip writes to the rx status
5546 * rings, then cache lines never move beyond shared-modified state.
5547 * If both the host and chip were to write into the same ring, cache line
5548 * eviction could occur since both entities want it in an exclusive state.
5549 */
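/* Sketch of the flow described above (illustrative, not literal
 * driver code):
 *
 *   host: rx_std[i].addr = dma_map(skb); bump the std producer mailbox
 *   chip: DMAs the frame, writes { opaque, len, flags } into the
 *         return ring, and advances *rx_rcb_prod_idx
 *   host: tg3_rx() walks the return ring, hands skbs to the stack,
 *         and re-posts fresh buffers via tg3_alloc_rx_skb()
 */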
5550 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5552 struct tg3 *tp = tnapi->tp;
5553 u32 work_mask, rx_std_posted = 0;
5554 u32 std_prod_idx, jmb_prod_idx;
5555 u32 sw_idx = tnapi->rx_rcb_ptr;
5556 u16 hw_idx;
5557 int received;
5558 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5560 hw_idx = *(tnapi->rx_rcb_prod_idx);
5561 /*
5562 * We need to order the read of hw_idx and the read of
5563 * the opaque cookie.
5564 */
5565 rmb();
5566 work_mask = 0;
5567 received = 0;
5568 std_prod_idx = tpr->rx_std_prod_idx;
5569 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5570 while (sw_idx != hw_idx && budget > 0) {
5571 struct ring_info *ri;
5572 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5573 unsigned int len;
5574 struct sk_buff *skb;
5575 dma_addr_t dma_addr;
5576 u32 opaque_key, desc_idx, *post_ptr;
5578 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5579 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5580 if (opaque_key == RXD_OPAQUE_RING_STD) {
5581 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5582 dma_addr = dma_unmap_addr(ri, mapping);
5583 skb = ri->skb;
5584 post_ptr = &std_prod_idx;
5585 rx_std_posted++;
5586 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5587 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5588 dma_addr = dma_unmap_addr(ri, mapping);
5589 skb = ri->skb;
5590 post_ptr = &jmb_prod_idx;
5591 } else
5592 goto next_pkt_nopost;
5594 work_mask |= opaque_key;
5596 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5597 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5598 drop_it:
5599 tg3_recycle_rx(tnapi, tpr, opaque_key,
5600 desc_idx, *post_ptr);
5601 drop_it_no_recycle:
5602 /* Other statistics kept track of by card. */
5603 tp->rx_dropped++;
5604 goto next_pkt;
5607 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5608 ETH_FCS_LEN;
5610 if (len > TG3_RX_COPY_THRESH(tp)) {
5611 int skb_size;
5613 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5614 *post_ptr);
5615 if (skb_size < 0)
5616 goto drop_it;
5618 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5619 PCI_DMA_FROMDEVICE);
5621 /* Ensure that the update to the skb happens
5622 * after the usage of the old DMA mapping.
5623 */
5624 smp_wmb();
5626 ri->skb = NULL;
5628 skb_put(skb, len);
5629 } else {
5630 struct sk_buff *copy_skb;
5632 tg3_recycle_rx(tnapi, tpr, opaque_key,
5633 desc_idx, *post_ptr);
5635 copy_skb = netdev_alloc_skb(tp->dev, len +
5636 TG3_RAW_IP_ALIGN);
5637 if (copy_skb == NULL)
5638 goto drop_it_no_recycle;
5640 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5641 skb_put(copy_skb, len);
5642 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5643 skb_copy_from_linear_data(skb, copy_skb->data, len);
5644 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5646 /* We'll reuse the original ring buffer. */
5647 skb = copy_skb;
5650 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5651 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5652 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5653 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5654 skb->ip_summed = CHECKSUM_UNNECESSARY;
5655 else
5656 skb_checksum_none_assert(skb);
5658 skb->protocol = eth_type_trans(skb, tp->dev);
5660 if (len > (tp->dev->mtu + ETH_HLEN) &&
5661 skb->protocol != htons(ETH_P_8021Q)) {
5662 dev_kfree_skb(skb);
5663 goto drop_it_no_recycle;
5666 if (desc->type_flags & RXD_FLAG_VLAN &&
5667 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5668 __vlan_hwaccel_put_tag(skb,
5669 desc->err_vlan & RXD_VLAN_MASK);
5671 napi_gro_receive(&tnapi->napi, skb);
5673 received++;
5674 budget--;
5676 next_pkt:
5677 (*post_ptr)++;
5679 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5680 tpr->rx_std_prod_idx = std_prod_idx &
5681 tp->rx_std_ring_mask;
5682 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5683 tpr->rx_std_prod_idx);
5684 work_mask &= ~RXD_OPAQUE_RING_STD;
5685 rx_std_posted = 0;
5687 next_pkt_nopost:
5688 sw_idx++;
5689 sw_idx &= tp->rx_ret_ring_mask;
5691 /* Refresh hw_idx to see if there is new work */
5692 if (sw_idx == hw_idx) {
5693 hw_idx = *(tnapi->rx_rcb_prod_idx);
5694 rmb();
5698 /* ACK the status ring. */
5699 tnapi->rx_rcb_ptr = sw_idx;
5700 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5702 /* Refill RX ring(s). */
5703 if (!tg3_flag(tp, ENABLE_RSS)) {
5704 if (work_mask & RXD_OPAQUE_RING_STD) {
5705 tpr->rx_std_prod_idx = std_prod_idx &
5706 tp->rx_std_ring_mask;
5707 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5708 tpr->rx_std_prod_idx);
5710 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5711 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5712 tp->rx_jmb_ring_mask;
5713 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5714 tpr->rx_jmb_prod_idx);
5716 mmiowb();
5717 } else if (work_mask) {
5718 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5719 * updated before the producer indices can be updated.
5720 */
5721 smp_wmb();
5723 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5724 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5726 if (tnapi != &tp->napi[1])
5727 napi_schedule(&tp->napi[1].napi);
5730 return received;
5733 static void tg3_poll_link(struct tg3 *tp)
5735 /* handle link change and other phy events */
5736 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5737 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5739 if (sblk->status & SD_STATUS_LINK_CHG) {
5740 sblk->status = SD_STATUS_UPDATED |
5741 (sblk->status & ~SD_STATUS_LINK_CHG);
5742 spin_lock(&tp->lock);
5743 if (tg3_flag(tp, USE_PHYLIB)) {
5744 tw32_f(MAC_STATUS,
5745 (MAC_STATUS_SYNC_CHANGED |
5746 MAC_STATUS_CFG_CHANGED |
5747 MAC_STATUS_MI_COMPLETION |
5748 MAC_STATUS_LNKSTATE_CHANGED));
5749 udelay(40);
5750 } else
5751 tg3_setup_phy(tp, 0);
5752 spin_unlock(&tp->lock);
5757 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5758 struct tg3_rx_prodring_set *dpr,
5759 struct tg3_rx_prodring_set *spr)
5761 u32 si, di, cpycnt, src_prod_idx;
5762 int i, err = 0;
5764 while (1) {
5765 src_prod_idx = spr->rx_std_prod_idx;
5767 /* Make sure updates to the rx_std_buffers[] entries and the
5768 * standard producer index are seen in the correct order.
5769 */
5770 smp_rmb();
5772 if (spr->rx_std_cons_idx == src_prod_idx)
5773 break;
5775 if (spr->rx_std_cons_idx < src_prod_idx)
5776 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5777 else
5778 cpycnt = tp->rx_std_ring_mask + 1 -
5779 spr->rx_std_cons_idx;
5781 cpycnt = min(cpycnt,
5782 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5784 si = spr->rx_std_cons_idx;
5785 di = dpr->rx_std_prod_idx;
5787 for (i = di; i < di + cpycnt; i++) {
5788 if (dpr->rx_std_buffers[i].skb) {
5789 cpycnt = i - di;
5790 err = -ENOSPC;
5791 break;
5795 if (!cpycnt)
5796 break;
5798 /* Ensure that updates to the rx_std_buffers ring and the
5799 * shadowed hardware producer ring from tg3_recycle_rx() are
5800 * ordered correctly WRT the skb check above.
5801 */
5802 smp_rmb();
5804 memcpy(&dpr->rx_std_buffers[di],
5805 &spr->rx_std_buffers[si],
5806 cpycnt * sizeof(struct ring_info));
5808 for (i = 0; i < cpycnt; i++, di++, si++) {
5809 struct tg3_rx_buffer_desc *sbd, *dbd;
5810 sbd = &spr->rx_std[si];
5811 dbd = &dpr->rx_std[di];
5812 dbd->addr_hi = sbd->addr_hi;
5813 dbd->addr_lo = sbd->addr_lo;
5816 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5817 tp->rx_std_ring_mask;
5818 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5819 tp->rx_std_ring_mask;
5822 while (1) {
5823 src_prod_idx = spr->rx_jmb_prod_idx;
5825 /* Make sure updates to the rx_jmb_buffers[] entries and
5826 * the jumbo producer index are seen in the correct order.
5827 */
5828 smp_rmb();
5830 if (spr->rx_jmb_cons_idx == src_prod_idx)
5831 break;
5833 if (spr->rx_jmb_cons_idx < src_prod_idx)
5834 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5835 else
5836 cpycnt = tp->rx_jmb_ring_mask + 1 -
5837 spr->rx_jmb_cons_idx;
5839 cpycnt = min(cpycnt,
5840 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5842 si = spr->rx_jmb_cons_idx;
5843 di = dpr->rx_jmb_prod_idx;
5845 for (i = di; i < di + cpycnt; i++) {
5846 if (dpr->rx_jmb_buffers[i].skb) {
5847 cpycnt = i - di;
5848 err = -ENOSPC;
5849 break;
5853 if (!cpycnt)
5854 break;
5856 /* Ensure that updates to the rx_jmb_buffers ring and the
5857 * shadowed hardware producer ring from tg3_recycle_rx() are
5858 * ordered correctly WRT the skb check above.
5859 */
5860 smp_rmb();
5862 memcpy(&dpr->rx_jmb_buffers[di],
5863 &spr->rx_jmb_buffers[si],
5864 cpycnt * sizeof(struct ring_info));
5866 for (i = 0; i < cpycnt; i++, di++, si++) {
5867 struct tg3_rx_buffer_desc *sbd, *dbd;
5868 sbd = &spr->rx_jmb[si].std;
5869 dbd = &dpr->rx_jmb[di].std;
5870 dbd->addr_hi = sbd->addr_hi;
5871 dbd->addr_lo = sbd->addr_lo;
5874 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5875 tp->rx_jmb_ring_mask;
5876 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5877 tp->rx_jmb_ring_mask;
5880 return err;
5881 }
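/* Editor's note: with RSS enabled, tg3_poll_work() below uses this
 * helper to drain every vector's producer ring into
 * tp->napi[0].prodring; on -ENOSPC it writes HOSTCC_MODE with
 * coal_now so the transfer is retried on a forced pass.
 */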
5883 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5885 struct tg3 *tp = tnapi->tp;
5887 /* run TX completion thread */
5888 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5889 tg3_tx(tnapi);
5890 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5891 return work_done;
5894 /* run RX thread, within the bounds set by NAPI.
5895 * All RX "locking" is done by ensuring outside
5896 * code synchronizes with tg3->napi.poll()
5897 */
5898 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5899 work_done += tg3_rx(tnapi, budget - work_done);
5901 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5902 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5903 int i, err = 0;
5904 u32 std_prod_idx = dpr->rx_std_prod_idx;
5905 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5907 for (i = 1; i < tp->irq_cnt; i++)
5908 err |= tg3_rx_prodring_xfer(tp, dpr,
5909 &tp->napi[i].prodring);
5911 wmb();
5913 if (std_prod_idx != dpr->rx_std_prod_idx)
5914 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5915 dpr->rx_std_prod_idx);
5917 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5918 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5919 dpr->rx_jmb_prod_idx);
5921 mmiowb();
5923 if (err)
5924 tw32_f(HOSTCC_MODE, tp->coal_now);
5927 return work_done;
5930 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5932 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5933 struct tg3 *tp = tnapi->tp;
5934 int work_done = 0;
5935 struct tg3_hw_status *sblk = tnapi->hw_status;
5937 while (1) {
5938 work_done = tg3_poll_work(tnapi, work_done, budget);
5940 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5941 goto tx_recovery;
5943 if (unlikely(work_done >= budget))
5944 break;
5946 /* tp->last_tag is used in tg3_int_reenable() below
5947 * to tell the hw how much work has been processed,
5948 * so we must read it before checking for more work.
5949 */
5950 tnapi->last_tag = sblk->status_tag;
5951 tnapi->last_irq_tag = tnapi->last_tag;
5952 rmb();
5954 /* check for RX/TX work to do */
5955 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5956 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5957 napi_complete(napi);
5958 /* Reenable interrupts. */
5959 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5960 mmiowb();
5961 break;
5965 return work_done;
5967 tx_recovery:
5968 /* work_done is guaranteed to be less than budget. */
5969 napi_complete(napi);
5970 schedule_work(&tp->reset_task);
5971 return work_done;
5974 static void tg3_process_error(struct tg3 *tp)
5976 u32 val;
5977 bool real_error = false;
5979 if (tg3_flag(tp, ERROR_PROCESSED))
5980 return;
5982 /* Check Flow Attention register */
5983 val = tr32(HOSTCC_FLOW_ATTN);
5984 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5985 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5986 real_error = true;
5989 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5990 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5991 real_error = true;
5994 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5995 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5996 real_error = true;
5999 if (!real_error)
6000 return;
6002 tg3_dump_state(tp);
6004 tg3_flag_set(tp, ERROR_PROCESSED);
6005 schedule_work(&tp->reset_task);
6008 static int tg3_poll(struct napi_struct *napi, int budget)
6010 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6011 struct tg3 *tp = tnapi->tp;
6012 int work_done = 0;
6013 struct tg3_hw_status *sblk = tnapi->hw_status;
6015 while (1) {
6016 if (sblk->status & SD_STATUS_ERROR)
6017 tg3_process_error(tp);
6019 tg3_poll_link(tp);
6021 work_done = tg3_poll_work(tnapi, work_done, budget);
6023 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6024 goto tx_recovery;
6026 if (unlikely(work_done >= budget))
6027 break;
6029 if (tg3_flag(tp, TAGGED_STATUS)) {
6030 /* tp->last_tag is used in tg3_int_reenable() below
6031 * to tell the hw how much work has been processed,
6032 * so we must read it before checking for more work.
6033 */
6034 tnapi->last_tag = sblk->status_tag;
6035 tnapi->last_irq_tag = tnapi->last_tag;
6036 rmb();
6037 } else
6038 sblk->status &= ~SD_STATUS_UPDATED;
6040 if (likely(!tg3_has_work(tnapi))) {
6041 napi_complete(napi);
6042 tg3_int_reenable(tnapi);
6043 break;
6047 return work_done;
6049 tx_recovery:
6050 /* work_done is guaranteed to be less than budget. */
6051 napi_complete(napi);
6052 schedule_work(&tp->reset_task);
6053 return work_done;
6054 }
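/* Editor's note (NAPI contract recap): both poll routines above
 * return work_done == budget to stay scheduled for another pass, and
 * only call napi_complete() before re-enabling interrupts once they
 * have run out of work.
 */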
6056 static void tg3_napi_disable(struct tg3 *tp)
6058 int i;
6060 for (i = tp->irq_cnt - 1; i >= 0; i--)
6061 napi_disable(&tp->napi[i].napi);
6064 static void tg3_napi_enable(struct tg3 *tp)
6066 int i;
6068 for (i = 0; i < tp->irq_cnt; i++)
6069 napi_enable(&tp->napi[i].napi);
6072 static void tg3_napi_init(struct tg3 *tp)
6074 int i;
6076 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6077 for (i = 1; i < tp->irq_cnt; i++)
6078 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6081 static void tg3_napi_fini(struct tg3 *tp)
6083 int i;
6085 for (i = 0; i < tp->irq_cnt; i++)
6086 netif_napi_del(&tp->napi[i].napi);
6089 static inline void tg3_netif_stop(struct tg3 *tp)
6091 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6092 tg3_napi_disable(tp);
6093 netif_tx_disable(tp->dev);
6096 static inline void tg3_netif_start(struct tg3 *tp)
6098 /* NOTE: unconditional netif_tx_wake_all_queues is only
6099 * appropriate so long as all callers are assured to
6100 * have free tx slots (such as after tg3_init_hw)
6101 */
6102 netif_tx_wake_all_queues(tp->dev);
6104 tg3_napi_enable(tp);
6105 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6106 tg3_enable_ints(tp);
6109 static void tg3_irq_quiesce(struct tg3 *tp)
6111 int i;
6113 BUG_ON(tp->irq_sync);
6115 tp->irq_sync = 1;
6116 smp_mb();
6118 for (i = 0; i < tp->irq_cnt; i++)
6119 synchronize_irq(tp->napi[i].irq_vec);
6120 }
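/* Editor's note: the smp_mb() above makes the irq_sync store visible
 * before synchronize_irq() waits, and the hot-path handlers only
 * read the flag through tg3_irq_sync(), so no lock is taken on that
 * side.
 */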
6122 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6123 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6124 * with as well. Most of the time, this is not necessary except when
6125 * shutting down the device.
6126 */
6127 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6129 spin_lock_bh(&tp->lock);
6130 if (irq_sync)
6131 tg3_irq_quiesce(tp);
6134 static inline void tg3_full_unlock(struct tg3 *tp)
6136 spin_unlock_bh(&tp->lock);
6139 /* One-shot MSI handler - Chip automatically disables interrupt
6140 * after sending MSI so driver doesn't have to do it.
6141 */
6142 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6144 struct tg3_napi *tnapi = dev_id;
6145 struct tg3 *tp = tnapi->tp;
6147 prefetch(tnapi->hw_status);
6148 if (tnapi->rx_rcb)
6149 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6151 if (likely(!tg3_irq_sync(tp)))
6152 napi_schedule(&tnapi->napi);
6154 return IRQ_HANDLED;
6157 /* MSI ISR - No need to check for interrupt sharing and no need to
6158 * flush status block and interrupt mailbox. PCI ordering rules
6159 * guarantee that MSI will arrive after the status block.
6160 */
6161 static irqreturn_t tg3_msi(int irq, void *dev_id)
6163 struct tg3_napi *tnapi = dev_id;
6164 struct tg3 *tp = tnapi->tp;
6166 prefetch(tnapi->hw_status);
6167 if (tnapi->rx_rcb)
6168 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6169 /*
6170 * Writing any value to intr-mbox-0 clears PCI INTA# and
6171 * chip-internal interrupt pending events.
6172 * Writing non-zero to intr-mbox-0 additionally tells the
6173 * NIC to stop sending us irqs, engaging "in-intr-handler"
6174 * event coalescing.
6175 */
6176 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6177 if (likely(!tg3_irq_sync(tp)))
6178 napi_schedule(&tnapi->napi);
6180 return IRQ_RETVAL(1);
6183 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6185 struct tg3_napi *tnapi = dev_id;
6186 struct tg3 *tp = tnapi->tp;
6187 struct tg3_hw_status *sblk = tnapi->hw_status;
6188 unsigned int handled = 1;
6190 /* In INTx mode, it is possible for the interrupt to arrive at
6191 * the CPU before the status block posted prior to the interrupt
6192 * becomes visible. Reading the PCI State register will confirm
6193 * whether the interrupt is ours and will flush the status block.
6194 */
6195 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6196 if (tg3_flag(tp, CHIP_RESETTING) ||
6197 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6198 handled = 0;
6199 goto out;
6203 /*
6204 * Writing any value to intr-mbox-0 clears PCI INTA# and
6205 * chip-internal interrupt pending events.
6206 * Writing non-zero to intr-mbox-0 additionally tells the
6207 * NIC to stop sending us irqs, engaging "in-intr-handler"
6208 * event coalescing.
6209 *
6210 * Flush the mailbox to de-assert the IRQ immediately to prevent
6211 * spurious interrupts. The flush impacts performance but
6212 * excessive spurious interrupts can be worse in some cases.
6213 */
6214 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6215 if (tg3_irq_sync(tp))
6216 goto out;
6217 sblk->status &= ~SD_STATUS_UPDATED;
6218 if (likely(tg3_has_work(tnapi))) {
6219 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6220 napi_schedule(&tnapi->napi);
6221 } else {
6222 /* No work, shared interrupt perhaps? re-enable
6223 * interrupts, and flush that PCI write
6224 */
6225 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6226 0x00000000);
6228 out:
6229 return IRQ_RETVAL(handled);
6232 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6234 struct tg3_napi *tnapi = dev_id;
6235 struct tg3 *tp = tnapi->tp;
6236 struct tg3_hw_status *sblk = tnapi->hw_status;
6237 unsigned int handled = 1;
6239 /* In INTx mode, it is possible for the interrupt to arrive at
6240 * the CPU before the status block posted prior to the interrupt.
6241 * Reading the PCI State register will confirm whether the
6242 * interrupt is ours and will flush the status block.
6243 */
6244 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6245 if (tg3_flag(tp, CHIP_RESETTING) ||
6246 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6247 handled = 0;
6248 goto out;
6252 /*
6253 * Writing any value to intr-mbox-0 clears PCI INTA# and
6254 * chip-internal interrupt pending events.
6255 * Writing non-zero to intr-mbox-0 additionally tells the
6256 * NIC to stop sending us irqs, engaging "in-intr-handler"
6257 * event coalescing.
6258 *
6259 * Flush the mailbox to de-assert the IRQ immediately to prevent
6260 * spurious interrupts. The flush impacts performance but
6261 * excessive spurious interrupts can be worse in some cases.
6262 */
6263 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6265 /*
6266 * In a shared interrupt configuration, sometimes other devices'
6267 * interrupts will scream. We record the current status tag here
6268 * so that the above check can report that the screaming interrupts
6269 * are unhandled. Eventually they will be silenced.
6270 */
6271 tnapi->last_irq_tag = sblk->status_tag;
6273 if (tg3_irq_sync(tp))
6274 goto out;
6276 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6278 napi_schedule(&tnapi->napi);
6280 out:
6281 return IRQ_RETVAL(handled);
6284 /* ISR for interrupt test */
6285 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6287 struct tg3_napi *tnapi = dev_id;
6288 struct tg3 *tp = tnapi->tp;
6289 struct tg3_hw_status *sblk = tnapi->hw_status;
6291 if ((sblk->status & SD_STATUS_UPDATED) ||
6292 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6293 tg3_disable_ints(tp);
6294 return IRQ_RETVAL(1);
6296 return IRQ_RETVAL(0);
6299 static int tg3_init_hw(struct tg3 *, int);
6300 static int tg3_halt(struct tg3 *, int, int);
6302 /* Restart hardware after configuration changes, self-test, etc.
6303 * Invoked with tp->lock held.
6304 */
6305 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
6306 __releases(tp->lock)
6307 __acquires(tp->lock)
6309 int err;
6311 err = tg3_init_hw(tp, reset_phy);
6312 if (err) {
6313 netdev_err(tp->dev,
6314 "Failed to re-initialize device, aborting\n");
6315 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6316 tg3_full_unlock(tp);
6317 del_timer_sync(&tp->timer);
6318 tp->irq_sync = 0;
6319 tg3_napi_enable(tp);
6320 dev_close(tp->dev);
6321 tg3_full_lock(tp, 0);
6323 return err;
6326 #ifdef CONFIG_NET_POLL_CONTROLLER
6327 static void tg3_poll_controller(struct net_device *dev)
6329 int i;
6330 struct tg3 *tp = netdev_priv(dev);
6332 for (i = 0; i < tp->irq_cnt; i++)
6333 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6335 #endif
6337 static void tg3_reset_task(struct work_struct *work)
6339 struct tg3 *tp = container_of(work, struct tg3, reset_task);
6340 int err;
6341 unsigned int restart_timer;
6343 tg3_full_lock(tp, 0);
6345 if (!netif_running(tp->dev)) {
6346 tg3_full_unlock(tp);
6347 return;
6350 tg3_full_unlock(tp);
6352 tg3_phy_stop(tp);
6354 tg3_netif_stop(tp);
6356 tg3_full_lock(tp, 1);
6358 restart_timer = tg3_flag(tp, RESTART_TIMER);
6359 tg3_flag_clear(tp, RESTART_TIMER);
6361 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6362 tp->write32_tx_mbox = tg3_write32_tx_mbox;
6363 tp->write32_rx_mbox = tg3_write_flush_reg32;
6364 tg3_flag_set(tp, MBOX_WRITE_REORDER);
6365 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6368 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
6369 err = tg3_init_hw(tp, 1);
6370 if (err)
6371 goto out;
6373 tg3_netif_start(tp);
6375 if (restart_timer)
6376 mod_timer(&tp->timer, jiffies + 1);
6378 out:
6379 tg3_full_unlock(tp);
6381 if (!err)
6382 tg3_phy_start(tp);
6385 static void tg3_tx_timeout(struct net_device *dev)
6387 struct tg3 *tp = netdev_priv(dev);
6389 if (netif_msg_tx_err(tp)) {
6390 netdev_err(dev, "transmit timed out, resetting\n");
6391 tg3_dump_state(tp);
6394 schedule_work(&tp->reset_task);
6397 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6398 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6400 u32 base = (u32) mapping & 0xffffffff;
6402 return (base > 0xffffdcc0) && (base + len + 8 < base);
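/* Worked example for the test above (illustrative): with
 * base = 0xfffff000 and len = 0x2000, base + len + 8 wraps around to
 * 0x1008, which is less than base, and base > 0xffffdcc0 also holds, so
 * the test reports an overflow (returns nonzero). The extra 8 bytes and
 * the 0xffffdcc0 cutoff appear to give the hardware workaround some
 * slack just below the 4GB boundary.
 */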
6405 /* Test for DMA addresses > 40-bit */
6406 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6407 int len)
6409 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6410 if (tg3_flag(tp, 40BIT_DMA_BUG))
6411 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6412 return 0;
6413 #else
6414 return 0;
6415 #endif
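/* Worked example (illustrative): DMA_BIT_MASK(40) is 0xffffffffff, so a
 * mapping of 0xfffffff000 with len = 0x2000 ends beyond the 40-bit limit
 * and trips the test on chips flagged with 40BIT_DMA_BUG. Per the #if
 * above, the test compiles to a constant 0 unless both CONFIG_HIGHMEM
 * and BITS_PER_LONG == 64 hold, i.e. unless such addresses can occur.
 */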
6418 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6419 dma_addr_t mapping, u32 len, u32 flags,
6420 u32 mss, u32 vlan)
6422 txbd->addr_hi = ((u64) mapping >> 32);
6423 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6424 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6425 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6428 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6429 dma_addr_t map, u32 len, u32 flags,
6430 u32 mss, u32 vlan)
6432 struct tg3 *tp = tnapi->tp;
6433 bool hwbug = false;
6435 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6436 hwbug = true;
6438 if (tg3_4g_overflow_test(map, len))
6439 hwbug = true;
6441 if (tg3_40bit_overflow_test(tp, map, len))
6442 hwbug = true;
6444 if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6445 u32 tmp_flag = flags & ~TXD_FLAG_END;
6446 while (len > TG3_TX_BD_DMA_MAX) {
6447 u32 frag_len = TG3_TX_BD_DMA_MAX;
6448 len -= TG3_TX_BD_DMA_MAX;
6450 if (len) {
6451 tnapi->tx_buffers[*entry].fragmented = true;
6452 /* Avoid the 8byte DMA problem */
6453 if (len <= 8) {
6454 len += TG3_TX_BD_DMA_MAX / 2;
6455 frag_len = TG3_TX_BD_DMA_MAX / 2;
6457 } else
6458 tmp_flag = flags;
6460 if (*budget) {
6461 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6462 frag_len, tmp_flag, mss, vlan);
6463 (*budget)--;
6464 *entry = NEXT_TX(*entry);
6465 } else {
6466 hwbug = true;
6467 break;
6470 map += frag_len;
6473 if (len) {
6474 if (*budget) {
6475 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6476 len, flags, mss, vlan);
6477 (*budget)--;
6478 *entry = NEXT_TX(*entry);
6479 } else {
6480 hwbug = true;
6483 } else {
6484 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6485 len, flags, mss, vlan);
6486 *entry = NEXT_TX(*entry);
6489 return hwbug;
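/* Worked example of the 4K-FIFO split above (illustrative, assuming
 * TG3_TX_BD_DMA_MAX is 4096): a fragment of len = 8196 is posted as
 * three BDs of 4096, 2048 and 2052 bytes. After the second 4096-byte
 * slice only 4 bytes would remain, so the loop shortens that slice to
 * 2048 bytes to keep the final piece above the 8-byte SHORT_DMA limit.
 */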
6492 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6494 int i;
6495 struct sk_buff *skb;
6496 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6498 skb = txb->skb;
6499 txb->skb = NULL;
6501 pci_unmap_single(tnapi->tp->pdev,
6502 dma_unmap_addr(txb, mapping),
6503 skb_headlen(skb),
6504 PCI_DMA_TODEVICE);
6506 while (txb->fragmented) {
6507 txb->fragmented = false;
6508 entry = NEXT_TX(entry);
6509 txb = &tnapi->tx_buffers[entry];
6512 for (i = 0; i < last; i++) {
6513 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6515 entry = NEXT_TX(entry);
6516 txb = &tnapi->tx_buffers[entry];
6518 pci_unmap_page(tnapi->tp->pdev,
6519 dma_unmap_addr(txb, mapping),
6520 frag->size, PCI_DMA_TODEVICE);
6522 while (txb->fragmented) {
6523 txb->fragmented = false;
6524 entry = NEXT_TX(entry);
6525 txb = &tnapi->tx_buffers[entry];
6530 /* Work around 4GB and 40-bit hardware DMA bugs. */
6531 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6532 struct sk_buff *skb,
6533 u32 *entry, u32 *budget,
6534 u32 base_flags, u32 mss, u32 vlan)
6536 struct tg3 *tp = tnapi->tp;
6537 struct sk_buff *new_skb;
6538 dma_addr_t new_addr = 0;
6539 int ret = 0;
6541 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6542 new_skb = skb_copy(skb, GFP_ATOMIC);
6543 else {
6544 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6546 new_skb = skb_copy_expand(skb,
6547 skb_headroom(skb) + more_headroom,
6548 skb_tailroom(skb), GFP_ATOMIC);
6551 if (!new_skb) {
6552 ret = -1;
6553 } else {
6554 /* New SKB is guaranteed to be linear. */
6555 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6556 PCI_DMA_TODEVICE);
6557 /* Make sure the mapping succeeded */
6558 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6559 dev_kfree_skb(new_skb);
6560 ret = -1;
6561 } else {
6562 base_flags |= TXD_FLAG_END;
6564 tnapi->tx_buffers[*entry].skb = new_skb;
6565 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6566 mapping, new_addr);
6568 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6569 new_skb->len, base_flags,
6570 mss, vlan)) {
6571 tg3_tx_skb_unmap(tnapi, *entry, 0);
6572 dev_kfree_skb(new_skb);
6573 ret = -1;
6578 dev_kfree_skb(skb);
6580 return ret;
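/* Design note (an interpretation, not from the original): rather than
 * trying to repair an offending mapping in place, the workaround above
 * copies the whole skb into a freshly allocated linear buffer and
 * resubmits it, trading a memcpy for correctness on chips with the
 * 4GB/40-bit DMA errata. If the copy or the new mapping fails, the
 * caller silently drops the packet.
 */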
6583 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6585 /* Use GSO to work around a rare TSO bug that may be triggered when the
6586 * TSO header is greater than 80 bytes.
6587 */
6588 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6590 struct sk_buff *segs, *nskb;
6591 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6593 /* Estimate the number of fragments in the worst case */
6594 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6595 netif_stop_queue(tp->dev);
6597 /* netif_tx_stop_queue() must be done before checking
6598 * tx index in tg3_tx_avail() below, because in
6599 * tg3_tx(), we update tx index before checking for
6600 * netif_tx_queue_stopped().
6601 */
6602 smp_mb();
6603 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6604 return NETDEV_TX_BUSY;
6606 netif_wake_queue(tp->dev);
6609 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6610 if (IS_ERR(segs))
6611 goto tg3_tso_bug_end;
6613 do {
6614 nskb = segs;
6615 segs = segs->next;
6616 nskb->next = NULL;
6617 tg3_start_xmit(nskb, tp->dev);
6618 } while (segs);
6620 tg3_tso_bug_end:
6621 dev_kfree_skb(skb);
6623 return NETDEV_TX_OK;
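/* Sizing note (an interpretation, not from the original): the factor of
 * 3 in frag_cnt_est appears to budget roughly three tx descriptors per
 * GSO segment in the worst case, so the queue is stopped early enough
 * that every segment produced by skb_gso_segment() can be handed back
 * to tg3_start_xmit() without overflowing the tx ring.
 */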
6626 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6627 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6628 */
6629 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6631 struct tg3 *tp = netdev_priv(dev);
6632 u32 len, entry, base_flags, mss, vlan = 0;
6633 u32 budget;
6634 int i = -1, would_hit_hwbug;
6635 dma_addr_t mapping;
6636 struct tg3_napi *tnapi;
6637 struct netdev_queue *txq;
6638 unsigned int last;
6640 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6641 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6642 if (tg3_flag(tp, ENABLE_TSS))
6643 tnapi++;
6645 budget = tg3_tx_avail(tnapi);
6647 /* We are running in BH disabled context with netif_tx_lock
6648 * and TX reclaim runs via tp->napi.poll inside of a software
6649 * interrupt. Furthermore, IRQ processing runs lockless so we have
6650 * no IRQ context deadlocks to worry about either. Rejoice!
6651 */
6652 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6653 if (!netif_tx_queue_stopped(txq)) {
6654 netif_tx_stop_queue(txq);
6656 /* This is a hard error, log it. */
6657 netdev_err(dev,
6658 "BUG! Tx Ring full when queue awake!\n");
6660 return NETDEV_TX_BUSY;
6663 entry = tnapi->tx_prod;
6664 base_flags = 0;
6665 if (skb->ip_summed == CHECKSUM_PARTIAL)
6666 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6668 mss = skb_shinfo(skb)->gso_size;
6669 if (mss) {
6670 struct iphdr *iph;
6671 u32 tcp_opt_len, hdr_len;
6673 if (skb_header_cloned(skb) &&
6674 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6675 dev_kfree_skb(skb);
6676 goto out_unlock;
6679 iph = ip_hdr(skb);
6680 tcp_opt_len = tcp_optlen(skb);
6682 if (skb_is_gso_v6(skb)) {
6683 hdr_len = skb_headlen(skb) - ETH_HLEN;
6684 } else {
6685 u32 ip_tcp_len;
6687 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6688 hdr_len = ip_tcp_len + tcp_opt_len;
6690 iph->check = 0;
6691 iph->tot_len = htons(mss + hdr_len);
6694 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6695 tg3_flag(tp, TSO_BUG))
6696 return tg3_tso_bug(tp, skb);
6698 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6699 TXD_FLAG_CPU_POST_DMA);
6701 if (tg3_flag(tp, HW_TSO_1) ||
6702 tg3_flag(tp, HW_TSO_2) ||
6703 tg3_flag(tp, HW_TSO_3)) {
6704 tcp_hdr(skb)->check = 0;
6705 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6706 } else
6707 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6708 iph->daddr, 0,
6709 IPPROTO_TCP,
6710 0);
6712 if (tg3_flag(tp, HW_TSO_3)) {
6713 mss |= (hdr_len & 0xc) << 12;
6714 if (hdr_len & 0x10)
6715 base_flags |= 0x00000010;
6716 base_flags |= (hdr_len & 0x3e0) << 5;
6717 } else if (tg3_flag(tp, HW_TSO_2))
6718 mss |= hdr_len << 9;
6719 else if (tg3_flag(tp, HW_TSO_1) ||
6720 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6721 if (tcp_opt_len || iph->ihl > 5) {
6722 int tsflags;
6724 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6725 mss |= (tsflags << 11);
6727 } else {
6728 if (tcp_opt_len || iph->ihl > 5) {
6729 int tsflags;
6731 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6732 base_flags |= tsflags << 12;
6737 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6738 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6739 base_flags |= TXD_FLAG_JMB_PKT;
6741 if (vlan_tx_tag_present(skb)) {
6742 base_flags |= TXD_FLAG_VLAN;
6743 vlan = vlan_tx_tag_get(skb);
6746 len = skb_headlen(skb);
6748 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6749 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6750 dev_kfree_skb(skb);
6751 goto out_unlock;
6754 tnapi->tx_buffers[entry].skb = skb;
6755 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6757 would_hit_hwbug = 0;
6759 if (tg3_flag(tp, 5701_DMA_BUG))
6760 would_hit_hwbug = 1;
6762 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6763 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6764 mss, vlan))
6765 would_hit_hwbug = 1;
6767 /* Now loop through additional data fragments, and queue them. */
6768 if (skb_shinfo(skb)->nr_frags > 0) {
6769 u32 tmp_mss = mss;
6771 if (!tg3_flag(tp, HW_TSO_1) &&
6772 !tg3_flag(tp, HW_TSO_2) &&
6773 !tg3_flag(tp, HW_TSO_3))
6774 tmp_mss = 0;
6776 last = skb_shinfo(skb)->nr_frags - 1;
6777 for (i = 0; i <= last; i++) {
6778 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6780 len = frag->size;
6781 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6782 len, DMA_TO_DEVICE);
6784 tnapi->tx_buffers[entry].skb = NULL;
6785 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6786 mapping);
6787 if (dma_mapping_error(&tp->pdev->dev, mapping))
6788 goto dma_error;
6790 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6791 len, base_flags |
6792 ((i == last) ? TXD_FLAG_END : 0),
6793 tmp_mss, vlan))
6794 would_hit_hwbug = 1;
6798 if (would_hit_hwbug) {
6799 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6801 /* If the workaround fails due to memory/mapping
6802 * failure, silently drop this packet.
6803 */
6804 entry = tnapi->tx_prod;
6805 budget = tg3_tx_avail(tnapi);
6806 if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
6807 base_flags, mss, vlan))
6808 goto out_unlock;
6811 skb_tx_timestamp(skb);
6813 /* Packets are ready, update Tx producer idx local and on card. */
6814 tw32_tx_mbox(tnapi->prodmbox, entry);
6816 tnapi->tx_prod = entry;
6817 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6818 netif_tx_stop_queue(txq);
6820 /* netif_tx_stop_queue() must be done before checking
6821 * tx index in tg3_tx_avail() below, because in
6822 * tg3_tx(), we update tx index before checking for
6823 * netif_tx_queue_stopped().
6824 */
6825 smp_mb();
6826 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6827 netif_tx_wake_queue(txq);
6830 out_unlock:
6831 mmiowb();
6833 return NETDEV_TX_OK;
6835 dma_error:
6836 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6837 dev_kfree_skb(skb);
6838 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6839 return NETDEV_TX_OK;
6842 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6844 if (enable) {
6845 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6846 MAC_MODE_PORT_MODE_MASK);
6848 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6850 if (!tg3_flag(tp, 5705_PLUS))
6851 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6853 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6854 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6855 else
6856 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6857 } else {
6858 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6860 if (tg3_flag(tp, 5705_PLUS) ||
6861 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6862 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6863 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6866 tw32(MAC_MODE, tp->mac_mode);
6867 udelay(40);
6870 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6872 u32 val, bmcr, mac_mode, ptest = 0;
6874 tg3_phy_toggle_apd(tp, false);
6875 tg3_phy_toggle_automdix(tp, 0);
6877 if (extlpbk && tg3_phy_set_extloopbk(tp))
6878 return -EIO;
6880 bmcr = BMCR_FULLDPLX;
6881 switch (speed) {
6882 case SPEED_10:
6883 break;
6884 case SPEED_100:
6885 bmcr |= BMCR_SPEED100;
6886 break;
6887 case SPEED_1000:
6888 default:
6889 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6890 speed = SPEED_100;
6891 bmcr |= BMCR_SPEED100;
6892 } else {
6893 speed = SPEED_1000;
6894 bmcr |= BMCR_SPEED1000;
6898 if (extlpbk) {
6899 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6900 tg3_readphy(tp, MII_CTRL1000, &val);
6901 val |= CTL1000_AS_MASTER |
6902 CTL1000_ENABLE_MASTER;
6903 tg3_writephy(tp, MII_CTRL1000, val);
6904 } else {
6905 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6906 MII_TG3_FET_PTEST_TRIM_2;
6907 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6909 } else
6910 bmcr |= BMCR_LOOPBACK;
6912 tg3_writephy(tp, MII_BMCR, bmcr);
6914 /* The write needs to be flushed for the FETs */
6915 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6916 tg3_readphy(tp, MII_BMCR, &bmcr);
6918 udelay(40);
6920 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6921 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6922 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6923 MII_TG3_FET_PTEST_FRC_TX_LINK |
6924 MII_TG3_FET_PTEST_FRC_TX_LOCK);
6926 /* The write needs to be flushed for the AC131 */
6927 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6930 /* Reset to prevent losing 1st rx packet intermittently */
6931 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6932 tg3_flag(tp, 5780_CLASS)) {
6933 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6934 udelay(10);
6935 tw32_f(MAC_RX_MODE, tp->rx_mode);
6938 mac_mode = tp->mac_mode &
6939 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6940 if (speed == SPEED_1000)
6941 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6942 else
6943 mac_mode |= MAC_MODE_PORT_MODE_MII;
6945 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6946 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6948 if (masked_phy_id == TG3_PHY_ID_BCM5401)
6949 mac_mode &= ~MAC_MODE_LINK_POLARITY;
6950 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6951 mac_mode |= MAC_MODE_LINK_POLARITY;
6953 tg3_writephy(tp, MII_TG3_EXT_CTRL,
6954 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6957 tw32(MAC_MODE, mac_mode);
6958 udelay(40);
6960 return 0;
6963 static void tg3_set_loopback(struct net_device *dev, u32 features)
6965 struct tg3 *tp = netdev_priv(dev);
6967 if (features & NETIF_F_LOOPBACK) {
6968 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6969 return;
6971 spin_lock_bh(&tp->lock);
6972 tg3_mac_loopback(tp, true);
6973 netif_carrier_on(tp->dev);
6974 spin_unlock_bh(&tp->lock);
6975 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6976 } else {
6977 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6978 return;
6980 spin_lock_bh(&tp->lock);
6981 tg3_mac_loopback(tp, false);
6982 /* Force link status check */
6983 tg3_setup_phy(tp, 1);
6984 spin_unlock_bh(&tp->lock);
6985 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6989 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6991 struct tg3 *tp = netdev_priv(dev);
6993 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6994 features &= ~NETIF_F_ALL_TSO;
6996 return features;
6999 static int tg3_set_features(struct net_device *dev, u32 features)
7001 u32 changed = dev->features ^ features;
7003 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7004 tg3_set_loopback(dev, features);
7006 return 0;
7009 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
7010 int new_mtu)
7012 dev->mtu = new_mtu;
7014 if (new_mtu > ETH_DATA_LEN) {
7015 if (tg3_flag(tp, 5780_CLASS)) {
7016 netdev_update_features(dev);
7017 tg3_flag_clear(tp, TSO_CAPABLE);
7018 } else {
7019 tg3_flag_set(tp, JUMBO_RING_ENABLE);
7021 } else {
7022 if (tg3_flag(tp, 5780_CLASS)) {
7023 tg3_flag_set(tp, TSO_CAPABLE);
7024 netdev_update_features(dev);
7026 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
7030 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
7032 struct tg3 *tp = netdev_priv(dev);
7033 int err;
7035 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
7036 return -EINVAL;
7038 if (!netif_running(dev)) {
7039 /* We'll just catch it later when the
7040 * device is brought up.
7041 */
7042 tg3_set_mtu(dev, tp, new_mtu);
7043 return 0;
7046 tg3_phy_stop(tp);
7048 tg3_netif_stop(tp);
7050 tg3_full_lock(tp, 1);
7052 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7054 tg3_set_mtu(dev, tp, new_mtu);
7056 err = tg3_restart_hw(tp, 0);
7058 if (!err)
7059 tg3_netif_start(tp);
7061 tg3_full_unlock(tp);
7063 if (!err)
7064 tg3_phy_start(tp);
7066 return err;
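/* Usage note (illustrative): this is the driver's ndo_change_mtu handler,
 * reached via e.g. "ip link set dev ethX mtu 9000". MTUs above
 * ETH_DATA_LEN push the 5780 class onto its TSO restrictions and enable
 * the jumbo producer ring on other chips (see tg3_set_mtu() above), and
 * the halt/restart sequence here follows the quiesce/reprogram/restart
 * pattern sketched near tg3_full_lock().
 */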
7069 static void tg3_rx_prodring_free(struct tg3 *tp,
7070 struct tg3_rx_prodring_set *tpr)
7072 int i;
7074 if (tpr != &tp->napi[0].prodring) {
7075 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7076 i = (i + 1) & tp->rx_std_ring_mask)
7077 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7078 tp->rx_pkt_map_sz);
7080 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7081 for (i = tpr->rx_jmb_cons_idx;
7082 i != tpr->rx_jmb_prod_idx;
7083 i = (i + 1) & tp->rx_jmb_ring_mask) {
7084 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7085 TG3_RX_JMB_MAP_SZ);
7089 return;
7092 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7093 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7094 tp->rx_pkt_map_sz);
7096 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7097 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7098 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7099 TG3_RX_JMB_MAP_SZ);
7103 /* Initialize rx rings for packet processing.
7104 *
7105 * The chip has been shut down and the driver detached from
7106 * the networking, so no interrupts or new tx packets will
7107 * end up in the driver. tp->{tx,}lock are held and thus
7108 * we may not sleep.
7109 */
7110 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7111 struct tg3_rx_prodring_set *tpr)
7113 u32 i, rx_pkt_dma_sz;
7115 tpr->rx_std_cons_idx = 0;
7116 tpr->rx_std_prod_idx = 0;
7117 tpr->rx_jmb_cons_idx = 0;
7118 tpr->rx_jmb_prod_idx = 0;
7120 if (tpr != &tp->napi[0].prodring) {
7121 memset(&tpr->rx_std_buffers[0], 0,
7122 TG3_RX_STD_BUFF_RING_SIZE(tp));
7123 if (tpr->rx_jmb_buffers)
7124 memset(&tpr->rx_jmb_buffers[0], 0,
7125 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7126 goto done;
7129 /* Zero out all descriptors. */
7130 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7132 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7133 if (tg3_flag(tp, 5780_CLASS) &&
7134 tp->dev->mtu > ETH_DATA_LEN)
7135 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7136 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7138 /* Initialize invariants of the rings, we only set this
7139 * stuff once. This works because the card does not
7140 * write into the rx buffer posting rings.
7141 */
7142 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7143 struct tg3_rx_buffer_desc *rxd;
7145 rxd = &tpr->rx_std[i];
7146 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7147 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7148 rxd->opaque = (RXD_OPAQUE_RING_STD |
7149 (i << RXD_OPAQUE_INDEX_SHIFT));
7152 /* Now allocate fresh SKBs for each rx ring. */
7153 for (i = 0; i < tp->rx_pending; i++) {
7154 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7155 netdev_warn(tp->dev,
7156 "Using a smaller RX standard ring. Only "
7157 "%d out of %d buffers were allocated "
7158 "successfully\n", i, tp->rx_pending);
7159 if (i == 0)
7160 goto initfail;
7161 tp->rx_pending = i;
7162 break;
7166 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7167 goto done;
7169 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7171 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7172 goto done;
7174 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7175 struct tg3_rx_buffer_desc *rxd;
7177 rxd = &tpr->rx_jmb[i].std;
7178 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7179 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7180 RXD_FLAG_JUMBO;
7181 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7182 (i << RXD_OPAQUE_INDEX_SHIFT));
7185 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7186 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7187 netdev_warn(tp->dev,
7188 "Using a smaller RX jumbo ring. Only %d "
7189 "out of %d buffers were allocated "
7190 "successfully\n", i, tp->rx_jumbo_pending);
7191 if (i == 0)
7192 goto initfail;
7193 tp->rx_jumbo_pending = i;
7194 break;
7198 done:
7199 return 0;
7201 initfail:
7202 tg3_rx_prodring_free(tp, tpr);
7203 return -ENOMEM;
7206 static void tg3_rx_prodring_fini(struct tg3 *tp,
7207 struct tg3_rx_prodring_set *tpr)
7209 kfree(tpr->rx_std_buffers);
7210 tpr->rx_std_buffers = NULL;
7211 kfree(tpr->rx_jmb_buffers);
7212 tpr->rx_jmb_buffers = NULL;
7213 if (tpr->rx_std) {
7214 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7215 tpr->rx_std, tpr->rx_std_mapping);
7216 tpr->rx_std = NULL;
7218 if (tpr->rx_jmb) {
7219 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7220 tpr->rx_jmb, tpr->rx_jmb_mapping);
7221 tpr->rx_jmb = NULL;
7225 static int tg3_rx_prodring_init(struct tg3 *tp,
7226 struct tg3_rx_prodring_set *tpr)
7228 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7229 GFP_KERNEL);
7230 if (!tpr->rx_std_buffers)
7231 return -ENOMEM;
7233 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7234 TG3_RX_STD_RING_BYTES(tp),
7235 &tpr->rx_std_mapping,
7236 GFP_KERNEL);
7237 if (!tpr->rx_std)
7238 goto err_out;
7240 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7241 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7242 GFP_KERNEL);
7243 if (!tpr->rx_jmb_buffers)
7244 goto err_out;
7246 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7247 TG3_RX_JMB_RING_BYTES(tp),
7248 &tpr->rx_jmb_mapping,
7249 GFP_KERNEL);
7250 if (!tpr->rx_jmb)
7251 goto err_out;
7254 return 0;
7256 err_out:
7257 tg3_rx_prodring_fini(tp, tpr);
7258 return -ENOMEM;
7261 /* Free up pending packets in all rx/tx rings.
7262 *
7263 * The chip has been shut down and the driver detached from
7264 * the networking, so no interrupts or new tx packets will
7265 * end up in the driver. tp->{tx,}lock is not held and we are not
7266 * in an interrupt context and thus may sleep.
7267 */
7268 static void tg3_free_rings(struct tg3 *tp)
7270 int i, j;
7272 for (j = 0; j < tp->irq_cnt; j++) {
7273 struct tg3_napi *tnapi = &tp->napi[j];
7275 tg3_rx_prodring_free(tp, &tnapi->prodring);
7277 if (!tnapi->tx_buffers)
7278 continue;
7280 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7281 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7283 if (!skb)
7284 continue;
7286 tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
7288 dev_kfree_skb_any(skb);
7293 /* Initialize tx/rx rings for packet processing.
7294 *
7295 * The chip has been shut down and the driver detached from
7296 * the networking, so no interrupts or new tx packets will
7297 * end up in the driver. tp->{tx,}lock are held and thus
7298 * we may not sleep.
7299 */
7300 static int tg3_init_rings(struct tg3 *tp)
7302 int i;
7304 /* Free up all the SKBs. */
7305 tg3_free_rings(tp);
7307 for (i = 0; i < tp->irq_cnt; i++) {
7308 struct tg3_napi *tnapi = &tp->napi[i];
7310 tnapi->last_tag = 0;
7311 tnapi->last_irq_tag = 0;
7312 tnapi->hw_status->status = 0;
7313 tnapi->hw_status->status_tag = 0;
7314 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7316 tnapi->tx_prod = 0;
7317 tnapi->tx_cons = 0;
7318 if (tnapi->tx_ring)
7319 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7321 tnapi->rx_rcb_ptr = 0;
7322 if (tnapi->rx_rcb)
7323 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7325 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7326 tg3_free_rings(tp);
7327 return -ENOMEM;
7331 return 0;
7334 /*
7335 * Must not be invoked with interrupt sources disabled and
7336 * the hardware shut down.
7337 */
7338 static void tg3_free_consistent(struct tg3 *tp)
7340 int i;
7342 for (i = 0; i < tp->irq_cnt; i++) {
7343 struct tg3_napi *tnapi = &tp->napi[i];
7345 if (tnapi->tx_ring) {
7346 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7347 tnapi->tx_ring, tnapi->tx_desc_mapping);
7348 tnapi->tx_ring = NULL;
7351 kfree(tnapi->tx_buffers);
7352 tnapi->tx_buffers = NULL;
7354 if (tnapi->rx_rcb) {
7355 dma_free_coherent(&tp->pdev->dev,
7356 TG3_RX_RCB_RING_BYTES(tp),
7357 tnapi->rx_rcb,
7358 tnapi->rx_rcb_mapping);
7359 tnapi->rx_rcb = NULL;
7362 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7364 if (tnapi->hw_status) {
7365 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7366 tnapi->hw_status,
7367 tnapi->status_mapping);
7368 tnapi->hw_status = NULL;
7372 if (tp->hw_stats) {
7373 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7374 tp->hw_stats, tp->stats_mapping);
7375 tp->hw_stats = NULL;
7379 /*
7380 * Must not be invoked with interrupt sources disabled and
7381 * the hardware shut down. Can sleep.
7382 */
7383 static int tg3_alloc_consistent(struct tg3 *tp)
7385 int i;
7387 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7388 sizeof(struct tg3_hw_stats),
7389 &tp->stats_mapping,
7390 GFP_KERNEL);
7391 if (!tp->hw_stats)
7392 goto err_out;
7394 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7396 for (i = 0; i < tp->irq_cnt; i++) {
7397 struct tg3_napi *tnapi = &tp->napi[i];
7398 struct tg3_hw_status *sblk;
7400 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7401 TG3_HW_STATUS_SIZE,
7402 &tnapi->status_mapping,
7403 GFP_KERNEL);
7404 if (!tnapi->hw_status)
7405 goto err_out;
7407 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7408 sblk = tnapi->hw_status;
7410 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7411 goto err_out;
7413 /* If multivector TSS is enabled, vector 0 does not handle
7414 * tx interrupts. Don't allocate any resources for it.
7415 */
7416 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7417 (i && tg3_flag(tp, ENABLE_TSS))) {
7418 tnapi->tx_buffers = kzalloc(
7419 sizeof(struct tg3_tx_ring_info) *
7420 TG3_TX_RING_SIZE, GFP_KERNEL);
7421 if (!tnapi->tx_buffers)
7422 goto err_out;
7424 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7425 TG3_TX_RING_BYTES,
7426 &tnapi->tx_desc_mapping,
7427 GFP_KERNEL);
7428 if (!tnapi->tx_ring)
7429 goto err_out;
7432 /*
7433 * When RSS is enabled, the status block format changes
7434 * slightly. The "rx_jumbo_consumer", "reserved",
7435 * and "rx_mini_consumer" members get mapped to the
7436 * other three rx return ring producer indexes.
7437 */
7438 switch (i) {
7439 default:
7440 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7441 break;
7442 case 2:
7443 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7444 break;
7445 case 3:
7446 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7447 break;
7448 case 4:
7449 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7450 break;
7453 /*
7454 * If multivector RSS is enabled, vector 0 does not handle
7455 * rx or tx interrupts. Don't allocate any resources for it.
7456 */
7457 if (!i && tg3_flag(tp, ENABLE_RSS))
7458 continue;
7460 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7461 TG3_RX_RCB_RING_BYTES(tp),
7462 &tnapi->rx_rcb_mapping,
7463 GFP_KERNEL);
7464 if (!tnapi->rx_rcb)
7465 goto err_out;
7467 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7470 return 0;
7472 err_out:
7473 tg3_free_consistent(tp);
7474 return -ENOMEM;
7477 #define MAX_WAIT_CNT 1000
7479 /* To stop a block, clear the enable bit and poll till it
7480 * clears. tp->lock is held.
7481 */
7482 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7484 unsigned int i;
7485 u32 val;
7487 if (tg3_flag(tp, 5705_PLUS)) {
7488 switch (ofs) {
7489 case RCVLSC_MODE:
7490 case DMAC_MODE:
7491 case MBFREE_MODE:
7492 case BUFMGR_MODE:
7493 case MEMARB_MODE:
7494 /* We can't enable/disable these bits of the
7495 * 5705/5750, just say success.
7496 */
7497 return 0;
7499 default:
7500 break;
7504 val = tr32(ofs);
7505 val &= ~enable_bit;
7506 tw32_f(ofs, val);
7508 for (i = 0; i < MAX_WAIT_CNT; i++) {
7509 udelay(100);
7510 val = tr32(ofs);
7511 if ((val & enable_bit) == 0)
7512 break;
7515 if (i == MAX_WAIT_CNT && !silent) {
7516 dev_err(&tp->pdev->dev,
7517 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7518 ofs, enable_bit);
7519 return -ENODEV;
7522 return 0;
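/* Timing note (illustrative): the poll loop above bounds the wait to
 * MAX_WAIT_CNT * 100us = 100ms before giving up on the block, logging
 * (unless the caller asked for silence) and returning -ENODEV.
 */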
7525 /* tp->lock is held. */
7526 static int tg3_abort_hw(struct tg3 *tp, int silent)
7528 int i, err;
7530 tg3_disable_ints(tp);
7532 tp->rx_mode &= ~RX_MODE_ENABLE;
7533 tw32_f(MAC_RX_MODE, tp->rx_mode);
7534 udelay(10);
7536 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7537 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7538 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7539 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7540 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7541 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7543 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7544 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7545 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7546 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7547 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7548 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7549 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7551 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7552 tw32_f(MAC_MODE, tp->mac_mode);
7553 udelay(40);
7555 tp->tx_mode &= ~TX_MODE_ENABLE;
7556 tw32_f(MAC_TX_MODE, tp->tx_mode);
7558 for (i = 0; i < MAX_WAIT_CNT; i++) {
7559 udelay(100);
7560 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7561 break;
7563 if (i >= MAX_WAIT_CNT) {
7564 dev_err(&tp->pdev->dev,
7565 "%s timed out, TX_MODE_ENABLE will not clear "
7566 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7567 err |= -ENODEV;
7570 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7571 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7572 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7574 tw32(FTQ_RESET, 0xffffffff);
7575 tw32(FTQ_RESET, 0x00000000);
7577 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7578 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7580 for (i = 0; i < tp->irq_cnt; i++) {
7581 struct tg3_napi *tnapi = &tp->napi[i];
7582 if (tnapi->hw_status)
7583 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7585 if (tp->hw_stats)
7586 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7588 return err;
7591 /* Save PCI command register before chip reset */
7592 static void tg3_save_pci_state(struct tg3 *tp)
7594 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7597 /* Restore PCI state after chip reset */
7598 static void tg3_restore_pci_state(struct tg3 *tp)
7600 u32 val;
7602 /* Re-enable indirect register accesses. */
7603 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7604 tp->misc_host_ctrl);
7606 /* Set MAX PCI retry to zero. */
7607 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7608 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7609 tg3_flag(tp, PCIX_MODE))
7610 val |= PCISTATE_RETRY_SAME_DMA;
7611 /* Allow reads and writes to the APE register and memory space. */
7612 if (tg3_flag(tp, ENABLE_APE))
7613 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7614 PCISTATE_ALLOW_APE_SHMEM_WR |
7615 PCISTATE_ALLOW_APE_PSPACE_WR;
7616 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7618 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7620 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7621 if (tg3_flag(tp, PCI_EXPRESS))
7622 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7623 else {
7624 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7625 tp->pci_cacheline_sz);
7626 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7627 tp->pci_lat_timer);
7631 /* Make sure PCI-X relaxed ordering bit is clear. */
7632 if (tg3_flag(tp, PCIX_MODE)) {
7633 u16 pcix_cmd;
7635 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7636 &pcix_cmd);
7637 pcix_cmd &= ~PCI_X_CMD_ERO;
7638 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7639 pcix_cmd);
7642 if (tg3_flag(tp, 5780_CLASS)) {
7644 /* Chip reset on 5780 will reset MSI enable bit,
7645 * so need to restore it.
7646 */
7647 if (tg3_flag(tp, USING_MSI)) {
7648 u16 ctrl;
7650 pci_read_config_word(tp->pdev,
7651 tp->msi_cap + PCI_MSI_FLAGS,
7652 &ctrl);
7653 pci_write_config_word(tp->pdev,
7654 tp->msi_cap + PCI_MSI_FLAGS,
7655 ctrl | PCI_MSI_FLAGS_ENABLE);
7656 val = tr32(MSGINT_MODE);
7657 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7662 /* tp->lock is held. */
7663 static int tg3_chip_reset(struct tg3 *tp)
7665 u32 val;
7666 void (*write_op)(struct tg3 *, u32, u32);
7667 int i, err;
7669 tg3_nvram_lock(tp);
7671 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7673 /* No matching tg3_nvram_unlock() after this because
7674 * chip reset below will undo the nvram lock.
7675 */
7676 tp->nvram_lock_cnt = 0;
7678 /* GRC_MISC_CFG core clock reset will clear the memory
7679 * enable bit in PCI register 4 and the MSI enable bit
7680 * on some chips, so we save relevant registers here.
7681 */
7682 tg3_save_pci_state(tp);
7684 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7685 tg3_flag(tp, 5755_PLUS))
7686 tw32(GRC_FASTBOOT_PC, 0);
7688 /*
7689 * We must avoid the readl() that normally takes place.
7690 * It locks machines, causes machine checks, and other
7691 * fun things. So, temporarily disable the 5701
7692 * hardware workaround, while we do the reset.
7693 */
7694 write_op = tp->write32;
7695 if (write_op == tg3_write_flush_reg32)
7696 tp->write32 = tg3_write32;
7698 /* Prevent the irq handler from reading or writing PCI registers
7699 * during chip reset when the memory enable bit in the PCI command
7700 * register may be cleared. The chip does not generate interrupts
7701 * at this time, but the irq handler may still be called due to irq
7702 * sharing or irqpoll.
7703 */
7704 tg3_flag_set(tp, CHIP_RESETTING);
7705 for (i = 0; i < tp->irq_cnt; i++) {
7706 struct tg3_napi *tnapi = &tp->napi[i];
7707 if (tnapi->hw_status) {
7708 tnapi->hw_status->status = 0;
7709 tnapi->hw_status->status_tag = 0;
7711 tnapi->last_tag = 0;
7712 tnapi->last_irq_tag = 0;
7714 smp_mb();
7716 for (i = 0; i < tp->irq_cnt; i++)
7717 synchronize_irq(tp->napi[i].irq_vec);
7719 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7720 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7721 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7724 /* do the reset */
7725 val = GRC_MISC_CFG_CORECLK_RESET;
7727 if (tg3_flag(tp, PCI_EXPRESS)) {
7728 /* Force PCIe 1.0a mode */
7729 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7730 !tg3_flag(tp, 57765_PLUS) &&
7731 tr32(TG3_PCIE_PHY_TSTCTL) ==
7732 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7733 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7735 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7736 tw32(GRC_MISC_CFG, (1 << 29));
7737 val |= (1 << 29);
7741 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7742 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7743 tw32(GRC_VCPU_EXT_CTRL,
7744 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7747 /* Manage gphy power for all CPMU absent PCIe devices. */
7748 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7749 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7751 tw32(GRC_MISC_CFG, val);
7753 /* restore 5701 hardware bug workaround write method */
7754 tp->write32 = write_op;
7756 /* Unfortunately, we have to delay before the PCI read back.
7757 * Some 575X chips even will not respond to a PCI cfg access
7758 * when the reset command is given to the chip.
7759 *
7760 * How do these hardware designers expect things to work
7761 * properly if the PCI write is posted for a long period
7762 * of time? It is always necessary to have some method by
7763 * which a register read back can occur to push the write
7764 * out which does the reset.
7765 *
7766 * For most tg3 variants the trick below was working.
7767 * Ho hum...
7768 */
7769 udelay(120);
7771 /* Flush PCI posted writes. The normal MMIO registers
7772 * are inaccessible at this time so this is the only
7773 * way to do this reliably (actually, this is no longer
7774 * the case, see above). I tried to use indirect
7775 * register read/write but this upset some 5701 variants.
7776 */
7777 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7779 udelay(120);
7781 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7782 u16 val16;
7784 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7785 int i;
7786 u32 cfg_val;
7788 /* Wait for link training to complete. */
7789 for (i = 0; i < 5000; i++)
7790 udelay(100);
7792 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7793 pci_write_config_dword(tp->pdev, 0xc4,
7794 cfg_val | (1 << 15));
7797 /* Clear the "no snoop" and "relaxed ordering" bits. */
7798 pci_read_config_word(tp->pdev,
7799 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7800 &val16);
7801 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7802 PCI_EXP_DEVCTL_NOSNOOP_EN);
7803 /*
7804 * Older PCIe devices only support the 128 byte
7805 * MPS setting. Enforce the restriction.
7806 */
7807 if (!tg3_flag(tp, CPMU_PRESENT))
7808 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7809 pci_write_config_word(tp->pdev,
7810 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7811 val16);
7813 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7815 /* Clear error status */
7816 pci_write_config_word(tp->pdev,
7817 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7818 PCI_EXP_DEVSTA_CED |
7819 PCI_EXP_DEVSTA_NFED |
7820 PCI_EXP_DEVSTA_FED |
7821 PCI_EXP_DEVSTA_URD);
7824 tg3_restore_pci_state(tp);
7826 tg3_flag_clear(tp, CHIP_RESETTING);
7827 tg3_flag_clear(tp, ERROR_PROCESSED);
7829 val = 0;
7830 if (tg3_flag(tp, 5780_CLASS))
7831 val = tr32(MEMARB_MODE);
7832 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7834 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7835 tg3_stop_fw(tp);
7836 tw32(0x5000, 0x400);
7839 tw32(GRC_MODE, tp->grc_mode);
7841 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7842 val = tr32(0xc4);
7844 tw32(0xc4, val | (1 << 15));
7847 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7848 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7849 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7850 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7851 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7852 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7855 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7856 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7857 val = tp->mac_mode;
7858 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7859 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7860 val = tp->mac_mode;
7861 } else
7862 val = 0;
7864 tw32_f(MAC_MODE, val);
7865 udelay(40);
7867 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7869 err = tg3_poll_fw(tp);
7870 if (err)
7871 return err;
7873 tg3_mdio_start(tp);
7875 if (tg3_flag(tp, PCI_EXPRESS) &&
7876 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7877 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7878 !tg3_flag(tp, 57765_PLUS)) {
7879 val = tr32(0x7c00);
7881 tw32(0x7c00, val | (1 << 25));
7884 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7885 val = tr32(TG3_CPMU_CLCK_ORIDE);
7886 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7889 /* Reprobe ASF enable state. */
7890 tg3_flag_clear(tp, ENABLE_ASF);
7891 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7892 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7893 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7894 u32 nic_cfg;
7896 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7897 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7898 tg3_flag_set(tp, ENABLE_ASF);
7899 tp->last_event_jiffies = jiffies;
7900 if (tg3_flag(tp, 5750_PLUS))
7901 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7905 return 0;
7908 /* tp->lock is held. */
7909 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7911 int err;
7913 tg3_stop_fw(tp);
7915 tg3_write_sig_pre_reset(tp, kind);
7917 tg3_abort_hw(tp, silent);
7918 err = tg3_chip_reset(tp);
7920 __tg3_set_mac_addr(tp, 0);
7922 tg3_write_sig_legacy(tp, kind);
7923 tg3_write_sig_post_reset(tp, kind);
7925 if (err)
7926 return err;
7928 return 0;
7931 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7933 struct tg3 *tp = netdev_priv(dev);
7934 struct sockaddr *addr = p;
7935 int err = 0, skip_mac_1 = 0;
7937 if (!is_valid_ether_addr(addr->sa_data))
7938 return -EINVAL;
7940 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7942 if (!netif_running(dev))
7943 return 0;
7945 if (tg3_flag(tp, ENABLE_ASF)) {
7946 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7948 addr0_high = tr32(MAC_ADDR_0_HIGH);
7949 addr0_low = tr32(MAC_ADDR_0_LOW);
7950 addr1_high = tr32(MAC_ADDR_1_HIGH);
7951 addr1_low = tr32(MAC_ADDR_1_LOW);
7953 /* Skip MAC addr 1 if ASF is using it. */
7954 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7955 !(addr1_high == 0 && addr1_low == 0))
7956 skip_mac_1 = 1;
7958 spin_lock_bh(&tp->lock);
7959 __tg3_set_mac_addr(tp, skip_mac_1);
7960 spin_unlock_bh(&tp->lock);
7962 return err;
7965 /* tp->lock is held. */
7966 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7967 dma_addr_t mapping, u32 maxlen_flags,
7968 u32 nic_addr)
7970 tg3_write_mem(tp,
7971 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7972 ((u64) mapping >> 32));
7973 tg3_write_mem(tp,
7974 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7975 ((u64) mapping & 0xffffffff));
7976 tg3_write_mem(tp,
7977 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7978 maxlen_flags);
7980 if (!tg3_flag(tp, 5705_PLUS))
7981 tg3_write_mem(tp,
7982 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7983 nic_addr);
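/* Layout sketch (illustrative, assuming the TG3_BDINFO_* offsets declared
 * in tg3.h): each BDINFO block in NIC SRAM that the helper above programs
 * is four 32-bit words:
 *
 *	+0x0  host ring DMA address, high 32 bits
 *	+0x4  host ring DMA address, low 32 bits
 *	+0x8  maxlen/flags word (ring size, BDINFO_FLAGS_*)
 *	+0xc  NIC-memory mirror address (pre-5705 chips only)
 */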
7986 static void __tg3_set_rx_mode(struct net_device *);
7987 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7989 int i;
7991 if (!tg3_flag(tp, ENABLE_TSS)) {
7992 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7993 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7994 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7995 } else {
7996 tw32(HOSTCC_TXCOL_TICKS, 0);
7997 tw32(HOSTCC_TXMAX_FRAMES, 0);
7998 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8001 if (!tg3_flag(tp, ENABLE_RSS)) {
8002 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8003 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8004 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8005 } else {
8006 tw32(HOSTCC_RXCOL_TICKS, 0);
8007 tw32(HOSTCC_RXMAX_FRAMES, 0);
8008 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8011 if (!tg3_flag(tp, 5705_PLUS)) {
8012 u32 val = ec->stats_block_coalesce_usecs;
8014 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8015 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8017 if (!netif_carrier_ok(tp->dev))
8018 val = 0;
8020 tw32(HOSTCC_STAT_COAL_TICKS, val);
8023 for (i = 0; i < tp->irq_cnt - 1; i++) {
8024 u32 reg;
8026 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8027 tw32(reg, ec->rx_coalesce_usecs);
8028 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8029 tw32(reg, ec->rx_max_coalesced_frames);
8030 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8031 tw32(reg, ec->rx_max_coalesced_frames_irq);
8033 if (tg3_flag(tp, ENABLE_TSS)) {
8034 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8035 tw32(reg, ec->tx_coalesce_usecs);
8036 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8037 tw32(reg, ec->tx_max_coalesced_frames);
8038 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8039 tw32(reg, ec->tx_max_coalesced_frames_irq);
8043 for (; i < tp->irq_max - 1; i++) {
8044 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8045 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8046 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8048 if (tg3_flag(tp, ENABLE_TSS)) {
8049 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8050 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8051 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
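/* Register-layout note (illustrative): the per-vector host coalescing
 * registers are laid out in 0x18-byte strides, so for MSI-X vector n
 * (n >= 1) the rx ticks register is HOSTCC_RXCOL_TICKS_VEC1 +
 * (n - 1) * 0x18, which is exactly what the i-indexed loops above
 * compute; the second loop zeroes the blocks of any unused vectors.
 */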
8056 /* tp->lock is held. */
8057 static void tg3_rings_reset(struct tg3 *tp)
8059 int i;
8060 u32 stblk, txrcb, rxrcb, limit;
8061 struct tg3_napi *tnapi = &tp->napi[0];
8063 /* Disable all transmit rings but the first. */
8064 if (!tg3_flag(tp, 5705_PLUS))
8065 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8066 else if (tg3_flag(tp, 5717_PLUS))
8067 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8068 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8069 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8070 else
8071 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8073 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8074 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8075 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8076 BDINFO_FLAGS_DISABLED);
8079 /* Disable all receive return rings but the first. */
8080 if (tg3_flag(tp, 5717_PLUS))
8081 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8082 else if (!tg3_flag(tp, 5705_PLUS))
8083 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8084 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8085 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8086 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8087 else
8088 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8090 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8091 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8092 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8093 BDINFO_FLAGS_DISABLED);
8095 /* Disable interrupts */
8096 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8097 tp->napi[0].chk_msi_cnt = 0;
8098 tp->napi[0].last_rx_cons = 0;
8099 tp->napi[0].last_tx_cons = 0;
8101 /* Zero mailbox registers. */
8102 if (tg3_flag(tp, SUPPORT_MSIX)) {
8103 for (i = 1; i < tp->irq_max; i++) {
8104 tp->napi[i].tx_prod = 0;
8105 tp->napi[i].tx_cons = 0;
8106 if (tg3_flag(tp, ENABLE_TSS))
8107 tw32_mailbox(tp->napi[i].prodmbox, 0);
8108 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8109 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8110 tp->napi[i].chk_msi_cnt = 0;
8111 tp->napi[i].last_rx_cons = 0;
8112 tp->napi[i].last_tx_cons = 0;
8114 if (!tg3_flag(tp, ENABLE_TSS))
8115 tw32_mailbox(tp->napi[0].prodmbox, 0);
8116 } else {
8117 tp->napi[0].tx_prod = 0;
8118 tp->napi[0].tx_cons = 0;
8119 tw32_mailbox(tp->napi[0].prodmbox, 0);
8120 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8123 /* Make sure the NIC-based send BD rings are disabled. */
8124 if (!tg3_flag(tp, 5705_PLUS)) {
8125 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8126 for (i = 0; i < 16; i++)
8127 tw32_tx_mbox(mbox + i * 8, 0);
8130 txrcb = NIC_SRAM_SEND_RCB;
8131 rxrcb = NIC_SRAM_RCV_RET_RCB;
8133 /* Clear status block in ram. */
8134 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8136 /* Set status block DMA address */
8137 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8138 ((u64) tnapi->status_mapping >> 32));
8139 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8140 ((u64) tnapi->status_mapping & 0xffffffff));
8142 if (tnapi->tx_ring) {
8143 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8144 (TG3_TX_RING_SIZE <<
8145 BDINFO_FLAGS_MAXLEN_SHIFT),
8146 NIC_SRAM_TX_BUFFER_DESC);
8147 txrcb += TG3_BDINFO_SIZE;
8150 if (tnapi->rx_rcb) {
8151 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8152 (tp->rx_ret_ring_mask + 1) <<
8153 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8154 rxrcb += TG3_BDINFO_SIZE;
8157 stblk = HOSTCC_STATBLCK_RING1;
8159 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8160 u64 mapping = (u64)tnapi->status_mapping;
8161 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8162 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8164 /* Clear status block in ram. */
8165 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8167 if (tnapi->tx_ring) {
8168 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8169 (TG3_TX_RING_SIZE <<
8170 BDINFO_FLAGS_MAXLEN_SHIFT),
8171 NIC_SRAM_TX_BUFFER_DESC);
8172 txrcb += TG3_BDINFO_SIZE;
8175 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8176 ((tp->rx_ret_ring_mask + 1) <<
8177 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8179 stblk += 8;
8180 rxrcb += TG3_BDINFO_SIZE;
8184 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8186 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8188 if (!tg3_flag(tp, 5750_PLUS) ||
8189 tg3_flag(tp, 5780_CLASS) ||
8190 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8191 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8192 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8193 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8194 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8195 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8196 else
8197 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8199 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8200 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8202 val = min(nic_rep_thresh, host_rep_thresh);
8203 tw32(RCVBDI_STD_THRESH, val);
8205 if (tg3_flag(tp, 57765_PLUS))
8206 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8208 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8209 return;
8211 if (!tg3_flag(tp, 5705_PLUS))
8212 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8213 else
8214 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8216 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8218 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8219 tw32(RCVBDI_JUMBO_THRESH, val);
8221 if (tg3_flag(tp, 57765_PLUS))
8222 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
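/* Worked example (illustrative): with tp->rx_pending = 200,
 * host_rep_thresh = max(200 / 8, 1) = 25, and the value written to
 * RCVBDI_STD_THRESH is the smaller of that and the NIC-side limit
 * min(bdcache_maxcnt / 2, tp->rx_std_max_post), so the host is asked to
 * replenish well before the on-chip BD cache can run dry.
 */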
8225 /* tp->lock is held. */
8226 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8228 u32 val, rdmac_mode;
8229 int i, err, limit;
8230 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8232 tg3_disable_ints(tp);
8234 tg3_stop_fw(tp);
8236 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8238 if (tg3_flag(tp, INIT_COMPLETE))
8239 tg3_abort_hw(tp, 1);
8241 /* Enable MAC control of LPI */
8242 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8243 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8244 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8245 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8247 tw32_f(TG3_CPMU_EEE_CTRL,
8248 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8250 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8251 TG3_CPMU_EEEMD_LPI_IN_TX |
8252 TG3_CPMU_EEEMD_LPI_IN_RX |
8253 TG3_CPMU_EEEMD_EEE_ENABLE;
8255 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8256 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8258 if (tg3_flag(tp, ENABLE_APE))
8259 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8261 tw32_f(TG3_CPMU_EEE_MODE, val);
8263 tw32_f(TG3_CPMU_EEE_DBTMR1,
8264 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8265 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8267 tw32_f(TG3_CPMU_EEE_DBTMR2,
8268 TG3_CPMU_DBTMR2_APE_TX_2047US |
8269 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8272 if (reset_phy)
8273 tg3_phy_reset(tp);
8275 err = tg3_chip_reset(tp);
8276 if (err)
8277 return err;
8279 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8281 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8282 val = tr32(TG3_CPMU_CTRL);
8283 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8284 tw32(TG3_CPMU_CTRL, val);
8286 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8287 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8288 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8289 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8291 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8292 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8293 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8294 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8296 val = tr32(TG3_CPMU_HST_ACC);
8297 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8298 val |= CPMU_HST_ACC_MACCLK_6_25;
8299 tw32(TG3_CPMU_HST_ACC, val);
8302 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8303 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8304 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8305 PCIE_PWR_MGMT_L1_THRESH_4MS;
8306 tw32(PCIE_PWR_MGMT_THRESH, val);
8308 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8309 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8311 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8313 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8314 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8317 if (tg3_flag(tp, L1PLLPD_EN)) {
8318 u32 grc_mode = tr32(GRC_MODE);
8320 /* Access the lower 1K of PL PCIE block registers. */
8321 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8322 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8324 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8325 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8326 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8328 tw32(GRC_MODE, grc_mode);
8331 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8332 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8333 u32 grc_mode = tr32(GRC_MODE);
8335 /* Access the lower 1K of PL PCIE block registers. */
8336 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8337 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8339 val = tr32(TG3_PCIE_TLDLPL_PORT +
8340 TG3_PCIE_PL_LO_PHYCTL5);
8341 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8342 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8344 tw32(GRC_MODE, grc_mode);
8347 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8348 u32 grc_mode = tr32(GRC_MODE);
8350 /* Access the lower 1K of DL PCIE block registers. */
8351 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8352 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8354 val = tr32(TG3_PCIE_TLDLPL_PORT +
8355 TG3_PCIE_DL_LO_FTSMAX);
8356 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8357 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8358 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8360 tw32(GRC_MODE, grc_mode);
8363 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8364 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8365 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8366 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8369 /* This works around an issue with Athlon chipsets on
8370 * B3 tigon3 silicon. This bit has no effect on any
8371 * other revision. But do not set this on PCI Express
8372 * chips and don't even touch the clocks if the CPMU is present.
8374 if (!tg3_flag(tp, CPMU_PRESENT)) {
8375 if (!tg3_flag(tp, PCI_EXPRESS))
8376 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8377 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8380 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8381 tg3_flag(tp, PCIX_MODE)) {
8382 val = tr32(TG3PCI_PCISTATE);
8383 val |= PCISTATE_RETRY_SAME_DMA;
8384 tw32(TG3PCI_PCISTATE, val);
8387 if (tg3_flag(tp, ENABLE_APE)) {
8388 /* Allow reads and writes to the
8389 * APE register and memory space.
8391 val = tr32(TG3PCI_PCISTATE);
8392 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8393 PCISTATE_ALLOW_APE_SHMEM_WR |
8394 PCISTATE_ALLOW_APE_PSPACE_WR;
8395 tw32(TG3PCI_PCISTATE, val);
8398 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8399 /* Enable some hw fixes. */
8400 val = tr32(TG3PCI_MSI_DATA);
8401 val |= (1 << 26) | (1 << 28) | (1 << 29);
8402 tw32(TG3PCI_MSI_DATA, val);
8405 /* Descriptor ring init may make accesses to the
8406 * NIC SRAM area to set up the TX descriptors, so we
8407 * can only do this after the hardware has been
8408 * successfully reset.
8410 err = tg3_init_rings(tp);
8411 if (err)
8412 return err;
8414 if (tg3_flag(tp, 57765_PLUS)) {
8415 val = tr32(TG3PCI_DMA_RW_CTRL) &
8416 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8417 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8418 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8419 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8420 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8421 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8422 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8423 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8424 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8425 /* This value is determined during the probe time DMA
8426 * engine test, tg3_test_dma.
8428 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8431 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8432 GRC_MODE_4X_NIC_SEND_RINGS |
8433 GRC_MODE_NO_TX_PHDR_CSUM |
8434 GRC_MODE_NO_RX_PHDR_CSUM);
8435 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8437 /* Pseudo-header checksum is done by hardware logic and not
8438 * the offload processors, so make the chip do the pseudo-
8439 * header checksums on receive. For transmit it is more
8440 * convenient to do the pseudo-header checksum in software
8441 * as Linux does that on transmit for us in all cases.
8443 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8445 tw32(GRC_MODE,
8446 tp->grc_mode |
8447 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8449 /* Set up the timer prescaler register. The clock is always 66MHz. */
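/* A prescaler value of 65 presumably divides the 66MHz clock by
 * (65 + 1), giving the timer a 1MHz (one tick per microsecond) rate.
 */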
8450 val = tr32(GRC_MISC_CFG);
8451 val &= ~0xff;
8452 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8453 tw32(GRC_MISC_CFG, val);
8455 /* Initialize MBUF/DESC pool. */
8456 if (tg3_flag(tp, 5750_PLUS)) {
8457 /* Do nothing. */
8458 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8459 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8460 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8461 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8462 else
8463 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8464 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8465 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8466 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8467 int fw_len;
8469 fw_len = tp->fw_len;
8470 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8471 tw32(BUFMGR_MB_POOL_ADDR,
8472 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8473 tw32(BUFMGR_MB_POOL_SIZE,
8474 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8477 if (tp->dev->mtu <= ETH_DATA_LEN) {
8478 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8479 tp->bufmgr_config.mbuf_read_dma_low_water);
8480 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8481 tp->bufmgr_config.mbuf_mac_rx_low_water);
8482 tw32(BUFMGR_MB_HIGH_WATER,
8483 tp->bufmgr_config.mbuf_high_water);
8484 } else {
8485 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8486 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8487 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8488 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8489 tw32(BUFMGR_MB_HIGH_WATER,
8490 tp->bufmgr_config.mbuf_high_water_jumbo);
8492 tw32(BUFMGR_DMA_LOW_WATER,
8493 tp->bufmgr_config.dma_low_water);
8494 tw32(BUFMGR_DMA_HIGH_WATER,
8495 tp->bufmgr_config.dma_high_water);
8497 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8498 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8499 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8500 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8501 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8502 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8503 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8504 tw32(BUFMGR_MODE, val);
8505 for (i = 0; i < 2000; i++) {
8506 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8507 break;
8508 udelay(10);
8510 if (i >= 2000) {
8511 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8512 return -ENODEV;
8515 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8516 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8518 tg3_setup_rxbd_thresholds(tp);
8520 /* Initialize the TG3_BDINFOs at:
8521 * RCVDBDI_STD_BD: standard eth size rx ring
8522 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8523 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8525 * like so:
8526 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8527 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8528 * ring attribute flags
8529 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8531 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8532 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8534 * The size of each ring is fixed in the firmware, but the location is
8535 * configurable.
8537 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8538 ((u64) tpr->rx_std_mapping >> 32));
8539 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8540 ((u64) tpr->rx_std_mapping & 0xffffffff));
8541 if (!tg3_flag(tp, 5717_PLUS))
8542 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8543 NIC_SRAM_RX_BUFFER_DESC);
8545 /* Disable the mini ring */
8546 if (!tg3_flag(tp, 5705_PLUS))
8547 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8548 BDINFO_FLAGS_DISABLED);
8550 /* Program the jumbo buffer descriptor ring control
8551 * blocks on those devices that have them.
8553 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8554 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8556 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8557 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8558 ((u64) tpr->rx_jmb_mapping >> 32));
8559 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8560 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8561 val = TG3_RX_JMB_RING_SIZE(tp) <<
8562 BDINFO_FLAGS_MAXLEN_SHIFT;
8563 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8564 val | BDINFO_FLAGS_USE_EXT_RECV);
8565 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8566 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8567 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8568 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8569 } else {
8570 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8571 BDINFO_FLAGS_DISABLED);
8574 if (tg3_flag(tp, 57765_PLUS)) {
8575 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8576 val = TG3_RX_STD_MAX_SIZE_5700;
8577 else
8578 val = TG3_RX_STD_MAX_SIZE_5717;
8579 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8580 val |= (TG3_RX_STD_DMA_SZ << 2);
8581 } else
8582 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8583 } else
8584 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8586 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
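/* Publish the initial producer indices.  tg3_init_rings() has already
 * posted rx_pending standard buffers (and rx_jumbo_pending jumbo
 * buffers when the jumbo ring is enabled); the mailbox writes below
 * tell the chip how far the host has filled each ring.
 */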
8588 tpr->rx_std_prod_idx = tp->rx_pending;
8589 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8591 tpr->rx_jmb_prod_idx =
8592 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8593 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8595 tg3_rings_reset(tp);
8597 /* Initialize MAC address and backoff seed. */
8598 __tg3_set_mac_addr(tp, 0);
8600 /* MTU + ethernet header + FCS + optional VLAN tag */
8601 tw32(MAC_RX_MTU_SIZE,
8602 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8604 /* The slot time is changed by tg3_setup_phy if we
8605 * run at gigabit with half duplex.
8607 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8608 (6 << TX_LENGTHS_IPG_SHIFT) |
8609 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8611 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8612 val |= tr32(MAC_TX_LENGTHS) &
8613 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8614 TX_LENGTHS_CNT_DWN_VAL_MSK);
8616 tw32(MAC_TX_LENGTHS, val);
8618 /* Receive rules. */
8619 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8620 tw32(RCVLPC_CONFIG, 0x0181);
8622 /* Calculate RDMAC_MODE setting early, we need it to determine
8623 * the RCVLPC_STATE_ENABLE mask.
8625 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8626 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8627 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8628 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8629 RDMAC_MODE_LNGREAD_ENAB);
8631 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8632 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8634 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8635 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8636 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8637 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8638 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8639 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8641 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8642 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8643 if (tg3_flag(tp, TSO_CAPABLE) &&
8644 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8645 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8646 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8647 !tg3_flag(tp, IS_5788)) {
8648 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8652 if (tg3_flag(tp, PCI_EXPRESS))
8653 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8655 if (tg3_flag(tp, HW_TSO_1) ||
8656 tg3_flag(tp, HW_TSO_2) ||
8657 tg3_flag(tp, HW_TSO_3))
8658 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8660 if (tg3_flag(tp, 57765_PLUS) ||
8661 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8662 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8663 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8665 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8666 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8668 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8669 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8670 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8671 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8672 tg3_flag(tp, 57765_PLUS)) {
8673 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8674 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8675 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8676 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8677 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8678 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8679 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8680 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8681 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8683 tw32(TG3_RDMA_RSRVCTRL_REG,
8684 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8687 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8688 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8689 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8690 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8691 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8692 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8695 /* Receive/send statistics. */
8696 if (tg3_flag(tp, 5750_PLUS)) {
8697 val = tr32(RCVLPC_STATS_ENABLE);
8698 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8699 tw32(RCVLPC_STATS_ENABLE, val);
8700 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8701 tg3_flag(tp, TSO_CAPABLE)) {
8702 val = tr32(RCVLPC_STATS_ENABLE);
8703 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8704 tw32(RCVLPC_STATS_ENABLE, val);
8705 } else {
8706 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8708 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8709 tw32(SNDDATAI_STATSENAB, 0xffffff);
8710 tw32(SNDDATAI_STATSCTRL,
8711 (SNDDATAI_SCTRL_ENABLE |
8712 SNDDATAI_SCTRL_FASTUPD));
8714 /* Setup host coalescing engine. */
8715 tw32(HOSTCC_MODE, 0);
8716 for (i = 0; i < 2000; i++) {
8717 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8718 break;
8719 udelay(10);
8722 __tg3_set_coalesce(tp, &tp->coal);
8724 if (!tg3_flag(tp, 5705_PLUS)) {
8725 /* Status/statistics block address. See tg3_timer,
8726 * the tg3_periodic_fetch_stats call there, and
8727 * tg3_get_stats to see how this works for 5705/5750 chips.
8729 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8730 ((u64) tp->stats_mapping >> 32));
8731 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8732 ((u64) tp->stats_mapping & 0xffffffff));
8733 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8735 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8737 /* Clear statistics and status block memory areas */
8738 for (i = NIC_SRAM_STATS_BLK;
8739 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8740 i += sizeof(u32)) {
8741 tg3_write_mem(tp, i, 0);
8742 udelay(40);
8746 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8748 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8749 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8750 if (!tg3_flag(tp, 5705_PLUS))
8751 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8753 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8754 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8755 /* reset to prevent losing 1st rx packet intermittently */
8756 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8757 udelay(10);
8760 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8761 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8762 MAC_MODE_FHDE_ENABLE;
8763 if (tg3_flag(tp, ENABLE_APE))
8764 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8765 if (!tg3_flag(tp, 5705_PLUS) &&
8766 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8767 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8768 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8769 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8770 udelay(40);
8772 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8773 * If TG3_FLAG_IS_NIC is zero, we should read the
8774 * register to preserve the GPIO settings for LOMs. The GPIOs,
8775 * whether used as inputs or outputs, are set by boot code after
8776 * reset.
8778 if (!tg3_flag(tp, IS_NIC)) {
8779 u32 gpio_mask;
8781 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8782 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8783 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8785 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8786 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8787 GRC_LCLCTRL_GPIO_OUTPUT3;
8789 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8790 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8792 tp->grc_local_ctrl &= ~gpio_mask;
8793 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8795 /* GPIO1 must be driven high for eeprom write protect */
8796 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8797 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8798 GRC_LCLCTRL_GPIO_OUTPUT1);
8800 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8801 udelay(100);
8803 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8804 val = tr32(MSGINT_MODE);
8805 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8806 if (!tg3_flag(tp, 1SHOT_MSI))
8807 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8808 tw32(MSGINT_MODE, val);
8811 if (!tg3_flag(tp, 5705_PLUS)) {
8812 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8813 udelay(40);
8816 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8817 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8818 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8819 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8820 WDMAC_MODE_LNGREAD_ENAB);
8822 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8823 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8824 if (tg3_flag(tp, TSO_CAPABLE) &&
8825 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8826 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8827 /* nothing */
8828 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8829 !tg3_flag(tp, IS_5788)) {
8830 val |= WDMAC_MODE_RX_ACCEL;
8834 /* Enable host coalescing bug fix */
8835 if (tg3_flag(tp, 5755_PLUS))
8836 val |= WDMAC_MODE_STATUS_TAG_FIX;
8838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8839 val |= WDMAC_MODE_BURST_ALL_DATA;
8841 tw32_f(WDMAC_MODE, val);
8842 udelay(40);
8844 if (tg3_flag(tp, PCIX_MODE)) {
8845 u16 pcix_cmd;
8847 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8848 &pcix_cmd);
8849 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8850 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8851 pcix_cmd |= PCI_X_CMD_READ_2K;
8852 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8853 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8854 pcix_cmd |= PCI_X_CMD_READ_2K;
8856 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8857 pcix_cmd);
8860 tw32_f(RDMAC_MODE, rdmac_mode);
8861 udelay(40);
8863 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8864 if (!tg3_flag(tp, 5705_PLUS))
8865 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8867 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8868 tw32(SNDDATAC_MODE,
8869 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8870 else
8871 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8873 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8874 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8875 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8876 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8877 val |= RCVDBDI_MODE_LRG_RING_SZ;
8878 tw32(RCVDBDI_MODE, val);
8879 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8880 if (tg3_flag(tp, HW_TSO_1) ||
8881 tg3_flag(tp, HW_TSO_2) ||
8882 tg3_flag(tp, HW_TSO_3))
8883 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8884 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8885 if (tg3_flag(tp, ENABLE_TSS))
8886 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8887 tw32(SNDBDI_MODE, val);
8888 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8890 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8891 err = tg3_load_5701_a0_firmware_fix(tp);
8892 if (err)
8893 return err;
8896 if (tg3_flag(tp, TSO_CAPABLE)) {
8897 err = tg3_load_tso_firmware(tp);
8898 if (err)
8899 return err;
8902 tp->tx_mode = TX_MODE_ENABLE;
8904 if (tg3_flag(tp, 5755_PLUS) ||
8905 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8906 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8908 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8909 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8910 tp->tx_mode &= ~val;
8911 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8914 tw32_f(MAC_TX_MODE, tp->tx_mode);
8915 udelay(100);
8917 if (tg3_flag(tp, ENABLE_RSS)) {
8918 int i = 0;
8919 u32 reg = MAC_RSS_INDIR_TBL_0;
8921 if (tp->irq_cnt == 2) {
8922 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8923 tw32(reg, 0x0);
8924 reg += 4;
8926 } else {
8927 u32 val;
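/* Pack the RSS indirection table: each 32-bit register holds eight
 * 4-bit ring indices, and the loop below assigns indices round-robin
 * across the (irq_cnt - 1) rx rings, since vector 0 is reserved for
 * link interrupts.
 */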
8929 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8930 val = i % (tp->irq_cnt - 1);
8931 i++;
8932 for (; i % 8; i++) {
8933 val <<= 4;
8934 val |= (i % (tp->irq_cnt - 1));
8936 tw32(reg, val);
8937 reg += 4;
8941 /* Set up the "secret" hash key. */
8942 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8943 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8944 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8945 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8946 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8947 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8948 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8949 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8950 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8951 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8954 tp->rx_mode = RX_MODE_ENABLE;
8955 if (tg3_flag(tp, 5755_PLUS))
8956 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8958 if (tg3_flag(tp, ENABLE_RSS))
8959 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8960 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8961 RX_MODE_RSS_IPV6_HASH_EN |
8962 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8963 RX_MODE_RSS_IPV4_HASH_EN |
8964 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8966 tw32_f(MAC_RX_MODE, tp->rx_mode);
8967 udelay(10);
8969 tw32(MAC_LED_CTRL, tp->led_ctrl);
8971 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8972 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8973 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8974 udelay(10);
8976 tw32_f(MAC_RX_MODE, tp->rx_mode);
8977 udelay(10);
8979 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8980 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8981 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8982 /* Set drive transmission level to 1.2V */
8983 /* only if the signal pre-emphasis bit is not set */
8984 val = tr32(MAC_SERDES_CFG);
8985 val &= 0xfffff000;
8986 val |= 0x880;
8987 tw32(MAC_SERDES_CFG, val);
8989 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8990 tw32(MAC_SERDES_CFG, 0x616000);
8993 /* Prevent chip from dropping frames when flow control
8994 * is enabled.
8996 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8997 val = 1;
8998 else
8999 val = 2;
9000 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9002 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9003 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9004 /* Use hardware link auto-negotiation */
9005 tg3_flag_set(tp, HW_AUTONEG);
9008 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9009 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9010 u32 tmp;
9012 tmp = tr32(SERDES_RX_CTRL);
9013 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9014 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9015 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9016 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9019 if (!tg3_flag(tp, USE_PHYLIB)) {
9020 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9021 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9022 tp->link_config.speed = tp->link_config.orig_speed;
9023 tp->link_config.duplex = tp->link_config.orig_duplex;
9024 tp->link_config.autoneg = tp->link_config.orig_autoneg;
9027 err = tg3_setup_phy(tp, 0);
9028 if (err)
9029 return err;
9031 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9032 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9033 u32 tmp;
9035 /* Clear CRC stats. */
9036 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9037 tg3_writephy(tp, MII_TG3_TEST1,
9038 tmp | MII_TG3_TEST1_CRC_EN);
9039 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9044 __tg3_set_rx_mode(tp->dev);
9046 /* Initialize receive rules. */
9047 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9048 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9049 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9050 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9052 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9053 limit = 8;
9054 else
9055 limit = 16;
9056 if (tg3_flag(tp, ENABLE_ASF))
9057 limit -= 4;
9058 switch (limit) {
9059 case 16:
9060 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9061 case 15:
9062 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9063 case 14:
9064 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9065 case 13:
9066 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9067 case 12:
9068 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9069 case 11:
9070 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9071 case 10:
9072 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9073 case 9:
9074 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9075 case 8:
9076 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9077 case 7:
9078 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9079 case 6:
9080 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9081 case 5:
9082 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9083 case 4:
9084 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9085 case 3:
9086 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9087 case 2:
9088 case 1:
9090 default:
9091 break;
9094 if (tg3_flag(tp, ENABLE_APE))
9095 /* Write our heartbeat update interval to APE. */
9096 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9097 APE_HOST_HEARTBEAT_INT_DISABLE);
9099 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9101 return 0;
9104 /* Called at device open time to get the chip ready for
9105 * packet processing. Invoked with tp->lock held.
9107 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9109 tg3_switch_clocks(tp);
9111 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9113 return tg3_reset_hw(tp, reset_phy);
9116 #define TG3_STAT_ADD32(PSTAT, REG) \
9117 do { u32 __val = tr32(REG); \
9118 (PSTAT)->low += __val; \
9119 if ((PSTAT)->low < __val) \
9120 (PSTAT)->high += 1; \
9121 } while (0)
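/* TG3_STAT_ADD32 accumulates a 32-bit hardware counter into a 64-bit
 * high/low software counter.  The unsigned comparison after the add,
 * (PSTAT)->low < __val, is the standard carry test: it is true exactly
 * when the low word wrapped, in which case the high word is bumped.
 */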
9123 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9125 struct tg3_hw_stats *sp = tp->hw_stats;
9127 if (!netif_carrier_ok(tp->dev))
9128 return;
9130 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9131 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9132 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9133 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9134 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9135 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9136 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9137 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9138 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9139 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9140 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9141 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9142 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9144 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9145 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9146 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9147 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9148 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9149 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9150 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9151 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9152 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9153 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9154 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9155 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9156 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9157 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9159 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9160 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9161 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9162 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9163 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9164 } else {
9165 u32 val = tr32(HOSTCC_FLOW_ATTN);
9166 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9167 if (val) {
9168 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9169 sp->rx_discards.low += val;
9170 if (sp->rx_discards.low < val)
9171 sp->rx_discards.high += 1;
9173 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9175 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
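/* Work around lost MSIs: if a vector still has work pending but
 * neither its rx nor its tx consumer index has advanced since the
 * previous timer tick, assume the interrupt was missed and, after one
 * tick of grace (chk_msi_cnt), call the handler directly.
 */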
9178 static void tg3_chk_missed_msi(struct tg3 *tp)
9180 u32 i;
9182 for (i = 0; i < tp->irq_cnt; i++) {
9183 struct tg3_napi *tnapi = &tp->napi[i];
9185 if (tg3_has_work(tnapi)) {
9186 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9187 tnapi->last_tx_cons == tnapi->tx_cons) {
9188 if (tnapi->chk_msi_cnt < 1) {
9189 tnapi->chk_msi_cnt++;
9190 return;
9192 tg3_msi(0, tnapi);
9195 tnapi->chk_msi_cnt = 0;
9196 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9197 tnapi->last_tx_cons = tnapi->tx_cons;
9201 static void tg3_timer(unsigned long __opaque)
9203 struct tg3 *tp = (struct tg3 *) __opaque;
9205 if (tp->irq_sync)
9206 goto restart_timer;
9208 spin_lock(&tp->lock);
9210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9211 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9212 tg3_chk_missed_msi(tp);
9214 if (!tg3_flag(tp, TAGGED_STATUS)) {
9215 /* All of this garbage exists because, when using non-tagged
9216 * IRQ status, the mailbox/status_block protocol the chip
9217 * uses with the CPU is race prone.
9219 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9220 tw32(GRC_LOCAL_CTRL,
9221 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9222 } else {
9223 tw32(HOSTCC_MODE, tp->coalesce_mode |
9224 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9227 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9228 tg3_flag_set(tp, RESTART_TIMER);
9229 spin_unlock(&tp->lock);
9230 schedule_work(&tp->reset_task);
9231 return;
9235 /* This part only runs once per second. */
9236 if (!--tp->timer_counter) {
9237 if (tg3_flag(tp, 5705_PLUS))
9238 tg3_periodic_fetch_stats(tp);
9240 if (tp->setlpicnt && !--tp->setlpicnt)
9241 tg3_phy_eee_enable(tp);
9243 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9244 u32 mac_stat;
9245 int phy_event;
9247 mac_stat = tr32(MAC_STATUS);
9249 phy_event = 0;
9250 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9251 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9252 phy_event = 1;
9253 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9254 phy_event = 1;
9256 if (phy_event)
9257 tg3_setup_phy(tp, 0);
9258 } else if (tg3_flag(tp, POLL_SERDES)) {
9259 u32 mac_stat = tr32(MAC_STATUS);
9260 int need_setup = 0;
9262 if (netif_carrier_ok(tp->dev) &&
9263 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9264 need_setup = 1;
9266 if (!netif_carrier_ok(tp->dev) &&
9267 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9268 MAC_STATUS_SIGNAL_DET))) {
9269 need_setup = 1;
9271 if (need_setup) {
9272 if (!tp->serdes_counter) {
9273 tw32_f(MAC_MODE,
9274 (tp->mac_mode &
9275 ~MAC_MODE_PORT_MODE_MASK));
9276 udelay(40);
9277 tw32_f(MAC_MODE, tp->mac_mode);
9278 udelay(40);
9280 tg3_setup_phy(tp, 0);
9282 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9283 tg3_flag(tp, 5780_CLASS)) {
9284 tg3_serdes_parallel_detect(tp);
9287 tp->timer_counter = tp->timer_multiplier;
9290 /* Heartbeat is only sent once every 2 seconds.
9292 * The heartbeat is to tell the ASF firmware that the host
9293 * driver is still alive. In the event that the OS crashes,
9294 * ASF needs to reset the hardware to free up the FIFO space
9295 * that may be filled with rx packets destined for the host.
9296 * If the FIFO is full, ASF will no longer function properly.
9298 * Unintended resets have been reported on real time kernels
9299 * where the timer doesn't run on time. Netpoll will also have
9300 * the same problem.
9302 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9303 * to check the ring condition when the heartbeat is expiring
9304 * before doing the reset. This will prevent most unintended
9305 * resets.
9307 if (!--tp->asf_counter) {
9308 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9309 tg3_wait_for_event_ack(tp);
9311 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9312 FWCMD_NICDRV_ALIVE3);
9313 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9314 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9315 TG3_FW_UPDATE_TIMEOUT_SEC);
9317 tg3_generate_fw_event(tp);
9319 tp->asf_counter = tp->asf_multiplier;
9322 spin_unlock(&tp->lock);
9324 restart_timer:
9325 tp->timer.expires = jiffies + tp->timer_offset;
9326 add_timer(&tp->timer);
9329 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9331 irq_handler_t fn;
9332 unsigned long flags;
9333 char *name;
9334 struct tg3_napi *tnapi = &tp->napi[irq_num];
9336 if (tp->irq_cnt == 1)
9337 name = tp->dev->name;
9338 else {
9339 name = &tnapi->irq_lbl[0];
9340 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9341 name[IFNAMSIZ-1] = 0;
9344 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9345 fn = tg3_msi;
9346 if (tg3_flag(tp, 1SHOT_MSI))
9347 fn = tg3_msi_1shot;
9348 flags = 0;
9349 } else {
9350 fn = tg3_interrupt;
9351 if (tg3_flag(tp, TAGGED_STATUS))
9352 fn = tg3_interrupt_tagged;
9353 flags = IRQF_SHARED;
9356 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9359 static int tg3_test_interrupt(struct tg3 *tp)
9361 struct tg3_napi *tnapi = &tp->napi[0];
9362 struct net_device *dev = tp->dev;
9363 int err, i, intr_ok = 0;
9364 u32 val;
9366 if (!netif_running(dev))
9367 return -ENODEV;
9369 tg3_disable_ints(tp);
9371 free_irq(tnapi->irq_vec, tnapi);
9374 * Turn off MSI one shot mode. Otherwise this test has no
9375 * observable way to know whether the interrupt was delivered.
9377 if (tg3_flag(tp, 57765_PLUS)) {
9378 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9379 tw32(MSGINT_MODE, val);
9382 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9383 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9384 if (err)
9385 return err;
9387 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9388 tg3_enable_ints(tp);
9390 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9391 tnapi->coal_now);
9393 for (i = 0; i < 5; i++) {
9394 u32 int_mbox, misc_host_ctrl;
9396 int_mbox = tr32_mailbox(tnapi->int_mbox);
9397 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9399 if ((int_mbox != 0) ||
9400 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9401 intr_ok = 1;
9402 break;
9405 if (tg3_flag(tp, 57765_PLUS) &&
9406 tnapi->hw_status->status_tag != tnapi->last_tag)
9407 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9409 msleep(10);
9412 tg3_disable_ints(tp);
9414 free_irq(tnapi->irq_vec, tnapi);
9416 err = tg3_request_irq(tp, 0);
9418 if (err)
9419 return err;
9421 if (intr_ok) {
9422 /* Reenable MSI one shot mode. */
9423 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9424 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9425 tw32(MSGINT_MODE, val);
9427 return 0;
9430 return -EIO;
9433 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
9434 * INTx mode is successfully restored.
9436 static int tg3_test_msi(struct tg3 *tp)
9438 int err;
9439 u16 pci_cmd;
9441 if (!tg3_flag(tp, USING_MSI))
9442 return 0;
9444 /* Turn off SERR reporting in case MSI terminates with Master
9445 * Abort.
9447 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9448 pci_write_config_word(tp->pdev, PCI_COMMAND,
9449 pci_cmd & ~PCI_COMMAND_SERR);
9451 err = tg3_test_interrupt(tp);
9453 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9455 if (!err)
9456 return 0;
9458 /* other failures */
9459 if (err != -EIO)
9460 return err;
9462 /* MSI test failed, go back to INTx mode */
9463 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9464 "to INTx mode. Please report this failure to the PCI "
9465 "maintainer and include system chipset information\n");
9467 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9469 pci_disable_msi(tp->pdev);
9471 tg3_flag_clear(tp, USING_MSI);
9472 tp->napi[0].irq_vec = tp->pdev->irq;
9474 err = tg3_request_irq(tp, 0);
9475 if (err)
9476 return err;
9478 /* Need to reset the chip because the MSI cycle may have terminated
9479 * with Master Abort.
9481 tg3_full_lock(tp, 1);
9483 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9484 err = tg3_init_hw(tp, 1);
9486 tg3_full_unlock(tp);
9488 if (err)
9489 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9491 return err;
9494 static int tg3_request_firmware(struct tg3 *tp)
9496 const __be32 *fw_data;
9498 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9499 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9500 tp->fw_needed);
9501 return -ENOENT;
9504 fw_data = (void *)tp->fw->data;
9506 /* Firmware blob starts with version numbers, followed by
9507 * start address and _full_ length including BSS sections
9508 * (which must be longer than the actual data, of course).
9511 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9512 if (tp->fw_len < (tp->fw->size - 12)) {
9513 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9514 tp->fw_len, tp->fw_needed);
9515 release_firmware(tp->fw);
9516 tp->fw = NULL;
9517 return -EINVAL;
9520 /* We no longer need firmware; we have it. */
9521 tp->fw_needed = NULL;
9522 return 0;
9525 static bool tg3_enable_msix(struct tg3 *tp)
9527 int i, rc, cpus = num_online_cpus();
9528 struct msix_entry msix_ent[tp->irq_max];
9530 if (cpus == 1)
9531 /* Just fall back to the simpler MSI mode. */
9532 return false;
9535 * We want as many rx rings enabled as there are cpus.
9536 * The first MSIX vector only deals with link interrupts, etc,
9537 * so we add one to the number of vectors we are requesting.
9539 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9541 for (i = 0; i < tp->irq_max; i++) {
9542 msix_ent[i].entry = i;
9543 msix_ent[i].vector = 0;
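/* Note: with this (older) pci_enable_msix() API, a positive return
 * value is the number of vectors actually available; the retry below
 * asks again with that reduced count before giving up.
 */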
9546 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9547 if (rc < 0) {
9548 return false;
9549 } else if (rc != 0) {
9550 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9551 return false;
9552 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9553 tp->irq_cnt, rc);
9554 tp->irq_cnt = rc;
9557 for (i = 0; i < tp->irq_max; i++)
9558 tp->napi[i].irq_vec = msix_ent[i].vector;
9560 netif_set_real_num_tx_queues(tp->dev, 1);
9561 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9562 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9563 pci_disable_msix(tp->pdev);
9564 return false;
9567 if (tp->irq_cnt > 1) {
9568 tg3_flag_set(tp, ENABLE_RSS);
9570 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9571 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9572 tg3_flag_set(tp, ENABLE_TSS);
9573 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9577 return true;
9580 static void tg3_ints_init(struct tg3 *tp)
9582 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9583 !tg3_flag(tp, TAGGED_STATUS)) {
9584 /* All MSI supporting chips should support tagged
9585 * status. Assert that this is the case.
9587 netdev_warn(tp->dev,
9588 "MSI without TAGGED_STATUS? Not using MSI\n");
9589 goto defcfg;
9592 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9593 tg3_flag_set(tp, USING_MSIX);
9594 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9595 tg3_flag_set(tp, USING_MSI);
9597 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9598 u32 msi_mode = tr32(MSGINT_MODE);
9599 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9600 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9601 if (!tg3_flag(tp, 1SHOT_MSI))
9602 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9603 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9605 defcfg:
9606 if (!tg3_flag(tp, USING_MSIX)) {
9607 tp->irq_cnt = 1;
9608 tp->napi[0].irq_vec = tp->pdev->irq;
9609 netif_set_real_num_tx_queues(tp->dev, 1);
9610 netif_set_real_num_rx_queues(tp->dev, 1);
9614 static void tg3_ints_fini(struct tg3 *tp)
9616 if (tg3_flag(tp, USING_MSIX))
9617 pci_disable_msix(tp->pdev);
9618 else if (tg3_flag(tp, USING_MSI))
9619 pci_disable_msi(tp->pdev);
9620 tg3_flag_clear(tp, USING_MSI);
9621 tg3_flag_clear(tp, USING_MSIX);
9622 tg3_flag_clear(tp, ENABLE_RSS);
9623 tg3_flag_clear(tp, ENABLE_TSS);
9626 static int tg3_open(struct net_device *dev)
9628 struct tg3 *tp = netdev_priv(dev);
9629 int i, err;
9631 if (tp->fw_needed) {
9632 err = tg3_request_firmware(tp);
9633 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9634 if (err)
9635 return err;
9636 } else if (err) {
9637 netdev_warn(tp->dev, "TSO capability disabled\n");
9638 tg3_flag_clear(tp, TSO_CAPABLE);
9639 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9640 netdev_notice(tp->dev, "TSO capability restored\n");
9641 tg3_flag_set(tp, TSO_CAPABLE);
9645 netif_carrier_off(tp->dev);
9647 err = tg3_power_up(tp);
9648 if (err)
9649 return err;
9651 tg3_full_lock(tp, 0);
9653 tg3_disable_ints(tp);
9654 tg3_flag_clear(tp, INIT_COMPLETE);
9656 tg3_full_unlock(tp);
9659 * Set up interrupts first so we know how
9660 * many NAPI resources to allocate
9662 tg3_ints_init(tp);
9664 /* The placement of this call is tied
9665 * to the setup and use of Host TX descriptors.
9667 err = tg3_alloc_consistent(tp);
9668 if (err)
9669 goto err_out1;
9671 tg3_napi_init(tp);
9673 tg3_napi_enable(tp);
9675 for (i = 0; i < tp->irq_cnt; i++) {
9676 struct tg3_napi *tnapi = &tp->napi[i];
9677 err = tg3_request_irq(tp, i);
9678 if (err) {
9679 for (i--; i >= 0; i--)
9680 free_irq(tnapi->irq_vec, tnapi);
9681 break;
9685 if (err)
9686 goto err_out2;
9688 tg3_full_lock(tp, 0);
9690 err = tg3_init_hw(tp, 1);
9691 if (err) {
9692 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9693 tg3_free_rings(tp);
9694 } else {
9695 if (tg3_flag(tp, TAGGED_STATUS) &&
9696 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9697 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9698 tp->timer_offset = HZ;
9699 else
9700 tp->timer_offset = HZ / 10;
9702 BUG_ON(tp->timer_offset > HZ);
9703 tp->timer_counter = tp->timer_multiplier =
9704 (HZ / tp->timer_offset);
9705 tp->asf_counter = tp->asf_multiplier =
9706 ((HZ / tp->timer_offset) * 2);
9708 init_timer(&tp->timer);
9709 tp->timer.expires = jiffies + tp->timer_offset;
9710 tp->timer.data = (unsigned long) tp;
9711 tp->timer.function = tg3_timer;
9714 tg3_full_unlock(tp);
9716 if (err)
9717 goto err_out3;
9719 if (tg3_flag(tp, USING_MSI)) {
9720 err = tg3_test_msi(tp);
9722 if (err) {
9723 tg3_full_lock(tp, 0);
9724 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9725 tg3_free_rings(tp);
9726 tg3_full_unlock(tp);
9728 goto err_out2;
9731 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9732 u32 val = tr32(PCIE_TRANSACTION_CFG);
9734 tw32(PCIE_TRANSACTION_CFG,
9735 val | PCIE_TRANS_CFG_1SHOT_MSI);
9739 tg3_phy_start(tp);
9741 tg3_full_lock(tp, 0);
9743 add_timer(&tp->timer);
9744 tg3_flag_set(tp, INIT_COMPLETE);
9745 tg3_enable_ints(tp);
9747 tg3_full_unlock(tp);
9749 netif_tx_start_all_queues(dev);
9752 * Reset the loopback feature if it was turned on while the device
9753 * was down, to make sure that it's installed properly now.
9755 if (dev->features & NETIF_F_LOOPBACK)
9756 tg3_set_loopback(dev, dev->features);
9758 return 0;
9760 err_out3:
9761 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9762 struct tg3_napi *tnapi = &tp->napi[i];
9763 free_irq(tnapi->irq_vec, tnapi);
9766 err_out2:
9767 tg3_napi_disable(tp);
9768 tg3_napi_fini(tp);
9769 tg3_free_consistent(tp);
9771 err_out1:
9772 tg3_ints_fini(tp);
9773 tg3_frob_aux_power(tp, false);
9774 pci_set_power_state(tp->pdev, PCI_D3hot);
9775 return err;
9778 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9779 struct rtnl_link_stats64 *);
9780 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9782 static int tg3_close(struct net_device *dev)
9784 int i;
9785 struct tg3 *tp = netdev_priv(dev);
9787 tg3_napi_disable(tp);
9788 cancel_work_sync(&tp->reset_task);
9790 netif_tx_stop_all_queues(dev);
9792 del_timer_sync(&tp->timer);
9794 tg3_phy_stop(tp);
9796 tg3_full_lock(tp, 1);
9798 tg3_disable_ints(tp);
9800 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9801 tg3_free_rings(tp);
9802 tg3_flag_clear(tp, INIT_COMPLETE);
9804 tg3_full_unlock(tp);
9806 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9807 struct tg3_napi *tnapi = &tp->napi[i];
9808 free_irq(tnapi->irq_vec, tnapi);
9811 tg3_ints_fini(tp);
9813 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9815 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9816 sizeof(tp->estats_prev));
9818 tg3_napi_fini(tp);
9820 tg3_free_consistent(tp);
9822 tg3_power_down(tp);
9824 netif_carrier_off(tp->dev);
9826 return 0;
9829 static inline u64 get_stat64(tg3_stat64_t *val)
9831 return ((u64)val->high << 32) | ((u64)val->low);
9834 static u64 calc_crc_errors(struct tg3 *tp)
9836 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9838 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9839 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9840 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9841 u32 val;
9843 spin_lock_bh(&tp->lock);
9844 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9845 tg3_writephy(tp, MII_TG3_TEST1,
9846 val | MII_TG3_TEST1_CRC_EN);
9847 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9848 } else
9849 val = 0;
9850 spin_unlock_bh(&tp->lock);
9852 tp->phy_crc_errors += val;
9854 return tp->phy_crc_errors;
9857 return get_stat64(&hw_stats->rx_fcs_errors);
9860 #define ESTAT_ADD(member) \
9861 estats->member = old_estats->member + \
9862 get_stat64(&hw_stats->member)
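/* ESTAT_ADD reports each ethtool counter as the running total saved at
 * the last interface close (old_estats) plus the live hardware value,
 * so statistics persist across down/up cycles.
 */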
9864 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9866 struct tg3_ethtool_stats *estats = &tp->estats;
9867 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9868 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9870 if (!hw_stats)
9871 return old_estats;
9873 ESTAT_ADD(rx_octets);
9874 ESTAT_ADD(rx_fragments);
9875 ESTAT_ADD(rx_ucast_packets);
9876 ESTAT_ADD(rx_mcast_packets);
9877 ESTAT_ADD(rx_bcast_packets);
9878 ESTAT_ADD(rx_fcs_errors);
9879 ESTAT_ADD(rx_align_errors);
9880 ESTAT_ADD(rx_xon_pause_rcvd);
9881 ESTAT_ADD(rx_xoff_pause_rcvd);
9882 ESTAT_ADD(rx_mac_ctrl_rcvd);
9883 ESTAT_ADD(rx_xoff_entered);
9884 ESTAT_ADD(rx_frame_too_long_errors);
9885 ESTAT_ADD(rx_jabbers);
9886 ESTAT_ADD(rx_undersize_packets);
9887 ESTAT_ADD(rx_in_length_errors);
9888 ESTAT_ADD(rx_out_length_errors);
9889 ESTAT_ADD(rx_64_or_less_octet_packets);
9890 ESTAT_ADD(rx_65_to_127_octet_packets);
9891 ESTAT_ADD(rx_128_to_255_octet_packets);
9892 ESTAT_ADD(rx_256_to_511_octet_packets);
9893 ESTAT_ADD(rx_512_to_1023_octet_packets);
9894 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9895 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9896 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9897 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9898 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9900 ESTAT_ADD(tx_octets);
9901 ESTAT_ADD(tx_collisions);
9902 ESTAT_ADD(tx_xon_sent);
9903 ESTAT_ADD(tx_xoff_sent);
9904 ESTAT_ADD(tx_flow_control);
9905 ESTAT_ADD(tx_mac_errors);
9906 ESTAT_ADD(tx_single_collisions);
9907 ESTAT_ADD(tx_mult_collisions);
9908 ESTAT_ADD(tx_deferred);
9909 ESTAT_ADD(tx_excessive_collisions);
9910 ESTAT_ADD(tx_late_collisions);
9911 ESTAT_ADD(tx_collide_2times);
9912 ESTAT_ADD(tx_collide_3times);
9913 ESTAT_ADD(tx_collide_4times);
9914 ESTAT_ADD(tx_collide_5times);
9915 ESTAT_ADD(tx_collide_6times);
9916 ESTAT_ADD(tx_collide_7times);
9917 ESTAT_ADD(tx_collide_8times);
9918 ESTAT_ADD(tx_collide_9times);
9919 ESTAT_ADD(tx_collide_10times);
9920 ESTAT_ADD(tx_collide_11times);
9921 ESTAT_ADD(tx_collide_12times);
9922 ESTAT_ADD(tx_collide_13times);
9923 ESTAT_ADD(tx_collide_14times);
9924 ESTAT_ADD(tx_collide_15times);
9925 ESTAT_ADD(tx_ucast_packets);
9926 ESTAT_ADD(tx_mcast_packets);
9927 ESTAT_ADD(tx_bcast_packets);
9928 ESTAT_ADD(tx_carrier_sense_errors);
9929 ESTAT_ADD(tx_discards);
9930 ESTAT_ADD(tx_errors);
9932 ESTAT_ADD(dma_writeq_full);
9933 ESTAT_ADD(dma_write_prioq_full);
9934 ESTAT_ADD(rxbds_empty);
9935 ESTAT_ADD(rx_discards);
9936 ESTAT_ADD(rx_errors);
9937 ESTAT_ADD(rx_threshold_hit);
9939 ESTAT_ADD(dma_readq_full);
9940 ESTAT_ADD(dma_read_prioq_full);
9941 ESTAT_ADD(tx_comp_queue_full);
9943 ESTAT_ADD(ring_set_send_prod_index);
9944 ESTAT_ADD(ring_status_update);
9945 ESTAT_ADD(nic_irqs);
9946 ESTAT_ADD(nic_avoided_irqs);
9947 ESTAT_ADD(nic_tx_threshold_hit);
9949 ESTAT_ADD(mbuf_lwm_thresh_hit);
9951 return estats;
9954 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9955 struct rtnl_link_stats64 *stats)
9957 struct tg3 *tp = netdev_priv(dev);
9958 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9959 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9961 if (!hw_stats)
9962 return old_stats;
9964 stats->rx_packets = old_stats->rx_packets +
9965 get_stat64(&hw_stats->rx_ucast_packets) +
9966 get_stat64(&hw_stats->rx_mcast_packets) +
9967 get_stat64(&hw_stats->rx_bcast_packets);
9969 stats->tx_packets = old_stats->tx_packets +
9970 get_stat64(&hw_stats->tx_ucast_packets) +
9971 get_stat64(&hw_stats->tx_mcast_packets) +
9972 get_stat64(&hw_stats->tx_bcast_packets);
9974 stats->rx_bytes = old_stats->rx_bytes +
9975 get_stat64(&hw_stats->rx_octets);
9976 stats->tx_bytes = old_stats->tx_bytes +
9977 get_stat64(&hw_stats->tx_octets);
9979 stats->rx_errors = old_stats->rx_errors +
9980 get_stat64(&hw_stats->rx_errors);
9981 stats->tx_errors = old_stats->tx_errors +
9982 get_stat64(&hw_stats->tx_errors) +
9983 get_stat64(&hw_stats->tx_mac_errors) +
9984 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9985 get_stat64(&hw_stats->tx_discards);
9987 stats->multicast = old_stats->multicast +
9988 get_stat64(&hw_stats->rx_mcast_packets);
9989 stats->collisions = old_stats->collisions +
9990 get_stat64(&hw_stats->tx_collisions);
9992 stats->rx_length_errors = old_stats->rx_length_errors +
9993 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9994 get_stat64(&hw_stats->rx_undersize_packets);
9996 stats->rx_over_errors = old_stats->rx_over_errors +
9997 get_stat64(&hw_stats->rxbds_empty);
9998 stats->rx_frame_errors = old_stats->rx_frame_errors +
9999 get_stat64(&hw_stats->rx_align_errors);
10000 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10001 get_stat64(&hw_stats->tx_discards);
10002 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10003 get_stat64(&hw_stats->tx_carrier_sense_errors);
10005 stats->rx_crc_errors = old_stats->rx_crc_errors +
10006 calc_crc_errors(tp);
10008 stats->rx_missed_errors = old_stats->rx_missed_errors +
10009 get_stat64(&hw_stats->rx_discards);
10011 stats->rx_dropped = tp->rx_dropped;
10013 return stats;
10016 static inline u32 calc_crc(unsigned char *buf, int len)
10018 u32 reg;
10019 u32 tmp;
10020 int j, k;
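/* Bit-at-a-time CRC-32 using the reflected IEEE 802.3 polynomial
 * 0xedb88320, processing each byte LSB first -- the same CRC the
 * hardware hash filter is presumed to apply to multicast addresses.
 */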
10022 reg = 0xffffffff;
10024 for (j = 0; j < len; j++) {
10025 reg ^= buf[j];
10027 for (k = 0; k < 8; k++) {
10028 tmp = reg & 0x01;
10030 reg >>= 1;
10032 if (tmp)
10033 reg ^= 0xedb88320;
10037 return ~reg;
10040 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10042 /* accept or reject all multicast frames */
10043 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10044 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10045 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10046 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10049 static void __tg3_set_rx_mode(struct net_device *dev)
10051 struct tg3 *tp = netdev_priv(dev);
10052 u32 rx_mode;
10054 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10055 RX_MODE_KEEP_VLAN_TAG);
10057 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10058 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10059 * flag clear.
10061 if (!tg3_flag(tp, ENABLE_ASF))
10062 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10063 #endif
10065 if (dev->flags & IFF_PROMISC) {
10066 /* Promiscuous mode. */
10067 rx_mode |= RX_MODE_PROMISC;
10068 } else if (dev->flags & IFF_ALLMULTI) {
10069 /* Accept all multicast. */
10070 tg3_set_multi(tp, 1);
10071 } else if (netdev_mc_empty(dev)) {
10072 /* Reject all multicast. */
10073 tg3_set_multi(tp, 0);
10074 } else {
10075 /* Accept one or more multicast(s). */
10076 struct netdev_hw_addr *ha;
10077 u32 mc_filter[4] = { 0, };
10078 u32 regidx;
10079 u32 bit;
10080 u32 crc;
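/* Hash each address with CRC-32 and use the low 7 bits of the
 * inverted CRC to select one of 128 filter bits: bits 6:5 pick one of
 * the four 32-bit MAC_HASH_REG_* registers, bits 4:0 the bit within it.
 */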
10082 netdev_for_each_mc_addr(ha, dev) {
10083 crc = calc_crc(ha->addr, ETH_ALEN);
10084 bit = ~crc & 0x7f;
10085 regidx = (bit & 0x60) >> 5;
10086 bit &= 0x1f;
10087 mc_filter[regidx] |= (1 << bit);
10090 tw32(MAC_HASH_REG_0, mc_filter[0]);
10091 tw32(MAC_HASH_REG_1, mc_filter[1]);
10092 tw32(MAC_HASH_REG_2, mc_filter[2]);
10093 tw32(MAC_HASH_REG_3, mc_filter[3]);
10096 if (rx_mode != tp->rx_mode) {
10097 tp->rx_mode = rx_mode;
10098 tw32_f(MAC_RX_MODE, rx_mode);
10099 udelay(10);
10103 static void tg3_set_rx_mode(struct net_device *dev)
10105 struct tg3 *tp = netdev_priv(dev);
10107 if (!netif_running(dev))
10108 return;
10110 tg3_full_lock(tp, 0);
10111 __tg3_set_rx_mode(dev);
10112 tg3_full_unlock(tp);
10115 static int tg3_get_regs_len(struct net_device *dev)
10117 return TG3_REG_BLK_SIZE;
10120 static void tg3_get_regs(struct net_device *dev,
10121 struct ethtool_regs *regs, void *_p)
10123 struct tg3 *tp = netdev_priv(dev);
10125 regs->version = 0;
10127 memset(_p, 0, TG3_REG_BLK_SIZE);
10129 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10130 return;
10132 tg3_full_lock(tp, 0);
10134 tg3_dump_legacy_regs(tp, (u32 *)_p);
10136 tg3_full_unlock(tp);
10139 static int tg3_get_eeprom_len(struct net_device *dev)
10141 struct tg3 *tp = netdev_priv(dev);
10143 return tp->nvram_size;
10146 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10148 struct tg3 *tp = netdev_priv(dev);
10149 int ret;
10150 u8 *pd;
10151 u32 i, offset, len, b_offset, b_count;
10152 __be32 val;
10154 if (tg3_flag(tp, NO_NVRAM))
10155 return -EINVAL;
10157 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10158 return -EAGAIN;
10160 offset = eeprom->offset;
10161 len = eeprom->len;
10162 eeprom->len = 0;
10164 eeprom->magic = TG3_EEPROM_MAGIC;
10166 if (offset & 3) {
10167 /* adjustments to start on required 4 byte boundary */
10168 b_offset = offset & 3;
10169 b_count = 4 - b_offset;
10170 if (b_count > len) {
10171 /* i.e. offset=1 len=2 */
10172 b_count = len;
10174 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10175 if (ret)
10176 return ret;
10177 memcpy(data, ((char *)&val) + b_offset, b_count);
10178 len -= b_count;
10179 offset += b_count;
10180 eeprom->len += b_count;
10183 /* read bytes up to the last 4 byte boundary */
10184 pd = &data[eeprom->len];
10185 for (i = 0; i < (len - (len & 3)); i += 4) {
10186 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10187 if (ret) {
10188 eeprom->len += i;
10189 return ret;
10191 memcpy(pd + i, &val, 4);
10193 eeprom->len += i;
10195 if (len & 3) {
10196 /* read last bytes not ending on 4 byte boundary */
10197 pd = &data[eeprom->len];
10198 b_count = len & 3;
10199 b_offset = offset + len - b_count;
10200 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10201 if (ret)
10202 return ret;
10203 memcpy(pd, &val, b_count);
10204 eeprom->len += b_count;
10206 return 0;
10209 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10211 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10213 struct tg3 *tp = netdev_priv(dev);
10214 int ret;
10215 u32 offset, len, b_offset, odd_len;
10216 u8 *buf;
10217 __be32 start, end;
10219 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10220 return -EAGAIN;
10222 if (tg3_flag(tp, NO_NVRAM) ||
10223 eeprom->magic != TG3_EEPROM_MAGIC)
10224 return -EINVAL;
10226 offset = eeprom->offset;
10227 len = eeprom->len;
10229 if ((b_offset = (offset & 3))) {
10230 /* adjustments to start on required 4 byte boundary */
10231 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10232 if (ret)
10233 return ret;
10234 len += b_offset;
10235 offset &= ~3;
10236 if (len < 4)
10237 len = 4;
10240 odd_len = 0;
10241 if (len & 3) {
10242 /* adjustments to end on required 4 byte boundary */
10243 odd_len = 1;
10244 len = (len + 3) & ~3;
10245 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10246 if (ret)
10247 return ret;
10250 buf = data;
10251 if (b_offset || odd_len) {
10252 buf = kmalloc(len, GFP_KERNEL);
10253 if (!buf)
10254 return -ENOMEM;
10255 if (b_offset)
10256 memcpy(buf, &start, 4);
10257 if (odd_len)
10258 memcpy(buf+len-4, &end, 4);
10259 memcpy(buf + b_offset, data, eeprom->len);
10262 ret = tg3_nvram_write_block(tp, offset, len, buf);
10264 if (buf != data)
10265 kfree(buf);
10267 return ret;
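/* Sketch of the read-modify-write above for an unaligned request
 * (hypothetical values): writing 3 bytes at offset 6 gives b_offset = 2,
 * so the word at 4 is read into 'start'; len grows to 5 and is rounded up
 * to 8, so the word at 8 is read into 'end'. The bounce buffer then holds
 * start (preserving bytes 4..5), the caller's 3 bytes at positions 6..8,
 * and end (preserving bytes 9..11), and one aligned 8-byte block is
 * written back at offset 4.
 */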
10270 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10272 struct tg3 *tp = netdev_priv(dev);
10274 if (tg3_flag(tp, USE_PHYLIB)) {
10275 struct phy_device *phydev;
10276 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10277 return -EAGAIN;
10278 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10279 return phy_ethtool_gset(phydev, cmd);
10282 cmd->supported = (SUPPORTED_Autoneg);
10284 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10285 cmd->supported |= (SUPPORTED_1000baseT_Half |
10286 SUPPORTED_1000baseT_Full);
10288 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10289 cmd->supported |= (SUPPORTED_100baseT_Half |
10290 SUPPORTED_100baseT_Full |
10291 SUPPORTED_10baseT_Half |
10292 SUPPORTED_10baseT_Full |
10293 SUPPORTED_TP);
10294 cmd->port = PORT_TP;
10295 } else {
10296 cmd->supported |= SUPPORTED_FIBRE;
10297 cmd->port = PORT_FIBRE;
10300 cmd->advertising = tp->link_config.advertising;
10301 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10302 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10303 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10304 cmd->advertising |= ADVERTISED_Pause;
10305 } else {
10306 cmd->advertising |= ADVERTISED_Pause |
10307 ADVERTISED_Asym_Pause;
10309 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10310 cmd->advertising |= ADVERTISED_Asym_Pause;
10313 if (netif_running(dev)) {
10314 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10315 cmd->duplex = tp->link_config.active_duplex;
10316 } else {
10317 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10318 cmd->duplex = DUPLEX_INVALID;
10320 cmd->phy_address = tp->phy_addr;
10321 cmd->transceiver = XCVR_INTERNAL;
10322 cmd->autoneg = tp->link_config.autoneg;
10323 cmd->maxtxpkt = 0;
10324 cmd->maxrxpkt = 0;
10325 return 0;
10328 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10330 struct tg3 *tp = netdev_priv(dev);
10331 u32 speed = ethtool_cmd_speed(cmd);
10333 if (tg3_flag(tp, USE_PHYLIB)) {
10334 struct phy_device *phydev;
10335 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10336 return -EAGAIN;
10337 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10338 return phy_ethtool_sset(phydev, cmd);
10341 if (cmd->autoneg != AUTONEG_ENABLE &&
10342 cmd->autoneg != AUTONEG_DISABLE)
10343 return -EINVAL;
10345 if (cmd->autoneg == AUTONEG_DISABLE &&
10346 cmd->duplex != DUPLEX_FULL &&
10347 cmd->duplex != DUPLEX_HALF)
10348 return -EINVAL;
10350 if (cmd->autoneg == AUTONEG_ENABLE) {
10351 u32 mask = ADVERTISED_Autoneg |
10352 ADVERTISED_Pause |
10353 ADVERTISED_Asym_Pause;
10355 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10356 mask |= ADVERTISED_1000baseT_Half |
10357 ADVERTISED_1000baseT_Full;
10359 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10360 mask |= ADVERTISED_100baseT_Half |
10361 ADVERTISED_100baseT_Full |
10362 ADVERTISED_10baseT_Half |
10363 ADVERTISED_10baseT_Full |
10364 ADVERTISED_TP;
10365 else
10366 mask |= ADVERTISED_FIBRE;
10368 if (cmd->advertising & ~mask)
10369 return -EINVAL;
10371 mask &= (ADVERTISED_1000baseT_Half |
10372 ADVERTISED_1000baseT_Full |
10373 ADVERTISED_100baseT_Half |
10374 ADVERTISED_100baseT_Full |
10375 ADVERTISED_10baseT_Half |
10376 ADVERTISED_10baseT_Full);
10378 cmd->advertising &= mask;
10379 } else {
10380 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10381 if (speed != SPEED_1000)
10382 return -EINVAL;
10384 if (cmd->duplex != DUPLEX_FULL)
10385 return -EINVAL;
10386 } else {
10387 if (speed != SPEED_100 &&
10388 speed != SPEED_10)
10389 return -EINVAL;
10393 tg3_full_lock(tp, 0);
10395 tp->link_config.autoneg = cmd->autoneg;
10396 if (cmd->autoneg == AUTONEG_ENABLE) {
10397 tp->link_config.advertising = (cmd->advertising |
10398 ADVERTISED_Autoneg);
10399 tp->link_config.speed = SPEED_INVALID;
10400 tp->link_config.duplex = DUPLEX_INVALID;
10401 } else {
10402 tp->link_config.advertising = 0;
10403 tp->link_config.speed = speed;
10404 tp->link_config.duplex = cmd->duplex;
10407 tp->link_config.orig_speed = tp->link_config.speed;
10408 tp->link_config.orig_duplex = tp->link_config.duplex;
10409 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10411 if (netif_running(dev))
10412 tg3_setup_phy(tp, 1);
10414 tg3_full_unlock(tp);
10416 return 0;
10419 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10421 struct tg3 *tp = netdev_priv(dev);
10423 strcpy(info->driver, DRV_MODULE_NAME);
10424 strcpy(info->version, DRV_MODULE_VERSION);
10425 strcpy(info->fw_version, tp->fw_ver);
10426 strcpy(info->bus_info, pci_name(tp->pdev));
10429 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10431 struct tg3 *tp = netdev_priv(dev);
10433 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10434 wol->supported = WAKE_MAGIC;
10435 else
10436 wol->supported = 0;
10437 wol->wolopts = 0;
10438 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10439 wol->wolopts = WAKE_MAGIC;
10440 memset(&wol->sopass, 0, sizeof(wol->sopass));
10443 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10445 struct tg3 *tp = netdev_priv(dev);
10446 struct device *dp = &tp->pdev->dev;
10448 if (wol->wolopts & ~WAKE_MAGIC)
10449 return -EINVAL;
10450 if ((wol->wolopts & WAKE_MAGIC) &&
10451 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10452 return -EINVAL;
10454 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10456 spin_lock_bh(&tp->lock);
10457 if (device_may_wakeup(dp))
10458 tg3_flag_set(tp, WOL_ENABLE);
10459 else
10460 tg3_flag_clear(tp, WOL_ENABLE);
10461 spin_unlock_bh(&tp->lock);
10463 return 0;
10466 static u32 tg3_get_msglevel(struct net_device *dev)
10468 struct tg3 *tp = netdev_priv(dev);
10469 return tp->msg_enable;
10472 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10474 struct tg3 *tp = netdev_priv(dev);
10475 tp->msg_enable = value;
10478 static int tg3_nway_reset(struct net_device *dev)
10480 struct tg3 *tp = netdev_priv(dev);
10481 int r;
10483 if (!netif_running(dev))
10484 return -EAGAIN;
10486 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10487 return -EINVAL;
10489 if (tg3_flag(tp, USE_PHYLIB)) {
10490 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10491 return -EAGAIN;
10492 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10493 } else {
10494 u32 bmcr;
10496 spin_lock_bh(&tp->lock);
10497 r = -EINVAL;
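/* Note: BMCR is read twice below and the first result is discarded,
 * presumably a dummy read to flush stale data before the read that is
 * actually checked (an assumption; the intent is not documented here).
 */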
10498 tg3_readphy(tp, MII_BMCR, &bmcr);
10499 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10500 ((bmcr & BMCR_ANENABLE) ||
10501 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10502 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10503 BMCR_ANENABLE);
10504 r = 0;
10506 spin_unlock_bh(&tp->lock);
10509 return r;
10512 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10514 struct tg3 *tp = netdev_priv(dev);
10516 ering->rx_max_pending = tp->rx_std_ring_mask;
10517 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10518 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10519 else
10520 ering->rx_jumbo_max_pending = 0;
10522 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10524 ering->rx_pending = tp->rx_pending;
10525 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10526 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10527 else
10528 ering->rx_jumbo_pending = 0;
10530 ering->tx_pending = tp->napi[0].tx_pending;
10533 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10535 struct tg3 *tp = netdev_priv(dev);
10536 int i, irq_sync = 0, err = 0;
10538 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10539 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10540 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10541 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10542 (tg3_flag(tp, TSO_BUG) &&
10543 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10544 return -EINVAL;
10546 if (netif_running(dev)) {
10547 tg3_phy_stop(tp);
10548 tg3_netif_stop(tp);
10549 irq_sync = 1;
10552 tg3_full_lock(tp, irq_sync);
10554 tp->rx_pending = ering->rx_pending;
10556 if (tg3_flag(tp, MAX_RXPEND_64) &&
10557 tp->rx_pending > 63)
10558 tp->rx_pending = 63;
10559 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10561 for (i = 0; i < tp->irq_max; i++)
10562 tp->napi[i].tx_pending = ering->tx_pending;
10564 if (netif_running(dev)) {
10565 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10566 err = tg3_restart_hw(tp, 1);
10567 if (!err)
10568 tg3_netif_start(tp);
10571 tg3_full_unlock(tp);
10573 if (irq_sync && !err)
10574 tg3_phy_start(tp);
10576 return err;
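/* Typical usage from userspace (interface name illustrative):
 *
 *   ethtool -G eth0 rx 511 tx 511
 *
 * Note the validation above: tx must exceed MAX_SKB_FRAGS (three times
 * that on TSO_BUG chips) so a maximally fragmented skb always fits, and
 * changing ring sizes on a running device costs a full halt/restart.
 */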
10579 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10581 struct tg3 *tp = netdev_priv(dev);
10583 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10585 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10586 epause->rx_pause = 1;
10587 else
10588 epause->rx_pause = 0;
10590 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10591 epause->tx_pause = 1;
10592 else
10593 epause->tx_pause = 0;
10596 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10598 struct tg3 *tp = netdev_priv(dev);
10599 int err = 0;
10601 if (tg3_flag(tp, USE_PHYLIB)) {
10602 u32 newadv;
10603 struct phy_device *phydev;
10605 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10607 if (!(phydev->supported & SUPPORTED_Pause) ||
10608 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10609 (epause->rx_pause != epause->tx_pause)))
10610 return -EINVAL;
10612 tp->link_config.flowctrl = 0;
10613 if (epause->rx_pause) {
10614 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10616 if (epause->tx_pause) {
10617 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10618 newadv = ADVERTISED_Pause;
10619 } else
10620 newadv = ADVERTISED_Pause |
10621 ADVERTISED_Asym_Pause;
10622 } else if (epause->tx_pause) {
10623 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10624 newadv = ADVERTISED_Asym_Pause;
10625 } else
10626 newadv = 0;
10628 if (epause->autoneg)
10629 tg3_flag_set(tp, PAUSE_AUTONEG);
10630 else
10631 tg3_flag_clear(tp, PAUSE_AUTONEG);
10633 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10634 u32 oldadv = phydev->advertising &
10635 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10636 if (oldadv != newadv) {
10637 phydev->advertising &=
10638 ~(ADVERTISED_Pause |
10639 ADVERTISED_Asym_Pause);
10640 phydev->advertising |= newadv;
10641 if (phydev->autoneg) {
10642 /*
10643 * Always renegotiate the link to
10644 * inform our link partner of our
10645 * flow control settings, even if the
10646 * flow control is forced. Let
10647 * tg3_adjust_link() do the final
10648 * flow control setup.
10649 */
10650 return phy_start_aneg(phydev);
10654 if (!epause->autoneg)
10655 tg3_setup_flow_control(tp, 0, 0);
10656 } else {
10657 tp->link_config.orig_advertising &=
10658 ~(ADVERTISED_Pause |
10659 ADVERTISED_Asym_Pause);
10660 tp->link_config.orig_advertising |= newadv;
10662 } else {
10663 int irq_sync = 0;
10665 if (netif_running(dev)) {
10666 tg3_netif_stop(tp);
10667 irq_sync = 1;
10670 tg3_full_lock(tp, irq_sync);
10672 if (epause->autoneg)
10673 tg3_flag_set(tp, PAUSE_AUTONEG);
10674 else
10675 tg3_flag_clear(tp, PAUSE_AUTONEG);
10676 if (epause->rx_pause)
10677 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10678 else
10679 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10680 if (epause->tx_pause)
10681 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10682 else
10683 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10685 if (netif_running(dev)) {
10686 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10687 err = tg3_restart_hw(tp, 1);
10688 if (!err)
10689 tg3_netif_start(tp);
10692 tg3_full_unlock(tp);
10695 return err;
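/* The pause-advertisement mapping used in both branches above follows
 * the usual 802.3 convention:
 *
 *   rx + tx  ->  ADVERTISED_Pause                       (symmetric)
 *   rx only  ->  ADVERTISED_Pause | ADVERTISED_Asym_Pause
 *   tx only  ->  ADVERTISED_Asym_Pause
 *   neither  ->  0
 */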
10698 static int tg3_get_sset_count(struct net_device *dev, int sset)
10700 switch (sset) {
10701 case ETH_SS_TEST:
10702 return TG3_NUM_TEST;
10703 case ETH_SS_STATS:
10704 return TG3_NUM_STATS;
10705 default:
10706 return -EOPNOTSUPP;
10710 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10712 switch (stringset) {
10713 case ETH_SS_STATS:
10714 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10715 break;
10716 case ETH_SS_TEST:
10717 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10718 break;
10719 default:
10720 WARN_ON(1); /* we need a WARN() */
10721 break;
10725 static int tg3_set_phys_id(struct net_device *dev,
10726 enum ethtool_phys_id_state state)
10728 struct tg3 *tp = netdev_priv(dev);
10730 if (!netif_running(tp->dev))
10731 return -EAGAIN;
10733 switch (state) {
10734 case ETHTOOL_ID_ACTIVE:
10735 return 1; /* cycle on/off once per second */
10737 case ETHTOOL_ID_ON:
10738 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10739 LED_CTRL_1000MBPS_ON |
10740 LED_CTRL_100MBPS_ON |
10741 LED_CTRL_10MBPS_ON |
10742 LED_CTRL_TRAFFIC_OVERRIDE |
10743 LED_CTRL_TRAFFIC_BLINK |
10744 LED_CTRL_TRAFFIC_LED);
10745 break;
10747 case ETHTOOL_ID_OFF:
10748 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10749 LED_CTRL_TRAFFIC_OVERRIDE);
10750 break;
10752 case ETHTOOL_ID_INACTIVE:
10753 tw32(MAC_LED_CTRL, tp->led_ctrl);
10754 break;
10757 return 0;
10760 static void tg3_get_ethtool_stats(struct net_device *dev,
10761 struct ethtool_stats *estats, u64 *tmp_stats)
10763 struct tg3 *tp = netdev_priv(dev);
10764 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10767 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10769 int i;
10770 __be32 *buf;
10771 u32 offset = 0, len = 0;
10772 u32 magic, val;
10774 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10775 return NULL;
10777 if (magic == TG3_EEPROM_MAGIC) {
10778 for (offset = TG3_NVM_DIR_START;
10779 offset < TG3_NVM_DIR_END;
10780 offset += TG3_NVM_DIRENT_SIZE) {
10781 if (tg3_nvram_read(tp, offset, &val))
10782 return NULL;
10784 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10785 TG3_NVM_DIRTYPE_EXTVPD)
10786 break;
10789 if (offset != TG3_NVM_DIR_END) {
10790 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10791 if (tg3_nvram_read(tp, offset + 4, &offset))
10792 return NULL;
10794 offset = tg3_nvram_logical_addr(tp, offset);
10798 if (!offset || !len) {
10799 offset = TG3_NVM_VPD_OFF;
10800 len = TG3_NVM_VPD_LEN;
10803 buf = kmalloc(len, GFP_KERNEL);
10804 if (buf == NULL)
10805 return NULL;
10807 if (magic == TG3_EEPROM_MAGIC) {
10808 for (i = 0; i < len; i += 4) {
10809 /* The data is in little-endian format in NVRAM.
10810 * Use the big-endian read routines to preserve
10811 * the byte order as it exists in NVRAM.
10812 */
10813 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10814 goto error;
10816 } else {
10817 u8 *ptr;
10818 ssize_t cnt;
10819 unsigned int pos = 0;
10821 ptr = (u8 *)&buf[0];
10822 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10823 cnt = pci_read_vpd(tp->pdev, pos,
10824 len - pos, ptr);
10825 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10826 cnt = 0;
10827 else if (cnt < 0)
10828 goto error;
10830 if (pos != len)
10831 goto error;
10834 *vpdlen = len;
10836 return buf;
10838 error:
10839 kfree(buf);
10840 return NULL;
10843 #define NVRAM_TEST_SIZE 0x100
10844 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10845 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10846 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10847 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
10848 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
10849 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
10850 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10851 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10853 static int tg3_test_nvram(struct tg3 *tp)
10855 u32 csum, magic, len;
10856 __be32 *buf;
10857 int i, j, k, err = 0, size;
10859 if (tg3_flag(tp, NO_NVRAM))
10860 return 0;
10862 if (tg3_nvram_read(tp, 0, &magic) != 0)
10863 return -EIO;
10865 if (magic == TG3_EEPROM_MAGIC)
10866 size = NVRAM_TEST_SIZE;
10867 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10868 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10869 TG3_EEPROM_SB_FORMAT_1) {
10870 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10871 case TG3_EEPROM_SB_REVISION_0:
10872 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10873 break;
10874 case TG3_EEPROM_SB_REVISION_2:
10875 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10876 break;
10877 case TG3_EEPROM_SB_REVISION_3:
10878 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10879 break;
10880 case TG3_EEPROM_SB_REVISION_4:
10881 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10882 break;
10883 case TG3_EEPROM_SB_REVISION_5:
10884 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10885 break;
10886 case TG3_EEPROM_SB_REVISION_6:
10887 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10888 break;
10889 default:
10890 return -EIO;
10892 } else
10893 return 0;
10894 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10895 size = NVRAM_SELFBOOT_HW_SIZE;
10896 else
10897 return -EIO;
10899 buf = kmalloc(size, GFP_KERNEL);
10900 if (buf == NULL)
10901 return -ENOMEM;
10903 err = -EIO;
10904 for (i = 0, j = 0; i < size; i += 4, j++) {
10905 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10906 if (err)
10907 break;
10909 if (i < size)
10910 goto out;
10912 /* Selfboot format */
10913 magic = be32_to_cpu(buf[0]);
10914 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10915 TG3_EEPROM_MAGIC_FW) {
10916 u8 *buf8 = (u8 *) buf, csum8 = 0;
10918 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10919 TG3_EEPROM_SB_REVISION_2) {
10920 /* For rev 2, the csum doesn't include the MBA. */
10921 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10922 csum8 += buf8[i];
10923 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10924 csum8 += buf8[i];
10925 } else {
10926 for (i = 0; i < size; i++)
10927 csum8 += buf8[i];
10930 if (csum8 == 0) {
10931 err = 0;
10932 goto out;
10935 err = -EIO;
10936 goto out;
10939 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10940 TG3_EEPROM_MAGIC_HW) {
10941 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10942 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10943 u8 *buf8 = (u8 *) buf;
10945 /* Separate the parity bits and the data bytes. */
10946 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10947 if ((i == 0) || (i == 8)) {
10948 int l;
10949 u8 msk;
10951 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10952 parity[k++] = buf8[i] & msk;
10953 i++;
10954 } else if (i == 16) {
10955 int l;
10956 u8 msk;
10958 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10959 parity[k++] = buf8[i] & msk;
10960 i++;
10962 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10963 parity[k++] = buf8[i] & msk;
10964 i++;
10966 data[j++] = buf8[i];
10969 err = -EIO;
10970 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10971 u8 hw8 = hweight8(data[i]);
10973 if ((hw8 & 0x1) && parity[i])
10974 goto out;
10975 else if (!(hw8 & 0x1) && !parity[i])
10976 goto out;
10978 err = 0;
10979 goto out;
10982 err = -EIO;
10984 /* Bootstrap checksum at offset 0x10 */
10985 csum = calc_crc((unsigned char *) buf, 0x10);
10986 if (csum != le32_to_cpu(buf[0x10/4]))
10987 goto out;
10989 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10990 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10991 if (csum != le32_to_cpu(buf[0xfc/4]))
10992 goto out;
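/* Layout note: 0x74 + 0x88 = 0xfc, so each CRC covers its block exactly
 * up to the word holding the stored checksum - bytes 0x00..0x0f are
 * checked against the word at 0x10, and bytes 0x74..0xfb against the
 * word at 0xfc.
 */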
10994 kfree(buf);
10996 buf = tg3_vpd_readblock(tp, &len);
10997 if (!buf)
10998 return -ENOMEM;
11000 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11001 if (i > 0) {
11002 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11003 if (j < 0)
11004 goto out;
11006 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11007 goto out;
11009 i += PCI_VPD_LRDT_TAG_SIZE;
11010 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11011 PCI_VPD_RO_KEYWORD_CHKSUM);
11012 if (j > 0) {
11013 u8 csum8 = 0;
11015 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11017 for (i = 0; i <= j; i++)
11018 csum8 += ((u8 *)buf)[i];
11020 if (csum8)
11021 goto out;
11025 err = 0;
11027 out:
11028 kfree(buf);
11029 return err;
11032 #define TG3_SERDES_TIMEOUT_SEC 2
11033 #define TG3_COPPER_TIMEOUT_SEC 6
11035 static int tg3_test_link(struct tg3 *tp)
11037 int i, max;
11039 if (!netif_running(tp->dev))
11040 return -ENODEV;
11042 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11043 max = TG3_SERDES_TIMEOUT_SEC;
11044 else
11045 max = TG3_COPPER_TIMEOUT_SEC;
11047 for (i = 0; i < max; i++) {
11048 if (netif_carrier_ok(tp->dev))
11049 return 0;
11051 if (msleep_interruptible(1000))
11052 break;
11055 return -EIO;
11058 /* Only test the commonly used registers */
11059 static int tg3_test_registers(struct tg3 *tp)
11061 int i, is_5705, is_5750;
11062 u32 offset, read_mask, write_mask, val, save_val, read_val;
11063 static struct {
11064 u16 offset;
11065 u16 flags;
11066 #define TG3_FL_5705 0x1
11067 #define TG3_FL_NOT_5705 0x2
11068 #define TG3_FL_NOT_5788 0x4
11069 #define TG3_FL_NOT_5750 0x8
11070 u32 read_mask;
11071 u32 write_mask;
11072 } reg_tbl[] = {
11073 /* MAC Control Registers */
11074 { MAC_MODE, TG3_FL_NOT_5705,
11075 0x00000000, 0x00ef6f8c },
11076 { MAC_MODE, TG3_FL_5705,
11077 0x00000000, 0x01ef6b8c },
11078 { MAC_STATUS, TG3_FL_NOT_5705,
11079 0x03800107, 0x00000000 },
11080 { MAC_STATUS, TG3_FL_5705,
11081 0x03800100, 0x00000000 },
11082 { MAC_ADDR_0_HIGH, 0x0000,
11083 0x00000000, 0x0000ffff },
11084 { MAC_ADDR_0_LOW, 0x0000,
11085 0x00000000, 0xffffffff },
11086 { MAC_RX_MTU_SIZE, 0x0000,
11087 0x00000000, 0x0000ffff },
11088 { MAC_TX_MODE, 0x0000,
11089 0x00000000, 0x00000070 },
11090 { MAC_TX_LENGTHS, 0x0000,
11091 0x00000000, 0x00003fff },
11092 { MAC_RX_MODE, TG3_FL_NOT_5705,
11093 0x00000000, 0x000007fc },
11094 { MAC_RX_MODE, TG3_FL_5705,
11095 0x00000000, 0x000007dc },
11096 { MAC_HASH_REG_0, 0x0000,
11097 0x00000000, 0xffffffff },
11098 { MAC_HASH_REG_1, 0x0000,
11099 0x00000000, 0xffffffff },
11100 { MAC_HASH_REG_2, 0x0000,
11101 0x00000000, 0xffffffff },
11102 { MAC_HASH_REG_3, 0x0000,
11103 0x00000000, 0xffffffff },
11105 /* Receive Data and Receive BD Initiator Control Registers. */
11106 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11107 0x00000000, 0xffffffff },
11108 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11109 0x00000000, 0xffffffff },
11110 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11111 0x00000000, 0x00000003 },
11112 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11113 0x00000000, 0xffffffff },
11114 { RCVDBDI_STD_BD+0, 0x0000,
11115 0x00000000, 0xffffffff },
11116 { RCVDBDI_STD_BD+4, 0x0000,
11117 0x00000000, 0xffffffff },
11118 { RCVDBDI_STD_BD+8, 0x0000,
11119 0x00000000, 0xffff0002 },
11120 { RCVDBDI_STD_BD+0xc, 0x0000,
11121 0x00000000, 0xffffffff },
11123 /* Receive BD Initiator Control Registers. */
11124 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11125 0x00000000, 0xffffffff },
11126 { RCVBDI_STD_THRESH, TG3_FL_5705,
11127 0x00000000, 0x000003ff },
11128 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11129 0x00000000, 0xffffffff },
11131 /* Host Coalescing Control Registers. */
11132 { HOSTCC_MODE, TG3_FL_NOT_5705,
11133 0x00000000, 0x00000004 },
11134 { HOSTCC_MODE, TG3_FL_5705,
11135 0x00000000, 0x000000f6 },
11136 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11137 0x00000000, 0xffffffff },
11138 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11139 0x00000000, 0x000003ff },
11140 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11141 0x00000000, 0xffffffff },
11142 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11143 0x00000000, 0x000003ff },
11144 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11145 0x00000000, 0xffffffff },
11146 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11147 0x00000000, 0x000000ff },
11148 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11149 0x00000000, 0xffffffff },
11150 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11151 0x00000000, 0x000000ff },
11152 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11153 0x00000000, 0xffffffff },
11154 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11155 0x00000000, 0xffffffff },
11156 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11157 0x00000000, 0xffffffff },
11158 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11159 0x00000000, 0x000000ff },
11160 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11161 0x00000000, 0xffffffff },
11162 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11163 0x00000000, 0x000000ff },
11164 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11165 0x00000000, 0xffffffff },
11166 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11167 0x00000000, 0xffffffff },
11168 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11169 0x00000000, 0xffffffff },
11170 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11171 0x00000000, 0xffffffff },
11172 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11173 0x00000000, 0xffffffff },
11174 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11175 0xffffffff, 0x00000000 },
11176 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11177 0xffffffff, 0x00000000 },
11179 /* Buffer Manager Control Registers. */
11180 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11181 0x00000000, 0x007fff80 },
11182 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11183 0x00000000, 0x007fffff },
11184 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11185 0x00000000, 0x0000003f },
11186 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11187 0x00000000, 0x000001ff },
11188 { BUFMGR_MB_HIGH_WATER, 0x0000,
11189 0x00000000, 0x000001ff },
11190 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11191 0xffffffff, 0x00000000 },
11192 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11193 0xffffffff, 0x00000000 },
11195 /* Mailbox Registers */
11196 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11197 0x00000000, 0x000001ff },
11198 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11199 0x00000000, 0x000001ff },
11200 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11201 0x00000000, 0x000007ff },
11202 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11203 0x00000000, 0x000001ff },
11205 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11208 is_5705 = is_5750 = 0;
11209 if (tg3_flag(tp, 5705_PLUS)) {
11210 is_5705 = 1;
11211 if (tg3_flag(tp, 5750_PLUS))
11212 is_5750 = 1;
11215 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11216 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11217 continue;
11219 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11220 continue;
11222 if (tg3_flag(tp, IS_5788) &&
11223 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11224 continue;
11226 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11227 continue;
11229 offset = (u32) reg_tbl[i].offset;
11230 read_mask = reg_tbl[i].read_mask;
11231 write_mask = reg_tbl[i].write_mask;
11233 /* Save the original register content */
11234 save_val = tr32(offset);
11236 /* Determine the read-only value. */
11237 read_val = save_val & read_mask;
11239 /* Write zero to the register, then make sure the read-only bits
11240 * are not changed and the read/write bits are all zeros.
11241 */
11242 tw32(offset, 0);
11244 val = tr32(offset);
11246 /* Test the read-only and read/write bits. */
11247 if (((val & read_mask) != read_val) || (val & write_mask))
11248 goto out;
11250 /* Write ones to all the bits defined by RdMask and WrMask, then
11251 * make sure the read-only bits are not changed and the
11252 * read/write bits are all ones.
11253 */
11254 tw32(offset, read_mask | write_mask);
11256 val = tr32(offset);
11258 /* Test the read-only bits. */
11259 if ((val & read_mask) != read_val)
11260 goto out;
11262 /* Test the read/write bits. */
11263 if ((val & write_mask) != write_mask)
11264 goto out;
11266 tw32(offset, save_val);
11269 return 0;
11271 out:
11272 if (netif_msg_hw(tp))
11273 netdev_err(tp->dev,
11274 "Register test failed at offset %x\n", offset);
11275 tw32(offset, save_val);
11276 return -EIO;
11279 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11281 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11282 int i;
11283 u32 j;
11285 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11286 for (j = 0; j < len; j += 4) {
11287 u32 val;
11289 tg3_write_mem(tp, offset + j, test_pattern[i]);
11290 tg3_read_mem(tp, offset + j, &val);
11291 if (val != test_pattern[i])
11292 return -EIO;
11295 return 0;
11298 static int tg3_test_memory(struct tg3 *tp)
11300 static struct mem_entry {
11301 u32 offset;
11302 u32 len;
11303 } mem_tbl_570x[] = {
11304 { 0x00000000, 0x00b50},
11305 { 0x00002000, 0x1c000},
11306 { 0xffffffff, 0x00000}
11307 }, mem_tbl_5705[] = {
11308 { 0x00000100, 0x0000c},
11309 { 0x00000200, 0x00008},
11310 { 0x00004000, 0x00800},
11311 { 0x00006000, 0x01000},
11312 { 0x00008000, 0x02000},
11313 { 0x00010000, 0x0e000},
11314 { 0xffffffff, 0x00000}
11315 }, mem_tbl_5755[] = {
11316 { 0x00000200, 0x00008},
11317 { 0x00004000, 0x00800},
11318 { 0x00006000, 0x00800},
11319 { 0x00008000, 0x02000},
11320 { 0x00010000, 0x0c000},
11321 { 0xffffffff, 0x00000}
11322 }, mem_tbl_5906[] = {
11323 { 0x00000200, 0x00008},
11324 { 0x00004000, 0x00400},
11325 { 0x00006000, 0x00400},
11326 { 0x00008000, 0x01000},
11327 { 0x00010000, 0x01000},
11328 { 0xffffffff, 0x00000}
11329 }, mem_tbl_5717[] = {
11330 { 0x00000200, 0x00008},
11331 { 0x00010000, 0x0a000},
11332 { 0x00020000, 0x13c00},
11333 { 0xffffffff, 0x00000}
11334 }, mem_tbl_57765[] = {
11335 { 0x00000200, 0x00008},
11336 { 0x00004000, 0x00800},
11337 { 0x00006000, 0x09800},
11338 { 0x00010000, 0x0a000},
11339 { 0xffffffff, 0x00000}
11341 struct mem_entry *mem_tbl;
11342 int err = 0;
11343 int i;
11345 if (tg3_flag(tp, 5717_PLUS))
11346 mem_tbl = mem_tbl_5717;
11347 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11348 mem_tbl = mem_tbl_57765;
11349 else if (tg3_flag(tp, 5755_PLUS))
11350 mem_tbl = mem_tbl_5755;
11351 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11352 mem_tbl = mem_tbl_5906;
11353 else if (tg3_flag(tp, 5705_PLUS))
11354 mem_tbl = mem_tbl_5705;
11355 else
11356 mem_tbl = mem_tbl_570x;
11358 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11359 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11360 if (err)
11361 break;
11364 return err;
11367 #define TG3_TSO_MSS 500
11369 #define TG3_TSO_IP_HDR_LEN 20
11370 #define TG3_TSO_TCP_HDR_LEN 20
11371 #define TG3_TSO_TCP_OPT_LEN 12
11373 static const u8 tg3_tso_header[] = {
11374 0x08, 0x00,
11375 0x45, 0x00, 0x00, 0x00,
11376 0x00, 0x00, 0x40, 0x00,
11377 0x40, 0x06, 0x00, 0x00,
11378 0x0a, 0x00, 0x00, 0x01,
11379 0x0a, 0x00, 0x00, 0x02,
11380 0x0d, 0x00, 0xe0, 0x00,
11381 0x00, 0x00, 0x01, 0x00,
11382 0x00, 0x00, 0x02, 0x00,
11383 0x80, 0x10, 0x10, 0x00,
11384 0x14, 0x09, 0x00, 0x00,
11385 0x01, 0x01, 0x08, 0x0a,
11386 0x11, 0x11, 0x11, 0x11,
11387 0x11, 0x11, 0x11, 0x11,
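/* Decoded, this template is: EtherType 0x0800 (IPv4); an IPv4 header
 * (IHL 5, DF set, TTL 64, protocol TCP, 10.0.0.1 -> 10.0.0.2, tot_len
 * left 0 to be patched in tg3_run_loopback()); and a 32-byte TCP header
 * (sport 0x0d00, dport 0xe000, data offset 8, ACK set, window 0x1000,
 * options NOP, NOP, timestamp). 2 + 20 + 32 = 54 bytes, matching the
 * TG3_TSO_*_LEN constants above.
 */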
11390 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11392 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11393 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11394 u32 budget;
11395 struct sk_buff *skb, *rx_skb;
11396 u8 *tx_data;
11397 dma_addr_t map;
11398 int num_pkts, tx_len, rx_len, i, err;
11399 struct tg3_rx_buffer_desc *desc;
11400 struct tg3_napi *tnapi, *rnapi;
11401 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11403 tnapi = &tp->napi[0];
11404 rnapi = &tp->napi[0];
11405 if (tp->irq_cnt > 1) {
11406 if (tg3_flag(tp, ENABLE_RSS))
11407 rnapi = &tp->napi[1];
11408 if (tg3_flag(tp, ENABLE_TSS))
11409 tnapi = &tp->napi[1];
11411 coal_now = tnapi->coal_now | rnapi->coal_now;
11413 err = -EIO;
11415 tx_len = pktsz;
11416 skb = netdev_alloc_skb(tp->dev, tx_len);
11417 if (!skb)
11418 return -ENOMEM;
11420 tx_data = skb_put(skb, tx_len);
11421 memcpy(tx_data, tp->dev->dev_addr, 6);
11422 memset(tx_data + 6, 0x0, 8);
11424 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11426 if (tso_loopback) {
11427 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11429 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11430 TG3_TSO_TCP_OPT_LEN;
11432 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11433 sizeof(tg3_tso_header));
11434 mss = TG3_TSO_MSS;
11436 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11437 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11439 /* Set the total length field in the IP header */
11440 iph->tot_len = htons((u16)(mss + hdr_len));
11442 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11443 TXD_FLAG_CPU_POST_DMA);
11445 if (tg3_flag(tp, HW_TSO_1) ||
11446 tg3_flag(tp, HW_TSO_2) ||
11447 tg3_flag(tp, HW_TSO_3)) {
11448 struct tcphdr *th;
11449 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11450 th = (struct tcphdr *)&tx_data[val];
11451 th->check = 0;
11452 } else
11453 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11455 if (tg3_flag(tp, HW_TSO_3)) {
11456 mss |= (hdr_len & 0xc) << 12;
11457 if (hdr_len & 0x10)
11458 base_flags |= 0x00000010;
11459 base_flags |= (hdr_len & 0x3e0) << 5;
11460 } else if (tg3_flag(tp, HW_TSO_2))
11461 mss |= hdr_len << 9;
11462 else if (tg3_flag(tp, HW_TSO_1) ||
11463 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11464 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11465 } else {
11466 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11469 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11470 } else {
11471 num_pkts = 1;
11472 data_off = ETH_HLEN;
11475 for (i = data_off; i < tx_len; i++)
11476 tx_data[i] = (u8) (i & 0xff);
11478 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11479 if (pci_dma_mapping_error(tp->pdev, map)) {
11480 dev_kfree_skb(skb);
11481 return -EIO;
11484 val = tnapi->tx_prod;
11485 tnapi->tx_buffers[val].skb = skb;
11486 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11488 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11489 rnapi->coal_now);
11491 udelay(10);
11493 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11495 budget = tg3_tx_avail(tnapi);
11496 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11497 base_flags | TXD_FLAG_END, mss, 0)) {
11498 tnapi->tx_buffers[val].skb = NULL;
11499 dev_kfree_skb(skb);
11500 return -EIO;
11503 tnapi->tx_prod++;
11505 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11506 tr32_mailbox(tnapi->prodmbox);
11508 udelay(10);
11510 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11511 for (i = 0; i < 35; i++) {
11512 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11513 coal_now);
11515 udelay(10);
11517 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11518 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11519 if ((tx_idx == tnapi->tx_prod) &&
11520 (rx_idx == (rx_start_idx + num_pkts)))
11521 break;
11524 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
11525 dev_kfree_skb(skb);
11527 if (tx_idx != tnapi->tx_prod)
11528 goto out;
11530 if (rx_idx != rx_start_idx + num_pkts)
11531 goto out;
11533 val = data_off;
11534 while (rx_idx != rx_start_idx) {
11535 desc = &rnapi->rx_rcb[rx_start_idx++];
11536 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11537 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11539 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11540 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11541 goto out;
11543 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11544 - ETH_FCS_LEN;
11546 if (!tso_loopback) {
11547 if (rx_len != tx_len)
11548 goto out;
11550 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11551 if (opaque_key != RXD_OPAQUE_RING_STD)
11552 goto out;
11553 } else {
11554 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11555 goto out;
11557 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11558 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11559 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11560 goto out;
11563 if (opaque_key == RXD_OPAQUE_RING_STD) {
11564 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11565 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11566 mapping);
11567 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11568 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11569 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11570 mapping);
11571 } else
11572 goto out;
11574 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11575 PCI_DMA_FROMDEVICE);
11577 for (i = data_off; i < rx_len; i++, val++) {
11578 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11579 goto out;
11583 err = 0;
11585 /* tg3_free_rings will unmap and free the rx_skb */
11586 out:
11587 return err;
11590 #define TG3_STD_LOOPBACK_FAILED 1
11591 #define TG3_JMB_LOOPBACK_FAILED 2
11592 #define TG3_TSO_LOOPBACK_FAILED 4
11593 #define TG3_LOOPBACK_FAILED \
11594 (TG3_STD_LOOPBACK_FAILED | \
11595 TG3_JMB_LOOPBACK_FAILED | \
11596 TG3_TSO_LOOPBACK_FAILED)
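/* Each u64 result slot filled in by tg3_test_loopback() below is a
 * bitmask of the flags above: data[0] is the MAC loopback, data[1] the
 * internal PHY loopback, and data[2] the external PHY loopback (only
 * when requested). tg3_self_test() passes &data[4], so these land in
 * ethtool self-test slots 4..6.
 */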
11598 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11600 int err = -EIO;
11601 u32 eee_cap;
11603 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11604 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11606 if (!netif_running(tp->dev)) {
11607 data[0] = TG3_LOOPBACK_FAILED;
11608 data[1] = TG3_LOOPBACK_FAILED;
11609 if (do_extlpbk)
11610 data[2] = TG3_LOOPBACK_FAILED;
11611 goto done;
11614 err = tg3_reset_hw(tp, 1);
11615 if (err) {
11616 data[0] = TG3_LOOPBACK_FAILED;
11617 data[1] = TG3_LOOPBACK_FAILED;
11618 if (do_extlpbk)
11619 data[2] = TG3_LOOPBACK_FAILED;
11620 goto done;
11623 if (tg3_flag(tp, ENABLE_RSS)) {
11624 int i;
11626 /* Reroute all rx packets to the 1st queue */
11627 for (i = MAC_RSS_INDIR_TBL_0;
11628 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11629 tw32(i, 0x0);
11632 /* HW errata - mac loopback fails in some cases on 5780.
11633 * Normal traffic and PHY loopback are not affected by
11634 * errata. Also, the MAC loopback test is deprecated for
11635 * all newer ASIC revisions.
11636 */
11637 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11638 !tg3_flag(tp, CPMU_PRESENT)) {
11639 tg3_mac_loopback(tp, true);
11641 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11642 data[0] |= TG3_STD_LOOPBACK_FAILED;
11644 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11645 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11646 data[0] |= TG3_JMB_LOOPBACK_FAILED;
11648 tg3_mac_loopback(tp, false);
11651 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11652 !tg3_flag(tp, USE_PHYLIB)) {
11653 int i;
11655 tg3_phy_lpbk_set(tp, 0, false);
11657 /* Wait for link */
11658 for (i = 0; i < 100; i++) {
11659 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11660 break;
11661 mdelay(1);
11664 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11665 data[1] |= TG3_STD_LOOPBACK_FAILED;
11666 if (tg3_flag(tp, TSO_CAPABLE) &&
11667 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11668 data[1] |= TG3_TSO_LOOPBACK_FAILED;
11669 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11670 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11671 data[1] |= TG3_JMB_LOOPBACK_FAILED;
11673 if (do_extlpbk) {
11674 tg3_phy_lpbk_set(tp, 0, true);
11676 /* All link indications report up, but the hardware
11677 * isn't really ready for about 20 msec. Double it
11678 * to be sure.
11679 */
11680 mdelay(40);
11682 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11683 data[2] |= TG3_STD_LOOPBACK_FAILED;
11684 if (tg3_flag(tp, TSO_CAPABLE) &&
11685 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11686 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11687 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11688 tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11689 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11692 /* Re-enable gphy autopowerdown. */
11693 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11694 tg3_phy_toggle_apd(tp, true);
11697 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11699 done:
11700 tp->phy_flags |= eee_cap;
11702 return err;
11705 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11706 u64 *data)
11708 struct tg3 *tp = netdev_priv(dev);
11709 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11711 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11712 tg3_power_up(tp)) {
11713 etest->flags |= ETH_TEST_FL_FAILED;
11714 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11715 return;
11718 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11720 if (tg3_test_nvram(tp) != 0) {
11721 etest->flags |= ETH_TEST_FL_FAILED;
11722 data[0] = 1;
11724 if (!doextlpbk && tg3_test_link(tp)) {
11725 etest->flags |= ETH_TEST_FL_FAILED;
11726 data[1] = 1;
11728 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11729 int err, err2 = 0, irq_sync = 0;
11731 if (netif_running(dev)) {
11732 tg3_phy_stop(tp);
11733 tg3_netif_stop(tp);
11734 irq_sync = 1;
11737 tg3_full_lock(tp, irq_sync);
11739 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11740 err = tg3_nvram_lock(tp);
11741 tg3_halt_cpu(tp, RX_CPU_BASE);
11742 if (!tg3_flag(tp, 5705_PLUS))
11743 tg3_halt_cpu(tp, TX_CPU_BASE);
11744 if (!err)
11745 tg3_nvram_unlock(tp);
11747 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11748 tg3_phy_reset(tp);
11750 if (tg3_test_registers(tp) != 0) {
11751 etest->flags |= ETH_TEST_FL_FAILED;
11752 data[2] = 1;
11755 if (tg3_test_memory(tp) != 0) {
11756 etest->flags |= ETH_TEST_FL_FAILED;
11757 data[3] = 1;
11760 if (doextlpbk)
11761 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11763 if (tg3_test_loopback(tp, &data[4], doextlpbk))
11764 etest->flags |= ETH_TEST_FL_FAILED;
11766 tg3_full_unlock(tp);
11768 if (tg3_test_interrupt(tp) != 0) {
11769 etest->flags |= ETH_TEST_FL_FAILED;
11770 data[7] = 1;
11773 tg3_full_lock(tp, 0);
11775 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11776 if (netif_running(dev)) {
11777 tg3_flag_set(tp, INIT_COMPLETE);
11778 err2 = tg3_restart_hw(tp, 1);
11779 if (!err2)
11780 tg3_netif_start(tp);
11783 tg3_full_unlock(tp);
11785 if (irq_sync && !err2)
11786 tg3_phy_start(tp);
11788 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11789 tg3_power_down(tp);
11793 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11795 struct mii_ioctl_data *data = if_mii(ifr);
11796 struct tg3 *tp = netdev_priv(dev);
11797 int err;
11799 if (tg3_flag(tp, USE_PHYLIB)) {
11800 struct phy_device *phydev;
11801 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11802 return -EAGAIN;
11803 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11804 return phy_mii_ioctl(phydev, ifr, cmd);
11807 switch (cmd) {
11808 case SIOCGMIIPHY:
11809 data->phy_id = tp->phy_addr;
11811 /* fallthru */
11812 case SIOCGMIIREG: {
11813 u32 mii_regval;
11815 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11816 break; /* We have no PHY */
11818 if (!netif_running(dev))
11819 return -EAGAIN;
11821 spin_lock_bh(&tp->lock);
11822 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11823 spin_unlock_bh(&tp->lock);
11825 data->val_out = mii_regval;
11827 return err;
11830 case SIOCSMIIREG:
11831 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11832 break; /* We have no PHY */
11834 if (!netif_running(dev))
11835 return -EAGAIN;
11837 spin_lock_bh(&tp->lock);
11838 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11839 spin_unlock_bh(&tp->lock);
11841 return err;
11843 default:
11844 /* do nothing */
11845 break;
11847 return -EOPNOTSUPP;
11850 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11852 struct tg3 *tp = netdev_priv(dev);
11854 memcpy(ec, &tp->coal, sizeof(*ec));
11855 return 0;
11858 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11860 struct tg3 *tp = netdev_priv(dev);
11861 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11862 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11864 if (!tg3_flag(tp, 5705_PLUS)) {
11865 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11866 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11867 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11868 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11871 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11872 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11873 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11874 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11875 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11876 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11877 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11878 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11879 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11880 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11881 return -EINVAL;
11883 /* No rx interrupts will be generated if both are zero */
11884 if ((ec->rx_coalesce_usecs == 0) &&
11885 (ec->rx_max_coalesced_frames == 0))
11886 return -EINVAL;
11888 /* No tx interrupts will be generated if both are zero */
11889 if ((ec->tx_coalesce_usecs == 0) &&
11890 (ec->tx_max_coalesced_frames == 0))
11891 return -EINVAL;
11893 /* Only copy relevant parameters, ignore all others. */
11894 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11895 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11896 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11897 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11898 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11899 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11900 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11901 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11902 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11904 if (netif_running(dev)) {
11905 tg3_full_lock(tp, 0);
11906 __tg3_set_coalesce(tp, &tp->coal);
11907 tg3_full_unlock(tp);
11909 return 0;
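/* Typical usage from userspace (interface name illustrative):
 *
 *   ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * Setting both the usecs and frames limit of a direction to zero is
 * rejected above, since the hardware would then never raise an
 * interrupt for that direction.
 */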
11912 static const struct ethtool_ops tg3_ethtool_ops = {
11913 .get_settings = tg3_get_settings,
11914 .set_settings = tg3_set_settings,
11915 .get_drvinfo = tg3_get_drvinfo,
11916 .get_regs_len = tg3_get_regs_len,
11917 .get_regs = tg3_get_regs,
11918 .get_wol = tg3_get_wol,
11919 .set_wol = tg3_set_wol,
11920 .get_msglevel = tg3_get_msglevel,
11921 .set_msglevel = tg3_set_msglevel,
11922 .nway_reset = tg3_nway_reset,
11923 .get_link = ethtool_op_get_link,
11924 .get_eeprom_len = tg3_get_eeprom_len,
11925 .get_eeprom = tg3_get_eeprom,
11926 .set_eeprom = tg3_set_eeprom,
11927 .get_ringparam = tg3_get_ringparam,
11928 .set_ringparam = tg3_set_ringparam,
11929 .get_pauseparam = tg3_get_pauseparam,
11930 .set_pauseparam = tg3_set_pauseparam,
11931 .self_test = tg3_self_test,
11932 .get_strings = tg3_get_strings,
11933 .set_phys_id = tg3_set_phys_id,
11934 .get_ethtool_stats = tg3_get_ethtool_stats,
11935 .get_coalesce = tg3_get_coalesce,
11936 .set_coalesce = tg3_set_coalesce,
11937 .get_sset_count = tg3_get_sset_count,
11940 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11942 u32 cursize, val, magic;
11944 tp->nvram_size = EEPROM_CHIP_SIZE;
11946 if (tg3_nvram_read(tp, 0, &magic) != 0)
11947 return;
11949 if ((magic != TG3_EEPROM_MAGIC) &&
11950 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11951 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11952 return;
11954 /*
11955 * Size the chip by reading offsets at increasing powers of two.
11956 * When we encounter our validation signature, we know the addressing
11957 * has wrapped around, and thus have our chip size.
11958 */
11959 cursize = 0x10;
11961 while (cursize < tp->nvram_size) {
11962 if (tg3_nvram_read(tp, cursize, &val) != 0)
11963 return;
11965 if (val == magic)
11966 break;
11968 cursize <<= 1;
11971 tp->nvram_size = cursize;
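/* Example (assuming the part aliases addresses beyond its size): on a
 * 16 KB chip, reads at 0x10, 0x20, ... 0x2000 return ordinary data,
 * while the read at 0x4000 wraps to offset 0 and returns the magic
 * signature again, so nvram_size ends up as 0x4000.
 */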
11974 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11976 u32 val;
11978 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11979 return;
11981 /* Selfboot format */
11982 if (val != TG3_EEPROM_MAGIC) {
11983 tg3_get_eeprom_size(tp);
11984 return;
11987 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11988 if (val != 0) {
11989 /* This is confusing. We want to operate on the
11990 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11991 * call will read from NVRAM and byteswap the data
11992 * according to the byteswapping settings for all
11993 * other register accesses. This ensures the data we
11994 * want will always reside in the lower 16-bits.
11995 * However, the data in NVRAM is in LE format, which
11996 * means the data from the NVRAM read will always be
11997 * opposite the endianness of the CPU. The 16-bit
11998 * byteswap then brings the data to CPU endianness.
11999 */
12000 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12001 return;
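/* Example (hypothetical field value): if the 16-bit size field at 0xf2
 * decodes to 512 after the swab16(), the device holds 512 KB of NVRAM
 * and nvram_size becomes 512 * 1024 = 0x80000.
 */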
12004 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12007 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12009 u32 nvcfg1;
12011 nvcfg1 = tr32(NVRAM_CFG1);
12012 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12013 tg3_flag_set(tp, FLASH);
12014 } else {
12015 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12016 tw32(NVRAM_CFG1, nvcfg1);
12019 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12020 tg3_flag(tp, 5780_CLASS)) {
12021 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12022 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12023 tp->nvram_jedecnum = JEDEC_ATMEL;
12024 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12025 tg3_flag_set(tp, NVRAM_BUFFERED);
12026 break;
12027 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12028 tp->nvram_jedecnum = JEDEC_ATMEL;
12029 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12030 break;
12031 case FLASH_VENDOR_ATMEL_EEPROM:
12032 tp->nvram_jedecnum = JEDEC_ATMEL;
12033 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12034 tg3_flag_set(tp, NVRAM_BUFFERED);
12035 break;
12036 case FLASH_VENDOR_ST:
12037 tp->nvram_jedecnum = JEDEC_ST;
12038 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12039 tg3_flag_set(tp, NVRAM_BUFFERED);
12040 break;
12041 case FLASH_VENDOR_SAIFUN:
12042 tp->nvram_jedecnum = JEDEC_SAIFUN;
12043 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12044 break;
12045 case FLASH_VENDOR_SST_SMALL:
12046 case FLASH_VENDOR_SST_LARGE:
12047 tp->nvram_jedecnum = JEDEC_SST;
12048 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12049 break;
12051 } else {
12052 tp->nvram_jedecnum = JEDEC_ATMEL;
12053 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12054 tg3_flag_set(tp, NVRAM_BUFFERED);
12058 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12060 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12061 case FLASH_5752PAGE_SIZE_256:
12062 tp->nvram_pagesize = 256;
12063 break;
12064 case FLASH_5752PAGE_SIZE_512:
12065 tp->nvram_pagesize = 512;
12066 break;
12067 case FLASH_5752PAGE_SIZE_1K:
12068 tp->nvram_pagesize = 1024;
12069 break;
12070 case FLASH_5752PAGE_SIZE_2K:
12071 tp->nvram_pagesize = 2048;
12072 break;
12073 case FLASH_5752PAGE_SIZE_4K:
12074 tp->nvram_pagesize = 4096;
12075 break;
12076 case FLASH_5752PAGE_SIZE_264:
12077 tp->nvram_pagesize = 264;
12078 break;
12079 case FLASH_5752PAGE_SIZE_528:
12080 tp->nvram_pagesize = 528;
12081 break;
12085 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12087 u32 nvcfg1;
12089 nvcfg1 = tr32(NVRAM_CFG1);
12091 /* NVRAM protection for TPM */
12092 if (nvcfg1 & (1 << 27))
12093 tg3_flag_set(tp, PROTECTED_NVRAM);
12095 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12096 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12097 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12098 tp->nvram_jedecnum = JEDEC_ATMEL;
12099 tg3_flag_set(tp, NVRAM_BUFFERED);
12100 break;
12101 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12102 tp->nvram_jedecnum = JEDEC_ATMEL;
12103 tg3_flag_set(tp, NVRAM_BUFFERED);
12104 tg3_flag_set(tp, FLASH);
12105 break;
12106 case FLASH_5752VENDOR_ST_M45PE10:
12107 case FLASH_5752VENDOR_ST_M45PE20:
12108 case FLASH_5752VENDOR_ST_M45PE40:
12109 tp->nvram_jedecnum = JEDEC_ST;
12110 tg3_flag_set(tp, NVRAM_BUFFERED);
12111 tg3_flag_set(tp, FLASH);
12112 break;
12115 if (tg3_flag(tp, FLASH)) {
12116 tg3_nvram_get_pagesize(tp, nvcfg1);
12117 } else {
12118 /* For eeprom, set pagesize to maximum eeprom size */
12119 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12121 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12122 tw32(NVRAM_CFG1, nvcfg1);
12126 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12128 u32 nvcfg1, protect = 0;
12130 nvcfg1 = tr32(NVRAM_CFG1);
12132 /* NVRAM protection for TPM */
12133 if (nvcfg1 & (1 << 27)) {
12134 tg3_flag_set(tp, PROTECTED_NVRAM);
12135 protect = 1;
12138 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12139 switch (nvcfg1) {
12140 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12141 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12142 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12143 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12144 tp->nvram_jedecnum = JEDEC_ATMEL;
12145 tg3_flag_set(tp, NVRAM_BUFFERED);
12146 tg3_flag_set(tp, FLASH);
12147 tp->nvram_pagesize = 264;
12148 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12149 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12150 tp->nvram_size = (protect ? 0x3e200 :
12151 TG3_NVRAM_SIZE_512KB);
12152 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12153 tp->nvram_size = (protect ? 0x1f200 :
12154 TG3_NVRAM_SIZE_256KB);
12155 else
12156 tp->nvram_size = (protect ? 0x1f200 :
12157 TG3_NVRAM_SIZE_128KB);
12158 break;
12159 case FLASH_5752VENDOR_ST_M45PE10:
12160 case FLASH_5752VENDOR_ST_M45PE20:
12161 case FLASH_5752VENDOR_ST_M45PE40:
12162 tp->nvram_jedecnum = JEDEC_ST;
12163 tg3_flag_set(tp, NVRAM_BUFFERED);
12164 tg3_flag_set(tp, FLASH);
12165 tp->nvram_pagesize = 256;
12166 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12167 tp->nvram_size = (protect ?
12168 TG3_NVRAM_SIZE_64KB :
12169 TG3_NVRAM_SIZE_128KB);
12170 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12171 tp->nvram_size = (protect ?
12172 TG3_NVRAM_SIZE_64KB :
12173 TG3_NVRAM_SIZE_256KB);
12174 else
12175 tp->nvram_size = (protect ?
12176 TG3_NVRAM_SIZE_128KB :
12177 TG3_NVRAM_SIZE_512KB);
12178 break;
12182 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12184 u32 nvcfg1;
12186 nvcfg1 = tr32(NVRAM_CFG1);
12188 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12189 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12190 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12191 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12192 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12193 tp->nvram_jedecnum = JEDEC_ATMEL;
12194 tg3_flag_set(tp, NVRAM_BUFFERED);
12195 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12197 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12198 tw32(NVRAM_CFG1, nvcfg1);
12199 break;
12200 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12201 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12202 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12203 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12204 tp->nvram_jedecnum = JEDEC_ATMEL;
12205 tg3_flag_set(tp, NVRAM_BUFFERED);
12206 tg3_flag_set(tp, FLASH);
12207 tp->nvram_pagesize = 264;
12208 break;
12209 case FLASH_5752VENDOR_ST_M45PE10:
12210 case FLASH_5752VENDOR_ST_M45PE20:
12211 case FLASH_5752VENDOR_ST_M45PE40:
12212 tp->nvram_jedecnum = JEDEC_ST;
12213 tg3_flag_set(tp, NVRAM_BUFFERED);
12214 tg3_flag_set(tp, FLASH);
12215 tp->nvram_pagesize = 256;
12216 break;
12220 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12222 u32 nvcfg1, protect = 0;
12224 nvcfg1 = tr32(NVRAM_CFG1);
12226 /* NVRAM protection for TPM */
12227 if (nvcfg1 & (1 << 27)) {
12228 tg3_flag_set(tp, PROTECTED_NVRAM);
12229 protect = 1;
12232 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12233 switch (nvcfg1) {
12234 case FLASH_5761VENDOR_ATMEL_ADB021D:
12235 case FLASH_5761VENDOR_ATMEL_ADB041D:
12236 case FLASH_5761VENDOR_ATMEL_ADB081D:
12237 case FLASH_5761VENDOR_ATMEL_ADB161D:
12238 case FLASH_5761VENDOR_ATMEL_MDB021D:
12239 case FLASH_5761VENDOR_ATMEL_MDB041D:
12240 case FLASH_5761VENDOR_ATMEL_MDB081D:
12241 case FLASH_5761VENDOR_ATMEL_MDB161D:
12242 tp->nvram_jedecnum = JEDEC_ATMEL;
12243 tg3_flag_set(tp, NVRAM_BUFFERED);
12244 tg3_flag_set(tp, FLASH);
12245 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12246 tp->nvram_pagesize = 256;
12247 break;
12248 case FLASH_5761VENDOR_ST_A_M45PE20:
12249 case FLASH_5761VENDOR_ST_A_M45PE40:
12250 case FLASH_5761VENDOR_ST_A_M45PE80:
12251 case FLASH_5761VENDOR_ST_A_M45PE16:
12252 case FLASH_5761VENDOR_ST_M_M45PE20:
12253 case FLASH_5761VENDOR_ST_M_M45PE40:
12254 case FLASH_5761VENDOR_ST_M_M45PE80:
12255 case FLASH_5761VENDOR_ST_M_M45PE16:
12256 tp->nvram_jedecnum = JEDEC_ST;
12257 tg3_flag_set(tp, NVRAM_BUFFERED);
12258 tg3_flag_set(tp, FLASH);
12259 tp->nvram_pagesize = 256;
12260 break;
12263 if (protect) {
12264 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12265 } else {
12266 switch (nvcfg1) {
12267 case FLASH_5761VENDOR_ATMEL_ADB161D:
12268 case FLASH_5761VENDOR_ATMEL_MDB161D:
12269 case FLASH_5761VENDOR_ST_A_M45PE16:
12270 case FLASH_5761VENDOR_ST_M_M45PE16:
12271 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12272 break;
12273 case FLASH_5761VENDOR_ATMEL_ADB081D:
12274 case FLASH_5761VENDOR_ATMEL_MDB081D:
12275 case FLASH_5761VENDOR_ST_A_M45PE80:
12276 case FLASH_5761VENDOR_ST_M_M45PE80:
12277 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12278 break;
12279 case FLASH_5761VENDOR_ATMEL_ADB041D:
12280 case FLASH_5761VENDOR_ATMEL_MDB041D:
12281 case FLASH_5761VENDOR_ST_A_M45PE40:
12282 case FLASH_5761VENDOR_ST_M_M45PE40:
12283 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12284 break;
12285 case FLASH_5761VENDOR_ATMEL_ADB021D:
12286 case FLASH_5761VENDOR_ATMEL_MDB021D:
12287 case FLASH_5761VENDOR_ST_A_M45PE20:
12288 case FLASH_5761VENDOR_ST_M_M45PE20:
12289 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12290 break;
12295 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12297 tp->nvram_jedecnum = JEDEC_ATMEL;
12298 tg3_flag_set(tp, NVRAM_BUFFERED);
12299 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12302 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12304 u32 nvcfg1;
12306 nvcfg1 = tr32(NVRAM_CFG1);
12308 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12309 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12310 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12311 tp->nvram_jedecnum = JEDEC_ATMEL;
12312 tg3_flag_set(tp, NVRAM_BUFFERED);
12313 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12315 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12316 tw32(NVRAM_CFG1, nvcfg1);
12317 return;
12318 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12319 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12320 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12321 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12322 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12323 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12324 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12325 tp->nvram_jedecnum = JEDEC_ATMEL;
12326 tg3_flag_set(tp, NVRAM_BUFFERED);
12327 tg3_flag_set(tp, FLASH);
12329 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12330 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12331 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12332 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12333 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12334 break;
12335 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12336 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12337 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12338 break;
12339 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12340 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12341 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12342 break;
12344 break;
12345 case FLASH_5752VENDOR_ST_M45PE10:
12346 case FLASH_5752VENDOR_ST_M45PE20:
12347 case FLASH_5752VENDOR_ST_M45PE40:
12348 tp->nvram_jedecnum = JEDEC_ST;
12349 tg3_flag_set(tp, NVRAM_BUFFERED);
12350 tg3_flag_set(tp, FLASH);
12352 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12353 case FLASH_5752VENDOR_ST_M45PE10:
12354 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12355 break;
12356 case FLASH_5752VENDOR_ST_M45PE20:
12357 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12358 break;
12359 case FLASH_5752VENDOR_ST_M45PE40:
12360 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12361 break;
12363 break;
12364 default:
12365 tg3_flag_set(tp, NO_NVRAM);
12366 return;
12369 tg3_nvram_get_pagesize(tp, nvcfg1);
12370 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12371 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
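/* 264- and 528-byte pages correspond to Atmel AT45 DataFlash
 * parts, the only devices here whose page-based addressing needs
 * translation; any other page size is flat-addressed, hence
 * NO_NVRAM_ADDR_TRANS. The same check recurs in the 5717 and
 * 5720 paths below.
 */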
12375 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12377 u32 nvcfg1;
12379 nvcfg1 = tr32(NVRAM_CFG1);
12381 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12382 case FLASH_5717VENDOR_ATMEL_EEPROM:
12383 case FLASH_5717VENDOR_MICRO_EEPROM:
12384 tp->nvram_jedecnum = JEDEC_ATMEL;
12385 tg3_flag_set(tp, NVRAM_BUFFERED);
12386 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12388 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12389 tw32(NVRAM_CFG1, nvcfg1);
12390 return;
12391 case FLASH_5717VENDOR_ATMEL_MDB011D:
12392 case FLASH_5717VENDOR_ATMEL_ADB011B:
12393 case FLASH_5717VENDOR_ATMEL_ADB011D:
12394 case FLASH_5717VENDOR_ATMEL_MDB021D:
12395 case FLASH_5717VENDOR_ATMEL_ADB021B:
12396 case FLASH_5717VENDOR_ATMEL_ADB021D:
12397 case FLASH_5717VENDOR_ATMEL_45USPT:
12398 tp->nvram_jedecnum = JEDEC_ATMEL;
12399 tg3_flag_set(tp, NVRAM_BUFFERED);
12400 tg3_flag_set(tp, FLASH);
12402 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12403 case FLASH_5717VENDOR_ATMEL_MDB021D:
12404 /* Detect size with tg3_nvram_get_size() */
12405 break;
12406 case FLASH_5717VENDOR_ATMEL_ADB021B:
12407 case FLASH_5717VENDOR_ATMEL_ADB021D:
12408 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12409 break;
12410 default:
12411 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12412 break;
12414 break;
12415 case FLASH_5717VENDOR_ST_M_M25PE10:
12416 case FLASH_5717VENDOR_ST_A_M25PE10:
12417 case FLASH_5717VENDOR_ST_M_M45PE10:
12418 case FLASH_5717VENDOR_ST_A_M45PE10:
12419 case FLASH_5717VENDOR_ST_M_M25PE20:
12420 case FLASH_5717VENDOR_ST_A_M25PE20:
12421 case FLASH_5717VENDOR_ST_M_M45PE20:
12422 case FLASH_5717VENDOR_ST_A_M45PE20:
12423 case FLASH_5717VENDOR_ST_25USPT:
12424 case FLASH_5717VENDOR_ST_45USPT:
12425 tp->nvram_jedecnum = JEDEC_ST;
12426 tg3_flag_set(tp, NVRAM_BUFFERED);
12427 tg3_flag_set(tp, FLASH);
12429 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12430 case FLASH_5717VENDOR_ST_M_M25PE20:
12431 case FLASH_5717VENDOR_ST_M_M45PE20:
12432 /* Detect size with tg3_nvram_get_size() */
12433 break;
12434 case FLASH_5717VENDOR_ST_A_M25PE20:
12435 case FLASH_5717VENDOR_ST_A_M45PE20:
12436 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12437 break;
12438 default:
12439 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12440 break;
12442 break;
12443 default:
12444 tg3_flag_set(tp, NO_NVRAM);
12445 return;
12448 tg3_nvram_get_pagesize(tp, nvcfg1);
12449 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12450 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12453 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12455 u32 nvcfg1, nvmpinstrp;
12457 nvcfg1 = tr32(NVRAM_CFG1);
12458 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12460 switch (nvmpinstrp) {
12461 case FLASH_5720_EEPROM_HD:
12462 case FLASH_5720_EEPROM_LD:
12463 tp->nvram_jedecnum = JEDEC_ATMEL;
12464 tg3_flag_set(tp, NVRAM_BUFFERED);
12466 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12467 tw32(NVRAM_CFG1, nvcfg1);
12468 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12469 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12470 else
12471 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12472 return;
12473 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12474 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12475 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12476 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12477 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12478 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12479 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12480 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12481 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12482 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12483 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12484 case FLASH_5720VENDOR_ATMEL_45USPT:
12485 tp->nvram_jedecnum = JEDEC_ATMEL;
12486 tg3_flag_set(tp, NVRAM_BUFFERED);
12487 tg3_flag_set(tp, FLASH);
12489 switch (nvmpinstrp) {
12490 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12491 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12492 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12493 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12494 break;
12495 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12496 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12497 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12498 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12499 break;
12500 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12501 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12502 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12503 break;
12504 default:
12505 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12506 break;
12508 break;
12509 case FLASH_5720VENDOR_M_ST_M25PE10:
12510 case FLASH_5720VENDOR_M_ST_M45PE10:
12511 case FLASH_5720VENDOR_A_ST_M25PE10:
12512 case FLASH_5720VENDOR_A_ST_M45PE10:
12513 case FLASH_5720VENDOR_M_ST_M25PE20:
12514 case FLASH_5720VENDOR_M_ST_M45PE20:
12515 case FLASH_5720VENDOR_A_ST_M25PE20:
12516 case FLASH_5720VENDOR_A_ST_M45PE20:
12517 case FLASH_5720VENDOR_M_ST_M25PE40:
12518 case FLASH_5720VENDOR_M_ST_M45PE40:
12519 case FLASH_5720VENDOR_A_ST_M25PE40:
12520 case FLASH_5720VENDOR_A_ST_M45PE40:
12521 case FLASH_5720VENDOR_M_ST_M25PE80:
12522 case FLASH_5720VENDOR_M_ST_M45PE80:
12523 case FLASH_5720VENDOR_A_ST_M25PE80:
12524 case FLASH_5720VENDOR_A_ST_M45PE80:
12525 case FLASH_5720VENDOR_ST_25USPT:
12526 case FLASH_5720VENDOR_ST_45USPT:
12527 tp->nvram_jedecnum = JEDEC_ST;
12528 tg3_flag_set(tp, NVRAM_BUFFERED);
12529 tg3_flag_set(tp, FLASH);
12531 switch (nvmpinstrp) {
12532 case FLASH_5720VENDOR_M_ST_M25PE20:
12533 case FLASH_5720VENDOR_M_ST_M45PE20:
12534 case FLASH_5720VENDOR_A_ST_M25PE20:
12535 case FLASH_5720VENDOR_A_ST_M45PE20:
12536 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12537 break;
12538 case FLASH_5720VENDOR_M_ST_M25PE40:
12539 case FLASH_5720VENDOR_M_ST_M45PE40:
12540 case FLASH_5720VENDOR_A_ST_M25PE40:
12541 case FLASH_5720VENDOR_A_ST_M45PE40:
12542 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12543 break;
12544 case FLASH_5720VENDOR_M_ST_M25PE80:
12545 case FLASH_5720VENDOR_M_ST_M45PE80:
12546 case FLASH_5720VENDOR_A_ST_M25PE80:
12547 case FLASH_5720VENDOR_A_ST_M45PE80:
12548 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12549 break;
12550 default:
12551 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12552 break;
12554 break;
12555 default:
12556 tg3_flag_set(tp, NO_NVRAM);
12557 return;
12560 tg3_nvram_get_pagesize(tp, nvcfg1);
12561 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12562 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12565 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12566 static void __devinit tg3_nvram_init(struct tg3 *tp)
12568 tw32_f(GRC_EEPROM_ADDR,
12569 (EEPROM_ADDR_FSM_RESET |
12570 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12571 EEPROM_ADDR_CLKPERD_SHIFT)));
12573 msleep(1);
12575 /* Enable seeprom accesses. */
12576 tw32_f(GRC_LOCAL_CTRL,
12577 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12578 udelay(100);
12580 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12581 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12582 tg3_flag_set(tp, NVRAM);
12584 if (tg3_nvram_lock(tp)) {
12585 netdev_warn(tp->dev,
12586 "Cannot get nvram lock, %s failed\n",
12587 __func__);
12588 return;
12590 tg3_enable_nvram_access(tp);
12592 tp->nvram_size = 0;
12594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12595 tg3_get_5752_nvram_info(tp);
12596 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12597 tg3_get_5755_nvram_info(tp);
12598 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12599 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12600 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12601 tg3_get_5787_nvram_info(tp);
12602 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12603 tg3_get_5761_nvram_info(tp);
12604 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12605 tg3_get_5906_nvram_info(tp);
12606 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12607 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12608 tg3_get_57780_nvram_info(tp);
12609 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12610 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12611 tg3_get_5717_nvram_info(tp);
12612 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12613 tg3_get_5720_nvram_info(tp);
12614 else
12615 tg3_get_nvram_info(tp);
12617 if (tp->nvram_size == 0)
12618 tg3_get_nvram_size(tp);
12620 tg3_disable_nvram_access(tp);
12621 tg3_nvram_unlock(tp);
12623 } else {
12624 tg3_flag_clear(tp, NVRAM);
12625 tg3_flag_clear(tp, NVRAM_BUFFERED);
12627 tg3_get_eeprom_size(tp);
12631 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12632 u32 offset, u32 len, u8 *buf)
12634 int i, j, rc = 0;
12635 u32 val;
12637 for (i = 0; i < len; i += 4) {
12638 u32 addr;
12639 __be32 data;
12641 addr = offset + i;
12643 memcpy(&data, buf + i, 4);
12645 /*
12646 * The SEEPROM interface expects the data to always be opposite
12647 * the native endian format. We accomplish this by reversing
12648 * all the operations that would have been performed on the
12649 * data from a call to tg3_nvram_read_be32().
12651 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
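/* Net effect: swab32(be32_to_cpu(x)) interprets the same four
 * buffer bytes as a little-endian word (equivalent to le32_to_cpu()
 * on the raw bytes) -- the mirror image of the conversion done by
 * tg3_nvram_read_be32().
 */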
12653 val = tr32(GRC_EEPROM_ADDR);
12654 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12656 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12657 EEPROM_ADDR_READ);
12658 tw32(GRC_EEPROM_ADDR, val |
12659 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12660 (addr & EEPROM_ADDR_ADDR_MASK) |
12661 EEPROM_ADDR_START |
12662 EEPROM_ADDR_WRITE);
12664 for (j = 0; j < 1000; j++) {
12665 val = tr32(GRC_EEPROM_ADDR);
12667 if (val & EEPROM_ADDR_COMPLETE)
12668 break;
12669 msleep(1);
12671 if (!(val & EEPROM_ADDR_COMPLETE)) {
12672 rc = -EBUSY;
12673 break;
12677 return rc;
12680 /* offset and length are dword aligned */
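/* Unbuffered flash parts can only be programmed a full page at a
 * time, so each iteration below is a read-modify-write: read the
 * whole page into a bounce buffer, merge in the caller's data,
 * erase the page, then stream the buffer back out one word at a
 * time with FIRST/LAST framing at the page boundaries.
 */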
12681 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12682 u8 *buf)
12684 int ret = 0;
12685 u32 pagesize = tp->nvram_pagesize;
12686 u32 pagemask = pagesize - 1;
12687 u32 nvram_cmd;
12688 u8 *tmp;
12690 tmp = kmalloc(pagesize, GFP_KERNEL);
12691 if (tmp == NULL)
12692 return -ENOMEM;
12694 while (len) {
12695 int j;
12696 u32 phy_addr, page_off, size;
12698 phy_addr = offset & ~pagemask;
12700 for (j = 0; j < pagesize; j += 4) {
12701 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12702 (__be32 *) (tmp + j));
12703 if (ret)
12704 break;
12706 if (ret)
12707 break;
12709 page_off = offset & pagemask;
12710 size = pagesize;
12711 if (len < size)
12712 size = len;
12714 len -= size;
12716 memcpy(tmp + page_off, buf, size);
12718 offset = offset + (pagesize - page_off);
12720 tg3_enable_nvram_access(tp);
12722 /*
12723 * Before we can erase the flash page, we need
12724 * to issue a special "write enable" command.
12726 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12728 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12729 break;
12731 /* Erase the target page */
12732 tw32(NVRAM_ADDR, phy_addr);
12734 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12735 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12737 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12738 break;
12740 /* Issue another write enable to start the write. */
12741 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12743 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12744 break;
12746 for (j = 0; j < pagesize; j += 4) {
12747 __be32 data;
12749 data = *((__be32 *) (tmp + j));
12751 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12753 tw32(NVRAM_ADDR, phy_addr + j);
12755 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12756 NVRAM_CMD_WR;
12758 if (j == 0)
12759 nvram_cmd |= NVRAM_CMD_FIRST;
12760 else if (j == (pagesize - 4))
12761 nvram_cmd |= NVRAM_CMD_LAST;
12763 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12764 break;
12766 if (ret)
12767 break;
12770 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12771 tg3_nvram_exec_cmd(tp, nvram_cmd);
12773 kfree(tmp);
12775 return ret;
12778 /* offset and length are dword aligned */
12779 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12780 u8 *buf)
12782 int i, ret = 0;
12784 for (i = 0; i < len; i += 4, offset += 4) {
12785 u32 page_off, phy_addr, nvram_cmd;
12786 __be32 data;
12788 memcpy(&data, buf + i, 4);
12789 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12791 page_off = offset % tp->nvram_pagesize;
12793 phy_addr = tg3_nvram_phys_addr(tp, offset);
12795 tw32(NVRAM_ADDR, phy_addr);
12797 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12799 if (page_off == 0 || i == 0)
12800 nvram_cmd |= NVRAM_CMD_FIRST;
12801 if (page_off == (tp->nvram_pagesize - 4))
12802 nvram_cmd |= NVRAM_CMD_LAST;
12804 if (i == (len - 4))
12805 nvram_cmd |= NVRAM_CMD_LAST;
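/* LAST is raised both at a page boundary and at the end of the
 * caller's buffer, whichever comes first, so every burst the
 * controller sees is properly terminated.
 */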
12807 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12808 !tg3_flag(tp, 5755_PLUS) &&
12809 (tp->nvram_jedecnum == JEDEC_ST) &&
12810 (nvram_cmd & NVRAM_CMD_FIRST)) {
12812 if ((ret = tg3_nvram_exec_cmd(tp,
12813 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12814 NVRAM_CMD_DONE)))
12816 break;
12818 if (!tg3_flag(tp, FLASH)) {
12819 /* We always do complete word writes to eeprom. */
12820 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12823 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12824 break;
12826 return ret;
12829 /* offset and length are dword aligned */
12830 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12832 int ret;
12834 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12835 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12836 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12837 udelay(40);
12840 if (!tg3_flag(tp, NVRAM)) {
12841 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12842 } else {
12843 u32 grc_mode;
12845 ret = tg3_nvram_lock(tp);
12846 if (ret)
12847 return ret;
12849 tg3_enable_nvram_access(tp);
12850 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12851 tw32(NVRAM_WRITE1, 0x406);
12853 grc_mode = tr32(GRC_MODE);
12854 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12856 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12857 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12858 buf);
12859 } else {
12860 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12861 buf);
12864 grc_mode = tr32(GRC_MODE);
12865 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12867 tg3_disable_nvram_access(tp);
12868 tg3_nvram_unlock(tp);
12871 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12872 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12873 udelay(40);
12876 return ret;
12879 struct subsys_tbl_ent {
12880 u16 subsys_vendor, subsys_devid;
12881 u32 phy_id;
12884 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12885 /* Broadcom boards. */
12886 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12887 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12888 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12889 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12890 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12891 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12892 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12893 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12894 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12895 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12896 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12897 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12898 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12899 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12900 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12901 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12902 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12903 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12904 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12905 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12906 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12907 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12909 /* 3com boards. */
12910 { TG3PCI_SUBVENDOR_ID_3COM,
12911 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12912 { TG3PCI_SUBVENDOR_ID_3COM,
12913 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12914 { TG3PCI_SUBVENDOR_ID_3COM,
12915 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12916 { TG3PCI_SUBVENDOR_ID_3COM,
12917 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12918 { TG3PCI_SUBVENDOR_ID_3COM,
12919 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12921 /* DELL boards. */
12922 { TG3PCI_SUBVENDOR_ID_DELL,
12923 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12924 { TG3PCI_SUBVENDOR_ID_DELL,
12925 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12926 { TG3PCI_SUBVENDOR_ID_DELL,
12927 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12928 { TG3PCI_SUBVENDOR_ID_DELL,
12929 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12931 /* Compaq boards. */
12932 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12933 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12934 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12935 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12936 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12937 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12938 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12939 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12940 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12941 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12943 /* IBM boards. */
12944 { TG3PCI_SUBVENDOR_ID_IBM,
12945 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12948 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12950 int i;
12952 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12953 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12954 tp->pdev->subsystem_vendor) &&
12955 (subsys_id_to_phy_id[i].subsys_devid ==
12956 tp->pdev->subsystem_device))
12957 return &subsys_id_to_phy_id[i];
12959 return NULL;
12962 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12964 u32 val;
12966 tp->phy_id = TG3_PHY_ID_INVALID;
12967 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12969 /* Assume an onboard device and WOL capable by default. */
12970 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12971 tg3_flag_set(tp, WOL_CAP);
12973 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12974 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12975 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12976 tg3_flag_set(tp, IS_NIC);
12978 val = tr32(VCPU_CFGSHDW);
12979 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12980 tg3_flag_set(tp, ASPM_WORKAROUND);
12981 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12982 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12983 tg3_flag_set(tp, WOL_ENABLE);
12984 device_set_wakeup_enable(&tp->pdev->dev, true);
12986 goto done;
12989 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12990 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12991 u32 nic_cfg, led_cfg;
12992 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12993 int eeprom_phy_serdes = 0;
12995 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12996 tp->nic_sram_data_cfg = nic_cfg;
12998 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12999 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13000 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13001 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13002 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13003 (ver > 0) && (ver < 0x100))
13004 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13006 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13007 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13009 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13010 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13011 eeprom_phy_serdes = 1;
13013 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13014 if (nic_phy_id != 0) {
13015 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13016 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13018 eeprom_phy_id = (id1 >> 16) << 10;
13019 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13020 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13021 } else
13022 eeprom_phy_id = 0;
13024 tp->phy_id = eeprom_phy_id;
13025 if (eeprom_phy_serdes) {
13026 if (!tg3_flag(tp, 5705_PLUS))
13027 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13028 else
13029 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13032 if (tg3_flag(tp, 5750_PLUS))
13033 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13034 SHASTA_EXT_LED_MODE_MASK);
13035 else
13036 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13038 switch (led_cfg) {
13039 default:
13040 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13041 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13042 break;
13044 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13045 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13046 break;
13048 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13049 tp->led_ctrl = LED_CTRL_MODE_MAC;
13051 /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
13052 * as happens with some older 5700/5701 bootcode.
13054 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13055 ASIC_REV_5700 ||
13056 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13057 ASIC_REV_5701)
13058 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13060 break;
13062 case SHASTA_EXT_LED_SHARED:
13063 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13064 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13065 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13066 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13067 LED_CTRL_MODE_PHY_2);
13068 break;
13070 case SHASTA_EXT_LED_MAC:
13071 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13072 break;
13074 case SHASTA_EXT_LED_COMBO:
13075 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13076 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13077 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13078 LED_CTRL_MODE_PHY_2);
13079 break;
13083 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13084 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13085 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13086 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13088 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13089 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13091 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13092 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13093 if ((tp->pdev->subsystem_vendor ==
13094 PCI_VENDOR_ID_ARIMA) &&
13095 (tp->pdev->subsystem_device == 0x205a ||
13096 tp->pdev->subsystem_device == 0x2063))
13097 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13098 } else {
13099 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13100 tg3_flag_set(tp, IS_NIC);
13103 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13104 tg3_flag_set(tp, ENABLE_ASF);
13105 if (tg3_flag(tp, 5750_PLUS))
13106 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13109 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13110 tg3_flag(tp, 5750_PLUS))
13111 tg3_flag_set(tp, ENABLE_APE);
13113 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13114 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13115 tg3_flag_clear(tp, WOL_CAP);
13117 if (tg3_flag(tp, WOL_CAP) &&
13118 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13119 tg3_flag_set(tp, WOL_ENABLE);
13120 device_set_wakeup_enable(&tp->pdev->dev, true);
13123 if (cfg2 & (1 << 17))
13124 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13126 /* SerDes signal pre-emphasis in register 0x590 is set by the
13127 * bootcode if bit 18 is set. */
13128 if (cfg2 & (1 << 18))
13129 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13131 if ((tg3_flag(tp, 57765_PLUS) ||
13132 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13133 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13134 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13135 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13137 if (tg3_flag(tp, PCI_EXPRESS) &&
13138 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13139 !tg3_flag(tp, 57765_PLUS)) {
13140 u32 cfg3;
13142 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13143 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13144 tg3_flag_set(tp, ASPM_WORKAROUND);
13147 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13148 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13149 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13150 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13151 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13152 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13154 done:
13155 if (tg3_flag(tp, WOL_CAP))
13156 device_set_wakeup_enable(&tp->pdev->dev,
13157 tg3_flag(tp, WOL_ENABLE));
13158 else
13159 device_set_wakeup_capable(&tp->pdev->dev, false);
13162 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13164 int i;
13165 u32 val;
13167 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13168 tw32(OTP_CTRL, cmd);
13170 /* Wait for up to 1 ms for command to execute. */
13171 for (i = 0; i < 100; i++) {
13172 val = tr32(OTP_STATUS);
13173 if (val & OTP_STATUS_CMD_DONE)
13174 break;
13175 udelay(10);
13178 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13181 /* Read the gphy configuration from the OTP region of the chip. The gphy
13182 * configuration is a 32-bit value that straddles the alignment boundary.
13183 * We do two 32-bit reads and then shift and merge the results.
13185 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13187 u32 bhalf_otp, thalf_otp;
13189 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13191 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13192 return 0;
13194 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13196 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13197 return 0;
13199 thalf_otp = tr32(OTP_READ_DATA);
13201 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13203 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13204 return 0;
13206 bhalf_otp = tr32(OTP_READ_DATA);
13208 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
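/* The gphy value straddles two aligned OTP words: its upper half
 * comes from the low 16 bits of the first word and its lower half
 * from the high 16 bits of the second, hence the shift-and-merge
 * above.
 */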
13211 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13213 u32 adv = ADVERTISED_Autoneg |
13214 ADVERTISED_Pause;
13216 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13217 adv |= ADVERTISED_1000baseT_Half |
13218 ADVERTISED_1000baseT_Full;
13220 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13221 adv |= ADVERTISED_100baseT_Half |
13222 ADVERTISED_100baseT_Full |
13223 ADVERTISED_10baseT_Half |
13224 ADVERTISED_10baseT_Full |
13225 ADVERTISED_TP;
13226 else
13227 adv |= ADVERTISED_FIBRE;
13229 tp->link_config.advertising = adv;
13230 tp->link_config.speed = SPEED_INVALID;
13231 tp->link_config.duplex = DUPLEX_INVALID;
13232 tp->link_config.autoneg = AUTONEG_ENABLE;
13233 tp->link_config.active_speed = SPEED_INVALID;
13234 tp->link_config.active_duplex = DUPLEX_INVALID;
13235 tp->link_config.orig_speed = SPEED_INVALID;
13236 tp->link_config.orig_duplex = DUPLEX_INVALID;
13237 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13240 static int __devinit tg3_phy_probe(struct tg3 *tp)
13242 u32 hw_phy_id_1, hw_phy_id_2;
13243 u32 hw_phy_id, hw_phy_id_masked;
13244 int err;
13246 /* flow control autonegotiation is default behavior */
13247 tg3_flag_set(tp, PAUSE_AUTONEG);
13248 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13250 if (tg3_flag(tp, USE_PHYLIB))
13251 return tg3_phy_init(tp);
13253 /* Reading the PHY ID register can conflict with ASF
13254 * firmware access to the PHY hardware.
13256 err = 0;
13257 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13258 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13259 } else {
13260 /* Now read the physical PHY_ID from the chip and verify
13261 * that it is sane. If it doesn't look good, we fall back
13262 * to the value found in the eeprom area or, failing that,
13263 * to the hard-coded subsystem-ID table.
13265 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13266 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13268 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13269 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13270 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
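/* This packs MII_PHYSID1/MII_PHYSID2 into the driver's internal
 * PHY ID layout -- the same packing applied to the NVRAM-provided
 * ID in tg3_get_eeprom_hw_cfg() -- so both sources compare
 * directly against the TG3_PHY_ID_* constants.
 */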
13272 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13275 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13276 tp->phy_id = hw_phy_id;
13277 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13278 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13279 else
13280 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13281 } else {
13282 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13283 /* Do nothing, phy ID already set up in
13284 * tg3_get_eeprom_hw_cfg().
13286 } else {
13287 struct subsys_tbl_ent *p;
13289 /* No eeprom signature? Try the hardcoded
13290 * subsys device table.
13292 p = tg3_lookup_by_subsys(tp);
13293 if (!p)
13294 return -ENODEV;
13296 tp->phy_id = p->phy_id;
13297 if (!tp->phy_id ||
13298 tp->phy_id == TG3_PHY_ID_BCM8002)
13299 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13303 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13304 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13305 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13306 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13307 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13308 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13309 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13310 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13312 tg3_phy_init_link_config(tp);
13314 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13315 !tg3_flag(tp, ENABLE_APE) &&
13316 !tg3_flag(tp, ENABLE_ASF)) {
13317 u32 bmsr, mask;
13319 tg3_readphy(tp, MII_BMSR, &bmsr);
13320 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13321 (bmsr & BMSR_LSTATUS))
13322 goto skip_phy_reset;
13324 err = tg3_phy_reset(tp);
13325 if (err)
13326 return err;
13328 tg3_phy_set_wirespeed(tp);
13330 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13331 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13332 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13333 if (!tg3_copper_is_advertising_all(tp, mask)) {
13334 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13335 tp->link_config.flowctrl);
13337 tg3_writephy(tp, MII_BMCR,
13338 BMCR_ANENABLE | BMCR_ANRESTART);
13342 skip_phy_reset:
13343 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13344 err = tg3_init_5401phy_dsp(tp);
13345 if (err)
13346 return err;
13348 err = tg3_init_5401phy_dsp(tp);
13351 return err;
13354 static void __devinit tg3_read_vpd(struct tg3 *tp)
13356 u8 *vpd_data;
13357 unsigned int block_end, rosize, len;
13358 u32 vpdlen;
13359 int j, i = 0;
13361 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13362 if (!vpd_data)
13363 goto out_no_vpd;
13365 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13366 if (i < 0)
13367 goto out_not_found;
13369 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13370 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13371 i += PCI_VPD_LRDT_TAG_SIZE;
13373 if (block_end > vpdlen)
13374 goto out_not_found;
13376 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13377 PCI_VPD_RO_KEYWORD_MFR_ID);
13378 if (j > 0) {
13379 len = pci_vpd_info_field_size(&vpd_data[j]);
13381 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13382 if (j + len > block_end || len != 4 ||
13383 memcmp(&vpd_data[j], "1028", 4))
13384 goto partno;
13386 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13387 PCI_VPD_RO_KEYWORD_VENDOR0);
13388 if (j < 0)
13389 goto partno;
13391 len = pci_vpd_info_field_size(&vpd_data[j]);
13393 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13394 if (j + len > block_end)
13395 goto partno;
13397 memcpy(tp->fw_ver, &vpd_data[j], len);
13398 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13401 partno:
13402 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13403 PCI_VPD_RO_KEYWORD_PARTNO);
13404 if (i < 0)
13405 goto out_not_found;
13407 len = pci_vpd_info_field_size(&vpd_data[i]);
13409 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13410 if (len > TG3_BPN_SIZE ||
13411 (len + i) > vpdlen)
13412 goto out_not_found;
13414 memcpy(tp->board_part_number, &vpd_data[i], len);
13416 out_not_found:
13417 kfree(vpd_data);
13418 if (tp->board_part_number[0])
13419 return;
13421 out_no_vpd:
13422 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13423 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13424 strcpy(tp->board_part_number, "BCM5717");
13425 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13426 strcpy(tp->board_part_number, "BCM5718");
13427 else
13428 goto nomatch;
13429 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13430 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13431 strcpy(tp->board_part_number, "BCM57780");
13432 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13433 strcpy(tp->board_part_number, "BCM57760");
13434 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13435 strcpy(tp->board_part_number, "BCM57790");
13436 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13437 strcpy(tp->board_part_number, "BCM57788");
13438 else
13439 goto nomatch;
13440 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13441 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13442 strcpy(tp->board_part_number, "BCM57761");
13443 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13444 strcpy(tp->board_part_number, "BCM57765");
13445 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13446 strcpy(tp->board_part_number, "BCM57781");
13447 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13448 strcpy(tp->board_part_number, "BCM57785");
13449 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13450 strcpy(tp->board_part_number, "BCM57791");
13451 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13452 strcpy(tp->board_part_number, "BCM57795");
13453 else
13454 goto nomatch;
13455 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13456 strcpy(tp->board_part_number, "BCM95906");
13457 } else {
13458 nomatch:
13459 strcpy(tp->board_part_number, "none");
13463 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13465 u32 val;
13467 if (tg3_nvram_read(tp, offset, &val) ||
13468 (val & 0xfc000000) != 0x0c000000 ||
13469 tg3_nvram_read(tp, offset + 4, &val) ||
13470 val != 0)
13471 return 0;
13473 return 1;
13476 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13478 u32 val, offset, start, ver_offset;
13479 int i, dst_off;
13480 bool newver = false;
13482 if (tg3_nvram_read(tp, 0xc, &offset) ||
13483 tg3_nvram_read(tp, 0x4, &start))
13484 return;
13486 offset = tg3_nvram_logical_addr(tp, offset);
13488 if (tg3_nvram_read(tp, offset, &val))
13489 return;
13491 if ((val & 0xfc000000) == 0x0c000000) {
13492 if (tg3_nvram_read(tp, offset + 4, &val))
13493 return;
13495 if (val == 0)
13496 newver = true;
13499 dst_off = strlen(tp->fw_ver);
13501 if (newver) {
13502 if (TG3_VER_SIZE - dst_off < 16 ||
13503 tg3_nvram_read(tp, offset + 8, &ver_offset))
13504 return;
13506 offset = offset + ver_offset - start;
13507 for (i = 0; i < 16; i += 4) {
13508 __be32 v;
13509 if (tg3_nvram_read_be32(tp, offset + i, &v))
13510 return;
13512 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13514 } else {
13515 u32 major, minor;
13517 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13518 return;
13520 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13521 TG3_NVM_BCVER_MAJSFT;
13522 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13523 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13524 "v%d.%02d", major, minor);
13528 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13530 u32 val, major, minor;
13532 /* Use native endian representation */
13533 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13534 return;
13536 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13537 TG3_NVM_HWSB_CFG1_MAJSFT;
13538 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13539 TG3_NVM_HWSB_CFG1_MINSFT;
13541 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13544 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13546 u32 offset, major, minor, build;
13548 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13550 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13551 return;
13553 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13554 case TG3_EEPROM_SB_REVISION_0:
13555 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13556 break;
13557 case TG3_EEPROM_SB_REVISION_2:
13558 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13559 break;
13560 case TG3_EEPROM_SB_REVISION_3:
13561 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13562 break;
13563 case TG3_EEPROM_SB_REVISION_4:
13564 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13565 break;
13566 case TG3_EEPROM_SB_REVISION_5:
13567 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13568 break;
13569 case TG3_EEPROM_SB_REVISION_6:
13570 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13571 break;
13572 default:
13573 return;
13576 if (tg3_nvram_read(tp, offset, &val))
13577 return;
13579 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13580 TG3_EEPROM_SB_EDH_BLD_SHFT;
13581 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13582 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13583 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13585 if (minor > 99 || build > 26)
13586 return;
13588 offset = strlen(tp->fw_ver);
13589 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13590 " v%d.%02d", major, minor);
13592 if (build > 0) {
13593 offset = strlen(tp->fw_ver);
13594 if (offset < TG3_VER_SIZE - 1)
13595 tp->fw_ver[offset] = 'a' + build - 1;
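/* Build numbers 1-26 are encoded as a trailing letter 'a'-'z',
 * which is why build > 26 was rejected above.
 */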
13599 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13601 u32 val, offset, start;
13602 int i, vlen;
13604 for (offset = TG3_NVM_DIR_START;
13605 offset < TG3_NVM_DIR_END;
13606 offset += TG3_NVM_DIRENT_SIZE) {
13607 if (tg3_nvram_read(tp, offset, &val))
13608 return;
13610 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13611 break;
13614 if (offset == TG3_NVM_DIR_END)
13615 return;
13617 if (!tg3_flag(tp, 5705_PLUS))
13618 start = 0x08000000;
13619 else if (tg3_nvram_read(tp, offset - 4, &start))
13620 return;
13622 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13623 !tg3_fw_img_is_valid(tp, offset) ||
13624 tg3_nvram_read(tp, offset + 8, &val))
13625 return;
13627 offset += val - start;
13629 vlen = strlen(tp->fw_ver);
13631 tp->fw_ver[vlen++] = ',';
13632 tp->fw_ver[vlen++] = ' ';
13634 for (i = 0; i < 4; i++) {
13635 __be32 v;
13636 if (tg3_nvram_read_be32(tp, offset, &v))
13637 return;
13639 offset += sizeof(v);
13641 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13642 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13643 break;
13646 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13647 vlen += sizeof(v);
13651 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13653 int vlen;
13654 u32 apedata;
13655 char *fwtype;
13657 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13658 return;
13660 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13661 if (apedata != APE_SEG_SIG_MAGIC)
13662 return;
13664 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13665 if (!(apedata & APE_FW_STATUS_READY))
13666 return;
13668 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13670 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13671 tg3_flag_set(tp, APE_HAS_NCSI);
13672 fwtype = "NCSI";
13673 } else {
13674 fwtype = "DASH";
13677 vlen = strlen(tp->fw_ver);
13679 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13680 fwtype,
13681 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13682 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13683 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13684 (apedata & APE_FW_VERSION_BLDMSK));
13687 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13689 u32 val;
13690 bool vpd_vers = false;
13692 if (tp->fw_ver[0] != 0)
13693 vpd_vers = true;
13695 if (tg3_flag(tp, NO_NVRAM)) {
13696 strcat(tp->fw_ver, "sb");
13697 return;
13700 if (tg3_nvram_read(tp, 0, &val))
13701 return;
13703 if (val == TG3_EEPROM_MAGIC)
13704 tg3_read_bc_ver(tp);
13705 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13706 tg3_read_sb_ver(tp, val);
13707 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13708 tg3_read_hwsb_ver(tp);
13709 else
13710 return;
13712 if (vpd_vers)
13713 goto done;
13715 if (tg3_flag(tp, ENABLE_APE)) {
13716 if (tg3_flag(tp, ENABLE_ASF))
13717 tg3_read_dash_ver(tp);
13718 } else if (tg3_flag(tp, ENABLE_ASF)) {
13719 tg3_read_mgmtfw_ver(tp);
13722 done:
13723 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13726 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13728 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13730 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13731 return TG3_RX_RET_MAX_SIZE_5717;
13732 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13733 return TG3_RX_RET_MAX_SIZE_5700;
13734 else
13735 return TG3_RX_RET_MAX_SIZE_5705;
13738 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13739 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13740 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13741 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13742 { },
13745 static int __devinit tg3_get_invariants(struct tg3 *tp)
13747 u32 misc_ctrl_reg;
13748 u32 pci_state_reg, grc_misc_cfg;
13749 u32 val;
13750 u16 pci_cmd;
13751 int err;
13753 /* Force memory write invalidate off. If we leave it on,
13754 * then on 5700_BX chips we have to enable a workaround.
13755 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13756 * to match the cacheline size. The Broadcom driver has this
13757 * workaround but turns MWI off at all times and so never uses
13758 * it. This suggests that the workaround is insufficient.
13760 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13761 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13762 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13764 /* Important! -- Make sure register accesses are byteswapped
13765 * correctly. Also, for those chips that require it, make
13766 * sure that indirect register accesses are enabled before
13767 * the first operation.
13769 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13770 &misc_ctrl_reg);
13771 tp->misc_host_ctrl |= (misc_ctrl_reg &
13772 MISC_HOST_CTRL_CHIPREV);
13773 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13774 tp->misc_host_ctrl);
13776 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13777 MISC_HOST_CTRL_CHIPREV_SHIFT);
13778 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13779 u32 prod_id_asic_rev;
13781 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13782 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13783 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13784 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13785 pci_read_config_dword(tp->pdev,
13786 TG3PCI_GEN2_PRODID_ASICREV,
13787 &prod_id_asic_rev);
13788 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13789 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13790 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13791 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13792 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13793 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13794 pci_read_config_dword(tp->pdev,
13795 TG3PCI_GEN15_PRODID_ASICREV,
13796 &prod_id_asic_rev);
13797 else
13798 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13799 &prod_id_asic_rev);
13801 tp->pci_chip_rev_id = prod_id_asic_rev;
13804 /* Wrong chip ID in 5752 A0. This code can be removed later
13805 * as A0 is not in production.
13807 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13808 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13810 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13811 * we need to disable memory and use config. cycles
13812 * only to access all registers. The 5702/03 chips
13813 * can mistakenly decode the special cycles from the
13814 * ICH chipsets as memory write cycles, causing corruption
13815 * of register and memory space. Only certain ICH bridges
13816 * will drive special cycles with non-zero data during the
13817 * address phase which can fall within the 5703's address
13818 * range. This is not an ICH bug as the PCI spec allows
13819 * non-zero address during special cycles. However, only
13820 * these ICH bridges are known to drive non-zero addresses
13821 * during special cycles.
13823 * Since special cycles do not cross PCI bridges, we only
13824 * enable this workaround if the 5703 is on the secondary
13825 * bus of these ICH bridges.
13827 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13828 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13829 static struct tg3_dev_id {
13830 u32 vendor;
13831 u32 device;
13832 u32 rev;
13833 } ich_chipsets[] = {
13834 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13835 PCI_ANY_ID },
13836 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13837 PCI_ANY_ID },
13838 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13839 0xa },
13840 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13841 PCI_ANY_ID },
13842 { },
13844 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13845 struct pci_dev *bridge = NULL;
13847 while (pci_id->vendor != 0) {
13848 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13849 bridge);
13850 if (!bridge) {
13851 pci_id++;
13852 continue;
13854 if (pci_id->rev != PCI_ANY_ID) {
13855 if (bridge->revision > pci_id->rev)
13856 continue;
13858 if (bridge->subordinate &&
13859 (bridge->subordinate->number ==
13860 tp->pdev->bus->number)) {
13861 tg3_flag_set(tp, ICH_WORKAROUND);
13862 pci_dev_put(bridge);
13863 break;
13868 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13869 static struct tg3_dev_id {
13870 u32 vendor;
13871 u32 device;
13872 } bridge_chipsets[] = {
13873 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13874 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13875 { },
13877 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13878 struct pci_dev *bridge = NULL;
13880 while (pci_id->vendor != 0) {
13881 bridge = pci_get_device(pci_id->vendor,
13882 pci_id->device,
13883 bridge);
13884 if (!bridge) {
13885 pci_id++;
13886 continue;
13888 if (bridge->subordinate &&
13889 (bridge->subordinate->number <=
13890 tp->pdev->bus->number) &&
13891 (bridge->subordinate->subordinate >=
13892 tp->pdev->bus->number)) {
13893 tg3_flag_set(tp, 5701_DMA_BUG);
13894 pci_dev_put(bridge);
13895 break;
13900 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13901 * DMA addresses > 40-bit. This bridge may have additional
13902 * 57xx devices behind it, for example in some 4-port NIC designs.
13903 * Any tg3 device found behind the bridge will also need the 40-bit
13904 * DMA workaround.
13906 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13907 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13908 tg3_flag_set(tp, 5780_CLASS);
13909 tg3_flag_set(tp, 40BIT_DMA_BUG);
13910 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13911 } else {
13912 struct pci_dev *bridge = NULL;
13914 do {
13915 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13916 PCI_DEVICE_ID_SERVERWORKS_EPB,
13917 bridge);
13918 if (bridge && bridge->subordinate &&
13919 (bridge->subordinate->number <=
13920 tp->pdev->bus->number) &&
13921 (bridge->subordinate->subordinate >=
13922 tp->pdev->bus->number)) {
13923 tg3_flag_set(tp, 40BIT_DMA_BUG);
13924 pci_dev_put(bridge);
13925 break;
13927 } while (bridge);
13930 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13931 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13932 tp->pdev_peer = tg3_find_peer(tp);
13934 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13935 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13936 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13937 tg3_flag_set(tp, 5717_PLUS);
13939 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13940 tg3_flag(tp, 5717_PLUS))
13941 tg3_flag_set(tp, 57765_PLUS);
13943 /* Intentionally exclude ASIC_REV_5906 */
13944 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13945 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13946 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13947 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13948 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13949 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13950 tg3_flag(tp, 57765_PLUS))
13951 tg3_flag_set(tp, 5755_PLUS);
13953 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13954 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13955 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13956 tg3_flag(tp, 5755_PLUS) ||
13957 tg3_flag(tp, 5780_CLASS))
13958 tg3_flag_set(tp, 5750_PLUS);
13960 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13961 tg3_flag(tp, 5750_PLUS))
13962 tg3_flag_set(tp, 5705_PLUS);
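/* The *_PLUS flags set above form a strict superset chain:
 * 5717_PLUS implies 57765_PLUS implies 5755_PLUS implies
 * 5750_PLUS implies 5705_PLUS, so later code can test the
 * broadest applicable generation with a single flag.
 */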
13964 /* Determine TSO capabilities */
13965 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13966 ; /* Do nothing. HW bug. */
13967 else if (tg3_flag(tp, 57765_PLUS))
13968 tg3_flag_set(tp, HW_TSO_3);
13969 else if (tg3_flag(tp, 5755_PLUS) ||
13970 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13971 tg3_flag_set(tp, HW_TSO_2);
13972 else if (tg3_flag(tp, 5750_PLUS)) {
13973 tg3_flag_set(tp, HW_TSO_1);
13974 tg3_flag_set(tp, TSO_BUG);
13975 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13976 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13977 tg3_flag_clear(tp, TSO_BUG);
13978 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13979 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13980 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13981 tg3_flag_set(tp, TSO_BUG);
13982 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13983 tp->fw_needed = FIRMWARE_TG3TSO5;
13984 else
13985 tp->fw_needed = FIRMWARE_TG3TSO;
13988 /* Selectively allow TSO based on operating conditions */
13989 if (tg3_flag(tp, HW_TSO_1) ||
13990 tg3_flag(tp, HW_TSO_2) ||
13991 tg3_flag(tp, HW_TSO_3) ||
13992 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13993 tg3_flag_set(tp, TSO_CAPABLE);
13994 else {
13995 tg3_flag_clear(tp, TSO_CAPABLE);
13996 tg3_flag_clear(tp, TSO_BUG);
13997 tp->fw_needed = NULL;
14000 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14001 tp->fw_needed = FIRMWARE_TG3;
14003 tp->irq_max = 1;
14005 if (tg3_flag(tp, 5750_PLUS)) {
14006 tg3_flag_set(tp, SUPPORT_MSI);
14007 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14008 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14009 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14010 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14011 tp->pdev_peer == tp->pdev))
14012 tg3_flag_clear(tp, SUPPORT_MSI);
14014 if (tg3_flag(tp, 5755_PLUS) ||
14015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14016 tg3_flag_set(tp, 1SHOT_MSI);
14019 if (tg3_flag(tp, 57765_PLUS)) {
14020 tg3_flag_set(tp, SUPPORT_MSIX);
14021 tp->irq_max = TG3_IRQ_MAX_VECS;
14025 if (tg3_flag(tp, 5755_PLUS))
14026 tg3_flag_set(tp, SHORT_DMA_BUG);
14028 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14029 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14031 if (tg3_flag(tp, 5717_PLUS))
14032 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14034 if (tg3_flag(tp, 57765_PLUS) &&
14035 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14036 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14038 if (!tg3_flag(tp, 5705_PLUS) ||
14039 tg3_flag(tp, 5780_CLASS) ||
14040 tg3_flag(tp, USE_JUMBO_BDFLAG))
14041 tg3_flag_set(tp, JUMBO_CAPABLE);
14043 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14044 &pci_state_reg);
14046 if (pci_is_pcie(tp->pdev)) {
14047 u16 lnkctl;
14049 tg3_flag_set(tp, PCI_EXPRESS);
14051 tp->pcie_readrq = 4096;
14052 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14053 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14054 tp->pcie_readrq = 2048;
14056 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
14058 pci_read_config_word(tp->pdev,
14059 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14060 &lnkctl);
14061 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14062 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14063 ASIC_REV_5906) {
14064 tg3_flag_clear(tp, HW_TSO_2);
14065 tg3_flag_clear(tp, TSO_CAPABLE);
14067 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14068 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14069 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14070 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14071 tg3_flag_set(tp, CLKREQ_BUG);
14072 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14073 tg3_flag_set(tp, L1PLLPD_EN);
14075 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14076 /* BCM5785 devices are effectively PCIe devices, and should
14077 * follow PCIe codepaths, but do not have a PCIe capabilities
14078 * section.
14080 tg3_flag_set(tp, PCI_EXPRESS);
14081 } else if (!tg3_flag(tp, 5705_PLUS) ||
14082 tg3_flag(tp, 5780_CLASS)) {
14083 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14084 if (!tp->pcix_cap) {
14085 dev_err(&tp->pdev->dev,
14086 "Cannot find PCI-X capability, aborting\n");
14087 return -EIO;
14090 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14091 tg3_flag_set(tp, PCIX_MODE);
14094 /* If we have an AMD 762 or VIA K8T800 chipset, write
14095 * reordering to the mailbox registers done by the host
14096 * controller can cause major troubles. We read back from
14097 * every mailbox register write to force the writes to be
14098 * posted to the chip in order.
14100 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14101 !tg3_flag(tp, PCI_EXPRESS))
14102 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14104 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14105 &tp->pci_cacheline_sz);
14106 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14107 &tp->pci_lat_timer);
14108 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14109 tp->pci_lat_timer < 64) {
14110 tp->pci_lat_timer = 64;
14111 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14112 tp->pci_lat_timer);
14115 /* Important! -- It is critical that the PCI-X hw workaround
14116 * situation is decided before the first MMIO register access.
14118 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14119 /* 5700 BX chips need to have their TX producer index
14120 * mailboxes written twice to workaround a bug.
14122 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14124 /* If we are in PCI-X mode, enable register write workaround.
14126 * The workaround is to use indirect register accesses
14127 * for all chip writes not to mailbox registers.
14129 if (tg3_flag(tp, PCIX_MODE)) {
14130 u32 pm_reg;
14132 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14134 /* The chip can have its power management PCI config
14135 * space registers clobbered due to this bug.
14136 * So explicitly force the chip into D0 here.
14138 pci_read_config_dword(tp->pdev,
14139 tp->pm_cap + PCI_PM_CTRL,
14140 &pm_reg);
14141 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14142 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14143 pci_write_config_dword(tp->pdev,
14144 tp->pm_cap + PCI_PM_CTRL,
14145 pm_reg);
14147 /* Also, force SERR#/PERR# in PCI command. */
14148 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14149 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14150 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14154 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14155 tg3_flag_set(tp, PCI_HIGH_SPEED);
14156 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14157 tg3_flag_set(tp, PCI_32BIT);
14159 /* Chip-specific fixup from Broadcom driver */
14160 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14161 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14162 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14163 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14166 /* Default fast path register access methods */
14167 tp->read32 = tg3_read32;
14168 tp->write32 = tg3_write32;
14169 tp->read32_mbox = tg3_read32;
14170 tp->write32_mbox = tg3_write32;
14171 tp->write32_tx_mbox = tg3_write32;
14172 tp->write32_rx_mbox = tg3_write32;
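/* For reference, the tr32()/tw32() accessors used throughout this file
 * dispatch through the method pointers assigned above, roughly (a
 * simplified paraphrase of the tg3.h macros, not their exact text):
 *
 *	#define tw32(reg, val)	tp->write32(tp, (reg), (val))
 *	#define tr32(reg)	tp->read32(tp, (reg))
 *
 * so retargeting a single pointer below switches every register access
 * in the driver over to the matching workaround path.
 */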
14174 /* Various workaround register access methods */
14175 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14176 tp->write32 = tg3_write_indirect_reg32;
14177 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14178 (tg3_flag(tp, PCI_EXPRESS) &&
14179 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14180 /*
14181 * Back to back register writes can cause problems on these
14182 * chips, the workaround is to read back all reg writes
14183 * except those to mailbox regs.
14184 *
14185 * See tg3_write_indirect_reg32().
14186 */
14187 tp->write32 = tg3_write_flush_reg32;
14188 }
14190 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14191 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14192 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14193 tp->write32_rx_mbox = tg3_write_flush_reg32;
14194 }
14196 if (tg3_flag(tp, ICH_WORKAROUND)) {
14197 tp->read32 = tg3_read_indirect_reg32;
14198 tp->write32 = tg3_write_indirect_reg32;
14199 tp->read32_mbox = tg3_read_indirect_mbox;
14200 tp->write32_mbox = tg3_write_indirect_mbox;
14201 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14202 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14204 iounmap(tp->regs);
14205 tp->regs = NULL;
14207 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14208 pci_cmd &= ~PCI_COMMAND_MEMORY;
14209 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14210 }
14211 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14212 tp->read32_mbox = tg3_read32_mbox_5906;
14213 tp->write32_mbox = tg3_write32_mbox_5906;
14214 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14215 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14216 }
14218 if (tp->write32 == tg3_write_indirect_reg32 ||
14219 (tg3_flag(tp, PCIX_MODE) &&
14220 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14221 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14222 tg3_flag_set(tp, SRAM_USE_CONFIG);
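/* When SRAM_USE_CONFIG is set, SRAM is reached through the PCI config
 * space window instead of direct MMIO, using the
 * TG3PCI_MEM_WIN_BASE_ADDR/TG3PCI_MEM_WIN_DATA register pair that
 * tg3_do_test_dma() below also uses, e.g.:
 *
 *	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
 *	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 *
 * (Sketch for illustration only; the real accessors also serialize on
 * tp->indirect_lock.)
 */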
14224 /* The memory arbiter has to be enabled in order for SRAM accesses
14225 * to succeed. Normally on powerup the tg3 chip firmware will make
14226 * sure it is enabled, but other entities such as system netboot
14227 * code might disable it.
14228 */
14229 val = tr32(MEMARB_MODE);
14230 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14232 if (tg3_flag(tp, PCIX_MODE)) {
14233 pci_read_config_dword(tp->pdev,
14234 tp->pcix_cap + PCI_X_STATUS, &val);
14235 tp->pci_fn = val & 0x7;
14236 } else {
14237 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14238 }
14240 /* Get eeprom hw config before calling tg3_set_power_state().
14241 * In particular, the TG3_FLAG_IS_NIC flag must be
14242 * determined before calling tg3_set_power_state() so that
14243 * we know whether or not to switch out of Vaux power.
14244 * When the flag is set, it means that GPIO1 is used for eeprom
14245 * write protect and also implies that it is a LOM where GPIOs
14246 * are not used to switch power.
14247 */
14248 tg3_get_eeprom_hw_cfg(tp);
14250 if (tg3_flag(tp, ENABLE_APE)) {
14251 /* Allow reads and writes to the
14252 * APE register and memory space.
14253 */
14254 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14255 PCISTATE_ALLOW_APE_SHMEM_WR |
14256 PCISTATE_ALLOW_APE_PSPACE_WR;
14257 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14258 pci_state_reg);
14260 tg3_ape_lock_init(tp);
14261 }
14263 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14265 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14266 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14267 tg3_flag(tp, 57765_PLUS))
14268 tg3_flag_set(tp, CPMU_PRESENT);
14270 /* Set up tp->grc_local_ctrl before calling
14271 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14272 * will bring 5700's external PHY out of reset.
14273 * It is also used as eeprom write protect on LOMs.
14274 */
14275 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14277 tg3_flag(tp, EEPROM_WRITE_PROT))
14278 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14279 GRC_LCLCTRL_GPIO_OUTPUT1);
14280 /* Unused GPIO3 must be driven as output on 5752 because there
14281 * are no pull-up resistors on unused GPIO pins.
14282 */
14283 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14284 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14286 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14287 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14288 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14289 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14291 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14292 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14293 /* Turn off the debug UART. */
14294 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14295 if (tg3_flag(tp, IS_NIC))
14296 /* Keep VMain power. */
14297 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14298 GRC_LCLCTRL_GPIO_OUTPUT0;
14299 }
14301 /* Switch out of Vaux if it is a NIC */
14302 tg3_pwrsrc_switch_to_vmain(tp);
14304 /* Derive initial jumbo mode from MTU assigned in
14305 * ether_setup() via the alloc_etherdev() call
14306 */
14307 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14308 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14310 /* Determine WakeOnLan speed to use. */
14311 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14312 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14313 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14314 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14315 tg3_flag_clear(tp, WOL_SPEED_100MB);
14316 } else {
14317 tg3_flag_set(tp, WOL_SPEED_100MB);
14318 }
14320 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14321 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14323 /* A few boards don't want Ethernet@WireSpeed phy feature */
14324 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14325 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14326 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14327 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14328 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14329 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14330 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14332 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14333 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14334 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14335 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14336 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14338 if (tg3_flag(tp, 5705_PLUS) &&
14339 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14340 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14341 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14342 !tg3_flag(tp, 57765_PLUS)) {
14343 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14344 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14345 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14346 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14347 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14348 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14349 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14350 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14351 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14352 } else
14353 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14354 }
14356 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14357 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14358 tp->phy_otp = tg3_read_otp_phycfg(tp);
14359 if (tp->phy_otp == 0)
14360 tp->phy_otp = TG3_OTP_DEFAULT;
14361 }
14363 if (tg3_flag(tp, CPMU_PRESENT))
14364 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14365 else
14366 tp->mi_mode = MAC_MI_MODE_BASE;
14368 tp->coalesce_mode = 0;
14369 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14370 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14371 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14373 /* Set these bits to enable statistics workaround. */
14374 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14375 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14376 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14377 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14378 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14379 }
14381 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14382 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14383 tg3_flag_set(tp, USE_PHYLIB);
14385 err = tg3_mdio_init(tp);
14386 if (err)
14387 return err;
14389 /* Initialize data/descriptor byte/word swapping. */
14390 val = tr32(GRC_MODE);
14391 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14392 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14393 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14394 GRC_MODE_B2HRX_ENABLE |
14395 GRC_MODE_HTX2B_ENABLE |
14396 GRC_MODE_HOST_STACKUP);
14397 else
14398 val &= GRC_MODE_HOST_STACKUP;
14400 tw32(GRC_MODE, val | tp->grc_mode);
14402 tg3_switch_clocks(tp);
14404 /* Clear this out for sanity. */
14405 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14407 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14408 &pci_state_reg);
14409 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14410 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14411 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14413 if (chiprevid == CHIPREV_ID_5701_A0 ||
14414 chiprevid == CHIPREV_ID_5701_B0 ||
14415 chiprevid == CHIPREV_ID_5701_B2 ||
14416 chiprevid == CHIPREV_ID_5701_B5) {
14417 void __iomem *sram_base;
14419 /* Write some dummy words into the SRAM status block
14420 * area, see if it reads back correctly. If the return
14421 * value is bad, force enable the PCIX workaround.
14422 */
14423 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14425 writel(0x00000000, sram_base);
14426 writel(0x00000000, sram_base + 4);
14427 writel(0xffffffff, sram_base + 4);
14428 if (readl(sram_base) != 0x00000000)
14429 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14430 }
14431 }
14433 udelay(50);
14434 tg3_nvram_init(tp);
14436 grc_misc_cfg = tr32(GRC_MISC_CFG);
14437 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14439 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14440 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14441 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14442 tg3_flag_set(tp, IS_5788);
14444 if (!tg3_flag(tp, IS_5788) &&
14445 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14446 tg3_flag_set(tp, TAGGED_STATUS);
14447 if (tg3_flag(tp, TAGGED_STATUS)) {
14448 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14449 HOSTCC_MODE_CLRTICK_TXBD);
14451 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14452 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14453 tp->misc_host_ctrl);
14454 }
14456 /* Preserve the APE MAC_MODE bits */
14457 if (tg3_flag(tp, ENABLE_APE))
14458 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14459 else
14460 tp->mac_mode = 0;
14462 /* these are limited to 10/100 only */
14463 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14464 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14465 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14466 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14467 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14468 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14469 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14470 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14471 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14472 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14473 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14474 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14475 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14476 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14477 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14478 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14480 err = tg3_phy_probe(tp);
14481 if (err) {
14482 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14483 /* ... but do not return immediately ... */
14484 tg3_mdio_fini(tp);
14485 }
14487 tg3_read_vpd(tp);
14488 tg3_read_fw_ver(tp);
14490 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14491 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14492 } else {
14493 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14494 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14495 else
14496 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14497 }
14499 /* 5700 {AX,BX} chips have a broken status block link
14500 * change bit implementation, so we must use the
14501 * status register in those cases.
14502 */
14503 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14504 tg3_flag_set(tp, USE_LINKCHG_REG);
14505 else
14506 tg3_flag_clear(tp, USE_LINKCHG_REG);
14508 /* The led_ctrl is set during tg3_phy_probe, here we might
14509 * have to force the link status polling mechanism based
14510 * upon subsystem IDs.
14511 */
14512 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14513 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14514 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14515 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14516 tg3_flag_set(tp, USE_LINKCHG_REG);
14517 }
14519 /* For all SERDES we poll the MAC status register. */
14520 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14521 tg3_flag_set(tp, POLL_SERDES);
14522 else
14523 tg3_flag_clear(tp, POLL_SERDES);
14525 tp->rx_offset = NET_IP_ALIGN;
14526 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14527 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14528 tg3_flag(tp, PCIX_MODE)) {
14529 tp->rx_offset = 0;
14530 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14531 tp->rx_copy_thresh = ~(u16)0;
14532 #endif
14533 }
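/* Note: with rx_offset forced to 0 the IP header is no longer
 * NET_IP_ALIGN-aligned, so on architectures without efficient
 * unaligned access the ~(u16)0 copy threshold above effectively makes
 * the driver copy every received packet into an aligned skb. */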
14535 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14536 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14537 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14539 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14541 /* Increment the rx prod index on the rx std ring by at most
14542 * 8 for these chips to work around hw errata.
14543 */
14544 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14545 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14546 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14547 tp->rx_std_max_post = 8;
14549 if (tg3_flag(tp, ASPM_WORKAROUND))
14550 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14551 PCIE_PWR_MGMT_L1_THRESH_MSK;
14553 return err;
14554 }
14556 #ifdef CONFIG_SPARC
14557 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14558 {
14559 struct net_device *dev = tp->dev;
14560 struct pci_dev *pdev = tp->pdev;
14561 struct device_node *dp = pci_device_to_OF_node(pdev);
14562 const unsigned char *addr;
14563 int len;
14565 addr = of_get_property(dp, "local-mac-address", &len);
14566 if (addr && len == 6) {
14567 memcpy(dev->dev_addr, addr, 6);
14568 memcpy(dev->perm_addr, dev->dev_addr, 6);
14569 return 0;
14570 }
14571 return -ENODEV;
14572 }
14574 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14575 {
14576 struct net_device *dev = tp->dev;
14578 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14579 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14580 return 0;
14581 }
14582 #endif
14584 static int __devinit tg3_get_device_address(struct tg3 *tp)
14585 {
14586 struct net_device *dev = tp->dev;
14587 u32 hi, lo, mac_offset;
14588 int addr_ok = 0;
14590 #ifdef CONFIG_SPARC
14591 if (!tg3_get_macaddr_sparc(tp))
14592 return 0;
14593 #endif
14595 mac_offset = 0x7c;
14596 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14597 tg3_flag(tp, 5780_CLASS)) {
14598 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14599 mac_offset = 0xcc;
14600 if (tg3_nvram_lock(tp))
14601 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14602 else
14603 tg3_nvram_unlock(tp);
14604 } else if (tg3_flag(tp, 5717_PLUS)) {
14605 if (tp->pci_fn & 1)
14606 mac_offset = 0xcc;
14607 if (tp->pci_fn > 1)
14608 mac_offset += 0x18c;
14609 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14610 mac_offset = 0x10;
14612 /* First try to get it from MAC address mailbox. */
14613 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14614 if ((hi >> 16) == 0x484b) {
14615 dev->dev_addr[0] = (hi >> 8) & 0xff;
14616 dev->dev_addr[1] = (hi >> 0) & 0xff;
14618 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14619 dev->dev_addr[2] = (lo >> 24) & 0xff;
14620 dev->dev_addr[3] = (lo >> 16) & 0xff;
14621 dev->dev_addr[4] = (lo >> 8) & 0xff;
14622 dev->dev_addr[5] = (lo >> 0) & 0xff;
14623 }
14624 /* Some old bootcode may report a 0 MAC address in SRAM */
14625 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14627 if (!addr_ok) {
14628 /* Next, try NVRAM. */
14629 if (!tg3_flag(tp, NO_NVRAM) &&
14630 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14631 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14632 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14633 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14634 }
14635 /* Finally just fetch it out of the MAC control regs. */
14636 else {
14637 hi = tr32(MAC_ADDR_0_HIGH);
14638 lo = tr32(MAC_ADDR_0_LOW);
14640 dev->dev_addr[5] = lo & 0xff;
14641 dev->dev_addr[4] = (lo >> 8) & 0xff;
14642 dev->dev_addr[3] = (lo >> 16) & 0xff;
14643 dev->dev_addr[2] = (lo >> 24) & 0xff;
14644 dev->dev_addr[1] = hi & 0xff;
14645 dev->dev_addr[0] = (hi >> 8) & 0xff;
14646 }
14647 }
14649 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14650 #ifdef CONFIG_SPARC
14651 if (!tg3_get_default_macaddr_sparc(tp))
14652 return 0;
14653 #endif
14654 return -EINVAL;
14655 }
14656 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14657 return 0;
14658 }
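/* Minimal standalone restatement of the SRAM mailbox decoding above,
 * for illustration only (hypothetical helper, not used by the
 * driver): the high word carries MAC bytes 0-1 beneath the 0x484b
 * tag, the low word carries bytes 2-5, most significant byte first.
 */
static inline void tg3_example_mac_from_words(u32 hi, u32 lo, u8 *mac)
{
	mac[0] = (hi >> 8) & 0xff;
	mac[1] = (hi >> 0) & 0xff;
	mac[2] = (lo >> 24) & 0xff;
	mac[3] = (lo >> 16) & 0xff;
	mac[4] = (lo >> 8) & 0xff;
	mac[5] = (lo >> 0) & 0xff;
}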
14660 #define BOUNDARY_SINGLE_CACHELINE 1
14661 #define BOUNDARY_MULTI_CACHELINE 2
14663 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14664 {
14665 int cacheline_size;
14666 u8 byte;
14667 int goal;
14669 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14670 if (byte == 0)
14671 cacheline_size = 1024;
14672 else
14673 cacheline_size = (int) byte * 4;
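/* PCI_CACHE_LINE_SIZE is programmed in units of 32-bit words, hence
 * the multiply by 4: a register value of 0x10 (16 words) means a
 * 64-byte cache line.  A value of 0 (never programmed) is treated
 * pessimistically as 1024 bytes above. */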
14675 /* On 5703 and later chips, the boundary bits have no
14676 * effect.
14677 */
14678 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14679 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14680 !tg3_flag(tp, PCI_EXPRESS))
14681 goto out;
14683 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14684 goal = BOUNDARY_MULTI_CACHELINE;
14685 #else
14686 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14687 goal = BOUNDARY_SINGLE_CACHELINE;
14688 #else
14689 goal = 0;
14690 #endif
14691 #endif
14693 if (tg3_flag(tp, 57765_PLUS)) {
14694 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14695 goto out;
14696 }
14698 if (!goal)
14699 goto out;
14701 /* PCI controllers on most RISC systems tend to disconnect
14702 * when a device tries to burst across a cache-line boundary.
14703 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14704 *
14705 * Unfortunately, for PCI-E there are only limited
14706 * write-side controls for this, and thus for reads
14707 * we will still get the disconnects. We'll also waste
14708 * these PCI cycles for both read and write for chips
14709 * other than 5700 and 5701 which do not implement the
14710 * boundary bits.
14711 */
14712 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14713 switch (cacheline_size) {
14714 case 16:
14715 case 32:
14716 case 64:
14717 case 128:
14718 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14719 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14720 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14721 } else {
14722 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14723 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14724 }
14725 break;
14727 case 256:
14728 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14729 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14730 break;
14732 default:
14733 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14734 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14735 break;
14736 }
14737 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14738 switch (cacheline_size) {
14739 case 16:
14740 case 32:
14741 case 64:
14742 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14743 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14744 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14745 break;
14746 }
14747 /* fallthrough */
14748 case 128:
14749 default:
14750 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14751 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14752 break;
14753 }
14754 } else {
14755 switch (cacheline_size) {
14756 case 16:
14757 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14758 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14759 DMA_RWCTRL_WRITE_BNDRY_16);
14760 break;
14761 }
14762 /* fallthrough */
14763 case 32:
14764 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14765 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14766 DMA_RWCTRL_WRITE_BNDRY_32);
14767 break;
14768 }
14769 /* fallthrough */
14770 case 64:
14771 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14772 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14773 DMA_RWCTRL_WRITE_BNDRY_64);
14774 break;
14775 }
14776 /* fallthrough */
14777 case 128:
14778 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14779 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14780 DMA_RWCTRL_WRITE_BNDRY_128);
14781 break;
14782 }
14783 /* fallthrough */
14784 case 256:
14785 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14786 DMA_RWCTRL_WRITE_BNDRY_256);
14787 break;
14788 case 512:
14789 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14790 DMA_RWCTRL_WRITE_BNDRY_512);
14791 break;
14792 case 1024:
14793 default:
14794 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14795 DMA_RWCTRL_WRITE_BNDRY_1024);
14796 break;
14797 }
14798 }
14800 out:
14801 return val;
14802 }
14804 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14805 {
14806 struct tg3_internal_buffer_desc test_desc;
14807 u32 sram_dma_descs;
14808 int i, ret;
14810 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14812 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14813 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14814 tw32(RDMAC_STATUS, 0);
14815 tw32(WDMAC_STATUS, 0);
14817 tw32(BUFMGR_MODE, 0);
14818 tw32(FTQ_RESET, 0);
14820 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14821 test_desc.addr_lo = buf_dma & 0xffffffff;
14822 test_desc.nic_mbuf = 0x00002100;
14823 test_desc.len = size;
14825 /*
14826 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14827 * the *second* time the tg3 driver was getting loaded after an
14828 * initial scan.
14829 *
14830 * Broadcom tells me:
14831 * ...the DMA engine is connected to the GRC block and a DMA
14832 * reset may affect the GRC block in some unpredictable way...
14833 * The behavior of resets to individual blocks has not been tested.
14834 *
14835 * Broadcom noted the GRC reset will also reset all sub-components.
14836 */
14837 if (to_device) {
14838 test_desc.cqid_sqid = (13 << 8) | 2;
14840 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14841 udelay(40);
14842 } else {
14843 test_desc.cqid_sqid = (16 << 8) | 7;
14845 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14846 udelay(40);
14847 }
14848 test_desc.flags = 0x00000005;
14850 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14851 u32 val;
14853 val = *(((u32 *)&test_desc) + i);
14854 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14855 sram_dma_descs + (i * sizeof(u32)));
14856 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14857 }
14858 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14860 if (to_device)
14861 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14862 else
14863 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14865 ret = -ENODEV;
14866 for (i = 0; i < 40; i++) {
14867 u32 val;
14869 if (to_device)
14870 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14871 else
14872 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14873 if ((val & 0xffff) == sram_dma_descs) {
14874 ret = 0;
14875 break;
14876 }
14878 udelay(100);
14879 }
14881 return ret;
14882 }
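/* The completion poll above is a bounded busy-wait: at most 40 probes
 * spaced 100us apart, i.e. roughly 4ms before giving up with -ENODEV.
 * The same idiom in isolation (hypothetical helper, illustration
 * only):
 */
static inline int tg3_example_poll_mask(struct tg3 *tp, u32 reg,
					u32 mask, u32 want, int tries)
{
	while (tries--) {
		if ((tr32(reg) & mask) == want)
			return 0;	/* completion seen */
		udelay(100);
	}
	return -ENODEV;			/* timed out */
}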
14884 #define TEST_BUFFER_SIZE 0x2000
14886 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14887 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14888 { },
14889 };
14891 static int __devinit tg3_test_dma(struct tg3 *tp)
14892 {
14893 dma_addr_t buf_dma;
14894 u32 *buf, saved_dma_rwctrl;
14895 int ret = 0;
14897 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14898 &buf_dma, GFP_KERNEL);
14899 if (!buf) {
14900 ret = -ENOMEM;
14901 goto out_nofree;
14902 }
14904 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14905 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14907 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14909 if (tg3_flag(tp, 57765_PLUS))
14910 goto out;
14912 if (tg3_flag(tp, PCI_EXPRESS)) {
14913 /* DMA read watermark not used on PCIE */
14914 tp->dma_rwctrl |= 0x00180000;
14915 } else if (!tg3_flag(tp, PCIX_MODE)) {
14916 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14917 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14918 tp->dma_rwctrl |= 0x003f0000;
14919 else
14920 tp->dma_rwctrl |= 0x003f000f;
14921 } else {
14922 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14923 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14924 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14925 u32 read_water = 0x7;
14927 /* If the 5704 is behind the EPB bridge, we can
14928 * do the less restrictive ONE_DMA workaround for
14929 * better performance.
14930 */
14931 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14932 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14933 tp->dma_rwctrl |= 0x8000;
14934 else if (ccval == 0x6 || ccval == 0x7)
14935 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14937 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14938 read_water = 4;
14939 /* Set bit 23 to enable PCIX hw bug fix */
14940 tp->dma_rwctrl |=
14941 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14942 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14943 (1 << 23);
14944 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14945 /* 5780 always in PCIX mode */
14946 tp->dma_rwctrl |= 0x00144000;
14947 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14948 /* 5714 always in PCIX mode */
14949 tp->dma_rwctrl |= 0x00148000;
14950 } else {
14951 tp->dma_rwctrl |= 0x001b000f;
14952 }
14953 }
14955 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14956 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14957 tp->dma_rwctrl &= 0xfffffff0;
14959 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14960 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14961 /* Remove this if it causes problems for some boards. */
14962 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14964 /* On 5700/5701 chips, we need to set this bit.
14965 * Otherwise the chip will issue cacheline transactions
14966 * to streamable DMA memory with not all the byte
14967 * enables turned on. This is an error on several
14968 * RISC PCI controllers, in particular sparc64.
14969 *
14970 * On 5703/5704 chips, this bit has been reassigned
14971 * a different meaning. In particular, it is used
14972 * on those chips to enable a PCI-X workaround.
14973 */
14974 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14975 }
14977 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14979 #if 0
14980 /* Unneeded, already done by tg3_get_invariants. */
14981 tg3_switch_clocks(tp);
14982 #endif
14984 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14985 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14986 goto out;
14988 /* It is best to perform DMA test with maximum write burst size
14989 * to expose the 5700/5701 write DMA bug.
14990 */
14991 saved_dma_rwctrl = tp->dma_rwctrl;
14992 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14993 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14995 while (1) {
14996 u32 *p = buf, i;
14998 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14999 p[i] = i;
15001 /* Send the buffer to the chip. */
15002 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15003 if (ret) {
15004 dev_err(&tp->pdev->dev,
15005 "%s: Buffer write failed. err = %d\n",
15006 __func__, ret);
15007 break;
15008 }
15010 #if 0
15011 /* validate data reached card RAM correctly. */
15012 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15013 u32 val;
15014 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15015 if (le32_to_cpu(val) != p[i]) {
15016 dev_err(&tp->pdev->dev,
15017 "%s: Buffer corrupted on device! "
15018 "(%d != %d)\n", __func__, val, i);
15019 /* ret = -ENODEV here? */
15020 }
15021 p[i] = 0;
15022 }
15023 #endif
15024 /* Now read it back. */
15025 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15026 if (ret) {
15027 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15028 "err = %d\n", __func__, ret);
15029 break;
15030 }
15032 /* Verify it. */
15033 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15034 if (p[i] == i)
15035 continue;
15037 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15038 DMA_RWCTRL_WRITE_BNDRY_16) {
15039 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15040 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15041 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15042 break;
15043 } else {
15044 dev_err(&tp->pdev->dev,
15045 "%s: Buffer corrupted on read back! "
15046 "(%d != %d)\n", __func__, p[i], i);
15047 ret = -ENODEV;
15048 goto out;
15049 }
15050 }
15052 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15053 /* Success. */
15054 ret = 0;
15055 break;
15056 }
15057 }
15058 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15059 DMA_RWCTRL_WRITE_BNDRY_16) {
15060 /* DMA test passed without adjusting DMA boundary,
15061 * now look for chipsets that are known to expose the
15062 * DMA bug without failing the test.
15063 */
15064 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15065 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15066 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15067 } else {
15068 /* Safe to use the calculated DMA boundary. */
15069 tp->dma_rwctrl = saved_dma_rwctrl;
15070 }
15072 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15073 }
15075 out:
15076 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15077 out_nofree:
15078 return ret;
15079 }
15081 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15082 {
15083 if (tg3_flag(tp, 57765_PLUS)) {
15084 tp->bufmgr_config.mbuf_read_dma_low_water =
15085 DEFAULT_MB_RDMA_LOW_WATER_5705;
15086 tp->bufmgr_config.mbuf_mac_rx_low_water =
15087 DEFAULT_MB_MACRX_LOW_WATER_57765;
15088 tp->bufmgr_config.mbuf_high_water =
15089 DEFAULT_MB_HIGH_WATER_57765;
15091 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15092 DEFAULT_MB_RDMA_LOW_WATER_5705;
15093 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15094 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15095 tp->bufmgr_config.mbuf_high_water_jumbo =
15096 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15097 } else if (tg3_flag(tp, 5705_PLUS)) {
15098 tp->bufmgr_config.mbuf_read_dma_low_water =
15099 DEFAULT_MB_RDMA_LOW_WATER_5705;
15100 tp->bufmgr_config.mbuf_mac_rx_low_water =
15101 DEFAULT_MB_MACRX_LOW_WATER_5705;
15102 tp->bufmgr_config.mbuf_high_water =
15103 DEFAULT_MB_HIGH_WATER_5705;
15104 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15105 tp->bufmgr_config.mbuf_mac_rx_low_water =
15106 DEFAULT_MB_MACRX_LOW_WATER_5906;
15107 tp->bufmgr_config.mbuf_high_water =
15108 DEFAULT_MB_HIGH_WATER_5906;
15109 }
15111 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15112 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15113 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15114 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15115 tp->bufmgr_config.mbuf_high_water_jumbo =
15116 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15117 } else {
15118 tp->bufmgr_config.mbuf_read_dma_low_water =
15119 DEFAULT_MB_RDMA_LOW_WATER;
15120 tp->bufmgr_config.mbuf_mac_rx_low_water =
15121 DEFAULT_MB_MACRX_LOW_WATER;
15122 tp->bufmgr_config.mbuf_high_water =
15123 DEFAULT_MB_HIGH_WATER;
15125 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15126 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15127 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15128 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15129 tp->bufmgr_config.mbuf_high_water_jumbo =
15130 DEFAULT_MB_HIGH_WATER_JUMBO;
15131 }
15133 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15134 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15135 }
15137 static char * __devinit tg3_phy_string(struct tg3 *tp)
15138 {
15139 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15140 case TG3_PHY_ID_BCM5400: return "5400";
15141 case TG3_PHY_ID_BCM5401: return "5401";
15142 case TG3_PHY_ID_BCM5411: return "5411";
15143 case TG3_PHY_ID_BCM5701: return "5701";
15144 case TG3_PHY_ID_BCM5703: return "5703";
15145 case TG3_PHY_ID_BCM5704: return "5704";
15146 case TG3_PHY_ID_BCM5705: return "5705";
15147 case TG3_PHY_ID_BCM5750: return "5750";
15148 case TG3_PHY_ID_BCM5752: return "5752";
15149 case TG3_PHY_ID_BCM5714: return "5714";
15150 case TG3_PHY_ID_BCM5780: return "5780";
15151 case TG3_PHY_ID_BCM5755: return "5755";
15152 case TG3_PHY_ID_BCM5787: return "5787";
15153 case TG3_PHY_ID_BCM5784: return "5784";
15154 case TG3_PHY_ID_BCM5756: return "5722/5756";
15155 case TG3_PHY_ID_BCM5906: return "5906";
15156 case TG3_PHY_ID_BCM5761: return "5761";
15157 case TG3_PHY_ID_BCM5718C: return "5718C";
15158 case TG3_PHY_ID_BCM5718S: return "5718S";
15159 case TG3_PHY_ID_BCM57765: return "57765";
15160 case TG3_PHY_ID_BCM5719C: return "5719C";
15161 case TG3_PHY_ID_BCM5720C: return "5720C";
15162 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15163 case 0: return "serdes";
15164 default: return "unknown";
15165 }
15166 }
15168 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15169 {
15170 if (tg3_flag(tp, PCI_EXPRESS)) {
15171 strcpy(str, "PCI Express");
15172 return str;
15173 } else if (tg3_flag(tp, PCIX_MODE)) {
15174 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15176 strcpy(str, "PCIX:");
15178 if ((clock_ctrl == 7) ||
15179 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15180 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15181 strcat(str, "133MHz");
15182 else if (clock_ctrl == 0)
15183 strcat(str, "33MHz");
15184 else if (clock_ctrl == 2)
15185 strcat(str, "50MHz");
15186 else if (clock_ctrl == 4)
15187 strcat(str, "66MHz");
15188 else if (clock_ctrl == 6)
15189 strcat(str, "100MHz");
15190 } else {
15191 strcpy(str, "PCI:");
15192 if (tg3_flag(tp, PCI_HIGH_SPEED))
15193 strcat(str, "66MHz");
15194 else
15195 strcat(str, "33MHz");
15196 }
15197 if (tg3_flag(tp, PCI_32BIT))
15198 strcat(str, ":32-bit");
15199 else
15200 strcat(str, ":64-bit");
15201 return str;
15202 }
15204 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15205 {
15206 struct pci_dev *peer;
15207 unsigned int func, devnr = tp->pdev->devfn & ~7;
15209 for (func = 0; func < 8; func++) {
15210 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15211 if (peer && peer != tp->pdev)
15212 break;
15213 pci_dev_put(peer);
15215 /* 5704 can be configured in single-port mode, set peer to
15216 * tp->pdev in that case.
15217 */
15218 if (!peer) {
15219 peer = tp->pdev;
15220 return peer;
15221 }
15223 /*
15224 * We don't need to keep the refcount elevated; there's no way
15225 * to remove one half of this device without removing the other
15226 */
15227 pci_dev_put(peer);
15229 return peer;
15230 }
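/* devfn packs slot and function as (slot << 3) | function, so the
 * "& ~7" above selects function 0 of the same slot.  With the
 * standard macros the same value reads (equivalent sketch,
 * illustration only):
 *
 *	unsigned int devnr = PCI_DEVFN(PCI_SLOT(tp->pdev->devfn), 0);
 */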
15232 static void __devinit tg3_init_coal(struct tg3 *tp)
15233 {
15234 struct ethtool_coalesce *ec = &tp->coal;
15236 memset(ec, 0, sizeof(*ec));
15237 ec->cmd = ETHTOOL_GCOALESCE;
15238 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15239 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15240 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15241 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15242 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15243 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15244 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15245 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15246 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15248 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15249 HOSTCC_MODE_CLRTICK_TXBD)) {
15250 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15251 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15252 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15253 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15254 }
15256 if (tg3_flag(tp, 5705_PLUS)) {
15257 ec->rx_coalesce_usecs_irq = 0;
15258 ec->tx_coalesce_usecs_irq = 0;
15259 ec->stats_block_coalesce_usecs = 0;
15260 }
15261 }
15263 static const struct net_device_ops tg3_netdev_ops = {
15264 .ndo_open = tg3_open,
15265 .ndo_stop = tg3_close,
15266 .ndo_start_xmit = tg3_start_xmit,
15267 .ndo_get_stats64 = tg3_get_stats64,
15268 .ndo_validate_addr = eth_validate_addr,
15269 .ndo_set_rx_mode = tg3_set_rx_mode,
15270 .ndo_set_mac_address = tg3_set_mac_addr,
15271 .ndo_do_ioctl = tg3_ioctl,
15272 .ndo_tx_timeout = tg3_tx_timeout,
15273 .ndo_change_mtu = tg3_change_mtu,
15274 .ndo_fix_features = tg3_fix_features,
15275 .ndo_set_features = tg3_set_features,
15276 #ifdef CONFIG_NET_POLL_CONTROLLER
15277 .ndo_poll_controller = tg3_poll_controller,
15278 #endif
15279 };
15281 static int __devinit tg3_init_one(struct pci_dev *pdev,
15282 const struct pci_device_id *ent)
15283 {
15284 struct net_device *dev;
15285 struct tg3 *tp;
15286 int i, err, pm_cap;
15287 u32 sndmbx, rcvmbx, intmbx;
15288 char str[40];
15289 u64 dma_mask, persist_dma_mask;
15290 u32 features = 0;
15292 printk_once(KERN_INFO "%s\n", version);
15294 err = pci_enable_device(pdev);
15295 if (err) {
15296 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15297 return err;
15298 }
15300 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15301 if (err) {
15302 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15303 goto err_out_disable_pdev;
15304 }
15306 pci_set_master(pdev);
15308 /* Find power-management capability. */
15309 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15310 if (pm_cap == 0) {
15311 dev_err(&pdev->dev,
15312 "Cannot find Power Management capability, aborting\n");
15313 err = -EIO;
15314 goto err_out_free_res;
15315 }
15317 err = pci_set_power_state(pdev, PCI_D0);
15318 if (err) {
15319 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15320 goto err_out_free_res;
15321 }
15323 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15324 if (!dev) {
15325 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15326 err = -ENOMEM;
15327 goto err_out_power_down;
15328 }
15330 SET_NETDEV_DEV(dev, &pdev->dev);
15332 tp = netdev_priv(dev);
15333 tp->pdev = pdev;
15334 tp->dev = dev;
15335 tp->pm_cap = pm_cap;
15336 tp->rx_mode = TG3_DEF_RX_MODE;
15337 tp->tx_mode = TG3_DEF_TX_MODE;
15339 if (tg3_debug > 0)
15340 tp->msg_enable = tg3_debug;
15341 else
15342 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15344 /* The word/byte swap controls here control register access byte
15345 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15346 * setting below.
15347 */
15348 tp->misc_host_ctrl =
15349 MISC_HOST_CTRL_MASK_PCI_INT |
15350 MISC_HOST_CTRL_WORD_SWAP |
15351 MISC_HOST_CTRL_INDIR_ACCESS |
15352 MISC_HOST_CTRL_PCISTATE_RW;
15354 /* The NONFRM (non-frame) byte/word swap controls take effect
15355 * on descriptor entries, anything which isn't packet data.
15356 *
15357 * The StrongARM chips on the board (one for tx, one for rx)
15358 * are running in big-endian mode.
15359 */
15360 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15361 GRC_MODE_WSWAP_NONFRM_DATA);
15362 #ifdef __BIG_ENDIAN
15363 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15364 #endif
15365 spin_lock_init(&tp->lock);
15366 spin_lock_init(&tp->indirect_lock);
15367 INIT_WORK(&tp->reset_task, tg3_reset_task);
15369 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15370 if (!tp->regs) {
15371 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15372 err = -ENOMEM;
15373 goto err_out_free_dev;
15374 }
15376 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15377 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15378 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15379 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15380 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15381 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15382 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15383 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15384 tg3_flag_set(tp, ENABLE_APE);
15385 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15386 if (!tp->aperegs) {
15387 dev_err(&pdev->dev,
15388 "Cannot map APE registers, aborting\n");
15389 err = -ENOMEM;
15390 goto err_out_iounmap;
15391 }
15392 }
15394 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15395 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15397 dev->ethtool_ops = &tg3_ethtool_ops;
15398 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15399 dev->netdev_ops = &tg3_netdev_ops;
15400 dev->irq = pdev->irq;
15402 err = tg3_get_invariants(tp);
15403 if (err) {
15404 dev_err(&pdev->dev,
15405 "Problem fetching invariants of chip, aborting\n");
15406 goto err_out_apeunmap;
15407 }
15409 /* The EPB bridge inside 5714, 5715, and 5780 and any
15410 * device behind the EPB cannot support DMA addresses > 40-bit.
15411 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15412 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15413 * do DMA address check in tg3_start_xmit().
15414 */
15415 if (tg3_flag(tp, IS_5788))
15416 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15417 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15418 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15419 #ifdef CONFIG_HIGHMEM
15420 dma_mask = DMA_BIT_MASK(64);
15421 #endif
15422 } else
15423 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
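/* For reference, DMA_BIT_MASK(n) is ((1ULL << (n)) - 1) for n < 64
 * (and ~0ULL for n == 64), so the 40-bit case above caps DMA
 * addresses at 1TB - 1, matching the EPB bridge limitation described
 * in the comment. */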
15425 /* Configure DMA attributes. */
15426 if (dma_mask > DMA_BIT_MASK(32)) {
15427 err = pci_set_dma_mask(pdev, dma_mask);
15428 if (!err) {
15429 features |= NETIF_F_HIGHDMA;
15430 err = pci_set_consistent_dma_mask(pdev,
15431 persist_dma_mask);
15432 if (err < 0) {
15433 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15434 "DMA for consistent allocations\n");
15435 goto err_out_apeunmap;
15436 }
15437 }
15438 }
15439 if (err || dma_mask == DMA_BIT_MASK(32)) {
15440 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15441 if (err) {
15442 dev_err(&pdev->dev,
15443 "No usable DMA configuration, aborting\n");
15444 goto err_out_apeunmap;
15448 tg3_init_bufmgr_config(tp);
15450 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15452 /* 5700 B0 chips do not support checksumming correctly due
15453 * to hardware bugs.
15454 */
15455 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15456 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15458 if (tg3_flag(tp, 5755_PLUS))
15459 features |= NETIF_F_IPV6_CSUM;
15460 }
15462 /* TSO is on by default on chips that support hardware TSO.
15463 * Firmware TSO on older chips gives lower performance, so it
15464 * is off by default, but can be enabled using ethtool.
15465 */
15466 if ((tg3_flag(tp, HW_TSO_1) ||
15467 tg3_flag(tp, HW_TSO_2) ||
15468 tg3_flag(tp, HW_TSO_3)) &&
15469 (features & NETIF_F_IP_CSUM))
15470 features |= NETIF_F_TSO;
15471 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15472 if (features & NETIF_F_IPV6_CSUM)
15473 features |= NETIF_F_TSO6;
15474 if (tg3_flag(tp, HW_TSO_3) ||
15475 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15476 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15477 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15478 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15479 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15480 features |= NETIF_F_TSO_ECN;
15481 }
15483 dev->features |= features;
15484 dev->vlan_features |= features;
15486 /*
15487 * Add loopback capability only for a subset of devices that support
15488 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15489 * loopback for the remaining devices.
15490 */
15491 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15492 !tg3_flag(tp, CPMU_PRESENT))
15493 /* Add the loopback capability */
15494 features |= NETIF_F_LOOPBACK;
15496 dev->hw_features |= features;
15498 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15499 !tg3_flag(tp, TSO_CAPABLE) &&
15500 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15501 tg3_flag_set(tp, MAX_RXPEND_64);
15502 tp->rx_pending = 63;
15503 }
15505 err = tg3_get_device_address(tp);
15506 if (err) {
15507 dev_err(&pdev->dev,
15508 "Could not obtain valid ethernet address, aborting\n");
15509 goto err_out_apeunmap;
15510 }
15512 /*
15513 * Reset the chip in case the UNDI or EFI driver did not shut it
15514 * down cleanly; the DMA self test will enable WDMAC and we would
15515 * otherwise see (spurious) pending DMA on the PCI bus at that point.
15516 */
15517 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15518 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15519 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15520 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15521 }
15523 err = tg3_test_dma(tp);
15524 if (err) {
15525 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15526 goto err_out_apeunmap;
15527 }
15529 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15530 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15531 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15532 for (i = 0; i < tp->irq_max; i++) {
15533 struct tg3_napi *tnapi = &tp->napi[i];
15535 tnapi->tp = tp;
15536 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15538 tnapi->int_mbox = intmbx;
15539 if (i <= 4)
15540 intmbx += 0x8;
15541 else
15542 intmbx += 0x4;
15544 tnapi->consmbox = rcvmbx;
15545 tnapi->prodmbox = sndmbx;
15547 if (i)
15548 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15549 else
15550 tnapi->coal_now = HOSTCC_MODE_NOW;
15552 if (!tg3_flag(tp, SUPPORT_MSIX))
15553 break;
15555 /*
15556 * If we support MSIX, we'll be using RSS. If we're using
15557 * RSS, the first vector only handles link interrupts and the
15558 * remaining vectors handle rx and tx interrupts. Reuse the
15559 * mailbox values for the next iteration. The values we set up
15560 * above are still useful for the single vectored mode.
15561 */
15562 if (!i)
15563 continue;
15565 rcvmbx += 0x8;
15567 if (sndmbx & 0x4)
15568 sndmbx -= 0x4;
15569 else
15570 sndmbx += 0xc;
15571 }
15573 tg3_init_coal(tp);
15575 pci_set_drvdata(pdev, dev);
15577 if (tg3_flag(tp, 5717_PLUS)) {
15578 /* Resume a low-power mode */
15579 tg3_frob_aux_power(tp, false);
15580 }
15582 err = register_netdev(dev);
15583 if (err) {
15584 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15585 goto err_out_apeunmap;
15586 }
15588 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15589 tp->board_part_number,
15590 tp->pci_chip_rev_id,
15591 tg3_bus_string(tp, str),
15592 dev->dev_addr);
15594 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15595 struct phy_device *phydev;
15596 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15597 netdev_info(dev,
15598 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15599 phydev->drv->name, dev_name(&phydev->dev));
15600 } else {
15601 char *ethtype;
15603 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15604 ethtype = "10/100Base-TX";
15605 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15606 ethtype = "1000Base-SX";
15607 else
15608 ethtype = "10/100/1000Base-T";
15610 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15611 "(WireSpeed[%d], EEE[%d])\n",
15612 tg3_phy_string(tp), ethtype,
15613 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15614 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15615 }
15617 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15618 (dev->features & NETIF_F_RXCSUM) != 0,
15619 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15620 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15621 tg3_flag(tp, ENABLE_ASF) != 0,
15622 tg3_flag(tp, TSO_CAPABLE) != 0);
15623 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15624 tp->dma_rwctrl,
15625 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15626 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15628 pci_save_state(pdev);
15630 return 0;
15632 err_out_apeunmap:
15633 if (tp->aperegs) {
15634 iounmap(tp->aperegs);
15635 tp->aperegs = NULL;
15636 }
15638 err_out_iounmap:
15639 if (tp->regs) {
15640 iounmap(tp->regs);
15641 tp->regs = NULL;
15642 }
15644 err_out_free_dev:
15645 free_netdev(dev);
15647 err_out_power_down:
15648 pci_set_power_state(pdev, PCI_D3hot);
15650 err_out_free_res:
15651 pci_release_regions(pdev);
15653 err_out_disable_pdev:
15654 pci_disable_device(pdev);
15655 pci_set_drvdata(pdev, NULL);
15656 return err;
15657 }
15659 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15660 {
15661 struct net_device *dev = pci_get_drvdata(pdev);
15663 if (dev) {
15664 struct tg3 *tp = netdev_priv(dev);
15666 if (tp->fw)
15667 release_firmware(tp->fw);
15669 cancel_work_sync(&tp->reset_task);
15671 if (!tg3_flag(tp, USE_PHYLIB)) {
15672 tg3_phy_fini(tp);
15673 tg3_mdio_fini(tp);
15674 }
15676 unregister_netdev(dev);
15677 if (tp->aperegs) {
15678 iounmap(tp->aperegs);
15679 tp->aperegs = NULL;
15680 }
15681 if (tp->regs) {
15682 iounmap(tp->regs);
15683 tp->regs = NULL;
15684 }
15685 free_netdev(dev);
15686 pci_release_regions(pdev);
15687 pci_disable_device(pdev);
15688 pci_set_drvdata(pdev, NULL);
15689 }
15690 }
15692 #ifdef CONFIG_PM_SLEEP
15693 static int tg3_suspend(struct device *device)
15694 {
15695 struct pci_dev *pdev = to_pci_dev(device);
15696 struct net_device *dev = pci_get_drvdata(pdev);
15697 struct tg3 *tp = netdev_priv(dev);
15698 int err;
15700 if (!netif_running(dev))
15701 return 0;
15703 flush_work_sync(&tp->reset_task);
15704 tg3_phy_stop(tp);
15705 tg3_netif_stop(tp);
15707 del_timer_sync(&tp->timer);
15709 tg3_full_lock(tp, 1);
15710 tg3_disable_ints(tp);
15711 tg3_full_unlock(tp);
15713 netif_device_detach(dev);
15715 tg3_full_lock(tp, 0);
15716 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15717 tg3_flag_clear(tp, INIT_COMPLETE);
15718 tg3_full_unlock(tp);
15720 err = tg3_power_down_prepare(tp);
15721 if (err) {
15722 int err2;
15724 tg3_full_lock(tp, 0);
15726 tg3_flag_set(tp, INIT_COMPLETE);
15727 err2 = tg3_restart_hw(tp, 1);
15728 if (err2)
15729 goto out;
15731 tp->timer.expires = jiffies + tp->timer_offset;
15732 add_timer(&tp->timer);
15734 netif_device_attach(dev);
15735 tg3_netif_start(tp);
15737 out:
15738 tg3_full_unlock(tp);
15740 if (!err2)
15741 tg3_phy_start(tp);
15742 }
15744 return err;
15745 }
15747 static int tg3_resume(struct device *device)
15748 {
15749 struct pci_dev *pdev = to_pci_dev(device);
15750 struct net_device *dev = pci_get_drvdata(pdev);
15751 struct tg3 *tp = netdev_priv(dev);
15752 int err;
15754 if (!netif_running(dev))
15755 return 0;
15757 netif_device_attach(dev);
15759 tg3_full_lock(tp, 0);
15761 tg3_flag_set(tp, INIT_COMPLETE);
15762 err = tg3_restart_hw(tp, 1);
15763 if (err)
15764 goto out;
15766 tp->timer.expires = jiffies + tp->timer_offset;
15767 add_timer(&tp->timer);
15769 tg3_netif_start(tp);
15771 out:
15772 tg3_full_unlock(tp);
15774 if (!err)
15775 tg3_phy_start(tp);
15777 return err;
15778 }
15780 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
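/* For reference, SIMPLE_DEV_PM_OPS() expands to a struct dev_pm_ops
 * that routes all system-sleep transitions to the two handlers above,
 * roughly (simplified sketch of the linux/pm.h macro):
 *
 *	static const struct dev_pm_ops tg3_pm_ops = {
 *		.suspend = tg3_suspend,  .resume  = tg3_resume,
 *		.freeze  = tg3_suspend,  .thaw    = tg3_resume,
 *		.poweroff = tg3_suspend, .restore = tg3_resume,
 *	};
 */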
15781 #define TG3_PM_OPS (&tg3_pm_ops)
15783 #else
15785 #define TG3_PM_OPS NULL
15787 #endif /* CONFIG_PM_SLEEP */
15789 /**
15790 * tg3_io_error_detected - called when PCI error is detected
15791 * @pdev: Pointer to PCI device
15792 * @state: The current pci connection state
15793 *
15794 * This function is called after a PCI bus error affecting
15795 * this device has been detected.
15796 */
15797 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15798 pci_channel_state_t state)
15799 {
15800 struct net_device *netdev = pci_get_drvdata(pdev);
15801 struct tg3 *tp = netdev_priv(netdev);
15802 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15804 netdev_info(netdev, "PCI I/O error detected\n");
15806 rtnl_lock();
15808 if (!netif_running(netdev))
15809 goto done;
15811 tg3_phy_stop(tp);
15813 tg3_netif_stop(tp);
15815 del_timer_sync(&tp->timer);
15816 tg3_flag_clear(tp, RESTART_TIMER);
15818 /* Want to make sure that the reset task doesn't run */
15819 cancel_work_sync(&tp->reset_task);
15820 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15821 tg3_flag_clear(tp, RESTART_TIMER);
15823 netif_device_detach(netdev);
15825 /* Clean up software state, even if MMIO is blocked */
15826 tg3_full_lock(tp, 0);
15827 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15828 tg3_full_unlock(tp);
15830 done:
15831 if (state == pci_channel_io_perm_failure)
15832 err = PCI_ERS_RESULT_DISCONNECT;
15833 else
15834 pci_disable_device(pdev);
15836 rtnl_unlock();
15838 return err;
15839 }
15841 /**
15842 * tg3_io_slot_reset - called after the pci bus has been reset.
15843 * @pdev: Pointer to PCI device
15844 *
15845 * Restart the card from scratch, as if from a cold-boot.
15846 * At this point, the card has experienced a hard reset,
15847 * followed by fixups by BIOS, and has its config space
15848 * set up identically to what it was at cold boot.
15849 */
15850 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15851 {
15852 struct net_device *netdev = pci_get_drvdata(pdev);
15853 struct tg3 *tp = netdev_priv(netdev);
15854 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15855 int err;
15857 rtnl_lock();
15859 if (pci_enable_device(pdev)) {
15860 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15861 goto done;
15862 }
15864 pci_set_master(pdev);
15865 pci_restore_state(pdev);
15866 pci_save_state(pdev);
15868 if (!netif_running(netdev)) {
15869 rc = PCI_ERS_RESULT_RECOVERED;
15870 goto done;
15871 }
15873 err = tg3_power_up(tp);
15874 if (err)
15875 goto done;
15877 rc = PCI_ERS_RESULT_RECOVERED;
15879 done:
15880 rtnl_unlock();
15882 return rc;
15883 }
15885 /**
15886 * tg3_io_resume - called when traffic can start flowing again.
15887 * @pdev: Pointer to PCI device
15888 *
15889 * This callback is called when the error recovery driver tells
15890 * us that it's OK to resume normal operation.
15891 */
15892 static void tg3_io_resume(struct pci_dev *pdev)
15893 {
15894 struct net_device *netdev = pci_get_drvdata(pdev);
15895 struct tg3 *tp = netdev_priv(netdev);
15896 int err;
15898 rtnl_lock();
15900 if (!netif_running(netdev))
15901 goto done;
15903 tg3_full_lock(tp, 0);
15904 tg3_flag_set(tp, INIT_COMPLETE);
15905 err = tg3_restart_hw(tp, 1);
15906 tg3_full_unlock(tp);
15907 if (err) {
15908 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15909 goto done;
15910 }
15912 netif_device_attach(netdev);
15914 tp->timer.expires = jiffies + tp->timer_offset;
15915 add_timer(&tp->timer);
15917 tg3_netif_start(tp);
15919 tg3_phy_start(tp);
15921 done:
15922 rtnl_unlock();
15923 }
15925 static struct pci_error_handlers tg3_err_handler = {
15926 .error_detected = tg3_io_error_detected,
15927 .slot_reset = tg3_io_slot_reset,
15928 .resume = tg3_io_resume
15929 };
15931 static struct pci_driver tg3_driver = {
15932 .name = DRV_MODULE_NAME,
15933 .id_table = tg3_pci_tbl,
15934 .probe = tg3_init_one,
15935 .remove = __devexit_p(tg3_remove_one),
15936 .err_handler = &tg3_err_handler,
15937 .driver.pm = TG3_PM_OPS,
15938 };
15940 static int __init tg3_init(void)
15941 {
15942 return pci_register_driver(&tg3_driver);
15943 }
15945 static void __exit tg3_cleanup(void)
15946 {
15947 pci_unregister_driver(&tg3_driver);
15948 }
15950 module_init(tg3_init);
15951 module_exit(tg3_cleanup);