/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

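/* The macros below hide the TG3_FLAG_ prefix: callers write, e.g.,
 * tg3_flag(tp, TAGGED_STATUS), and the flag token is pasted onto
 * TG3_FLAG_ before the bitmap in tp->tg3_flags is consulted.
 */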
#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		119
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 18, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

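/* Some chip configurations avoid memory-mapped register access
 * (presumably the hardware bugs the PCIX_TARGET_HWBUG/ICH_WORKAROUND
 * flags refer to); the helpers below tunnel register accesses through
 * the PCI config-space window (TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA),
 * serialized by tp->indirect_lock.
 */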
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

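/* Write a register, then read it back so the posted PCI write reaches
 * the device before the caller proceeds.
 */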
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}

	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

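/* TX mailbox writes.  Chips with the TXD_MBOX_HWBUG flag need the
 * doorbell value written twice; hosts that may reorder writes get a
 * read-back to flush the update out to the chip.
 */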
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++) {
		if (i == TG3_APE_LOCK_GPIO)
			continue;
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
	}

	/* Clear the correct bit of the GPIO lock too. */
	if (!tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}

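/* Acquire one of the hardware semaphores shared with the APE
 * management firmware: set the driver's request bit, poll the grant
 * register for up to 1 ms, and revoke the request on timeout.
 */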
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_REQ_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

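/* Mask interrupts at the chip level: set MASK_PCI_INT in the misc
 * host control register and write 1 into every vector's interrupt
 * mailbox.
 */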
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

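/* Rewrite TG3PCI_CLOCK_CTRL keeping only the CLKRUN bits: 5705+ parts
 * re-select the 62.5 MHz core clock if it was active, while older
 * parts step a 44 MHz core clock down through the ALTCLK source
 * first.  No-op when a CPMU is present or on 5780-class chips.
 */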
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

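/* Clause-22 MII access through the MAC's MI registers: autopolling is
 * paused, the frame is launched via MAC_MI_COM, and MI_COM_BUSY is
 * polled for up to PHY_BUSY_LOOPS iterations before giving up.
 */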
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

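/* Clause-45 MMD access tunneled through clause-22 registers: select
 * the MMD device, latch the register address, then switch the control
 * register to no-post-increment data mode and move the data word.
 */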
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

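/* The 5785's MAC-to-PHY interface setup depends on which external PHY
 * is attached: pick that PHY's LED modes, then program the RGMII
 * timeout, decode and in-band status bits in MAC_PHYCFG1/2 and
 * MAC_EXT_RGMII_MODE to match.
 */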
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

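/* Copy the current MII link state (BMCR/BMSR, local and partner
 * advertisement, 1000BASE-T control/status) into the firmware command
 * mailbox, then raise a driver event so the management firmware sees
 * the link change.
 */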
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

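/* Translate FLOW_CTRL_TX/RX settings into pause advertisement bits:
 * symmetric-only pause advertises PAUSE, TX-only advertises ASYM, and
 * RX advertises PAUSE|ASYM.  The 1000BASE-T and 1000BASE-X helpers
 * below use the ADVERTISE_PAUSE_* and ADVERTISE_1000X* encodings
 * respectively.
 */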
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

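/* phylib link-change callback.  Mirrors the PHY's reported speed,
 * duplex and pause state into MAC_MODE, MAC_MI_STAT and
 * MAC_TX_LENGTHS, and emits a link message when the visible link
 * state has changed.
 */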
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

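/* Toggle auto power-down on FET-style PHYs through the shadow
 * register window enabled via MII_TG3_FET_TEST.
 */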
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

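/* Apply per-chip analog tuning values recorded in OTP: each field of
 * tp->phy_otp is extracted and written to its DSP tap while the SMDSP
 * clock is enabled.
 */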
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

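/* Write a known test pattern into each of the four DSP channels and
 * read it back to verify; on any mismatch or macro timeout, flag the
 * caller (via *resetp) to reset the PHY and retry.
 */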
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

1996 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1998 u32 reg32, phy9_orig;
1999 int retries, do_phy_reset, err;
2001 retries = 10;
2002 do_phy_reset = 1;
2003 do {
2004 if (do_phy_reset) {
2005 err = tg3_bmcr_reset(tp);
2006 if (err)
2007 return err;
2008 do_phy_reset = 0;
2011 /* Disable transmitter and interrupt. */
2012 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2013 continue;
2015 reg32 |= 0x3000;
2016 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2018 /* Set full-duplex, 1000 Mbps. */
2019 tg3_writephy(tp, MII_BMCR,
2020 BMCR_FULLDPLX | BMCR_SPEED1000);
2022 /* Set to master mode. */
2023 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2024 continue;
2026 tg3_writephy(tp, MII_CTRL1000,
2027 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2029 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2030 if (err)
2031 return err;
2033 /* Block the PHY control access. */
2034 tg3_phydsp_write(tp, 0x8005, 0x0800);
2036 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2037 if (!err)
2038 break;
2039 } while (--retries);
2041 err = tg3_phy_reset_chanpat(tp);
2042 if (err)
2043 return err;
2045 tg3_phydsp_write(tp, 0x8005, 0x0000);
2047 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2048 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2050 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2052 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2054 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2055 reg32 &= ~0x3000;
2056 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2057 } else if (!err)
2058 err = -EBUSY;
2060 return err;
2063 /* This will reset the tigon3 PHY if there is no valid
2064 * link unless the FORCE argument is non-zero.
2065 */
2066 static int tg3_phy_reset(struct tg3 *tp)
2068 u32 val, cpmuctrl;
2069 int err;
2071 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2072 val = tr32(GRC_MISC_CFG);
2073 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2074 udelay(40);
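/* BMSR latches link-down events (latched-low per IEEE 802.3), so it
 * is read twice here: the second read reflects the current state.
 */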
2076 err = tg3_readphy(tp, MII_BMSR, &val);
2077 err |= tg3_readphy(tp, MII_BMSR, &val);
2078 if (err != 0)
2079 return -EBUSY;
2081 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2082 netif_carrier_off(tp->dev);
2083 tg3_link_report(tp);
2086 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2087 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2088 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2089 err = tg3_phy_reset_5703_4_5(tp);
2090 if (err)
2091 return err;
2092 goto out;
2095 cpmuctrl = 0;
2096 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2097 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2098 cpmuctrl = tr32(TG3_CPMU_CTRL);
2099 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2100 tw32(TG3_CPMU_CTRL,
2101 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2104 err = tg3_bmcr_reset(tp);
2105 if (err)
2106 return err;
2108 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2109 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2110 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2112 tw32(TG3_CPMU_CTRL, cpmuctrl);
2115 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2116 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2117 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2118 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2119 CPMU_LSPD_1000MB_MACCLK_12_5) {
2120 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2121 udelay(40);
2122 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2126 if (tg3_flag(tp, 5717_PLUS) &&
2127 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2128 return 0;
2130 tg3_phy_apply_otp(tp);
2132 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2133 tg3_phy_toggle_apd(tp, true);
2134 else
2135 tg3_phy_toggle_apd(tp, false);
2137 out:
2138 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2139 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2140 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2141 tg3_phydsp_write(tp, 0x000a, 0x0323);
2142 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2145 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2146 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2147 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2150 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2151 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2152 tg3_phydsp_write(tp, 0x000a, 0x310b);
2153 tg3_phydsp_write(tp, 0x201f, 0x9506);
2154 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2155 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2157 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2158 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2159 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2160 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2161 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2162 tg3_writephy(tp, MII_TG3_TEST1,
2163 MII_TG3_TEST1_TRIM_EN | 0x4);
2164 } else
2165 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2167 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2171 /* Set Extended packet length bit (bit 14) on all chips that */
2172 /* support jumbo frames */
2173 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2174 /* Cannot do read-modify-write on 5401 */
2175 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2176 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2177 /* Set bit 14 with read-modify-write to preserve other bits */
2178 err = tg3_phy_auxctl_read(tp,
2179 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2180 if (!err)
2181 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2182 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2185 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2186 * jumbo frames transmission.
2187 */
2188 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2189 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2190 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2191 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2194 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2195 /* adjust output voltage */
2196 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2199 tg3_phy_toggle_automdix(tp, 1);
2200 tg3_phy_set_wirespeed(tp);
2201 return 0;
2204 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2205 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2206 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2207 TG3_GPIO_MSG_NEED_VAUX)
2208 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2209 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2210 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2211 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2212 (TG3_GPIO_MSG_DRVR_PRES << 12))
2214 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2215 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2216 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2217 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2218 (TG3_GPIO_MSG_NEED_VAUX << 12))
2220 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2222 u32 status, shift;
2224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2225 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2226 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2227 else
2228 status = tr32(TG3_CPMU_DRV_STATUS);
2230 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2231 status &= ~(TG3_GPIO_MSG_MASK << shift);
2232 status |= (newstat << shift);
2234 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2235 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2236 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2237 else
2238 tw32(TG3_CPMU_DRV_STATUS, status);
2240 return status >> TG3_APE_GPIO_MSG_SHIFT;
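/* Layout note (illustrative): each PCI function owns a 4-bit nibble
 * in the shared status word, holding DRVR_PRES (bit 0) and NEED_VAUX
 * (bit 1); hence the "4 * tp->pci_fn" shift above and the
 * <<0/<<4/<<8/<<12 terms in the ALL_* masks. E.g. for pci_fn == 2:
 *
 *	shift = TG3_APE_GPIO_MSG_SHIFT + 8;
 *	status &= ~(TG3_GPIO_MSG_MASK << shift);
 *	status |= TG3_GPIO_MSG_NEED_VAUX << shift;
 */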
2243 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2245 if (!tg3_flag(tp, IS_NIC))
2246 return 0;
2248 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2249 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2250 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2251 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2252 return -EIO;
2254 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2256 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2257 TG3_GRC_LCLCTL_PWRSW_DELAY);
2259 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2260 } else {
2261 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2262 TG3_GRC_LCLCTL_PWRSW_DELAY);
2265 return 0;
2268 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2270 u32 grc_local_ctrl;
2272 if (!tg3_flag(tp, IS_NIC) ||
2273 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2274 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2275 return;
2277 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2279 tw32_wait_f(GRC_LOCAL_CTRL,
2280 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2281 TG3_GRC_LCLCTL_PWRSW_DELAY);
2283 tw32_wait_f(GRC_LOCAL_CTRL,
2284 grc_local_ctrl,
2285 TG3_GRC_LCLCTL_PWRSW_DELAY);
2287 tw32_wait_f(GRC_LOCAL_CTRL,
2288 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2289 TG3_GRC_LCLCTL_PWRSW_DELAY);
2292 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2294 if (!tg3_flag(tp, IS_NIC))
2295 return;
2297 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2298 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2299 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2300 (GRC_LCLCTRL_GPIO_OE0 |
2301 GRC_LCLCTRL_GPIO_OE1 |
2302 GRC_LCLCTRL_GPIO_OE2 |
2303 GRC_LCLCTRL_GPIO_OUTPUT0 |
2304 GRC_LCLCTRL_GPIO_OUTPUT1),
2305 TG3_GRC_LCLCTL_PWRSW_DELAY);
2306 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2307 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2308 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2309 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2310 GRC_LCLCTRL_GPIO_OE1 |
2311 GRC_LCLCTRL_GPIO_OE2 |
2312 GRC_LCLCTRL_GPIO_OUTPUT0 |
2313 GRC_LCLCTRL_GPIO_OUTPUT1 |
2314 tp->grc_local_ctrl;
2315 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2316 TG3_GRC_LCLCTL_PWRSW_DELAY);
2318 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2319 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2320 TG3_GRC_LCLCTL_PWRSW_DELAY);
2322 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2323 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2324 TG3_GRC_LCLCTL_PWRSW_DELAY);
2325 } else {
2326 u32 no_gpio2;
2327 u32 grc_local_ctrl = 0;
2329 /* Workaround to prevent excessive current draw. */
2330 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2331 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2332 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2333 grc_local_ctrl,
2334 TG3_GRC_LCLCTL_PWRSW_DELAY);
2337 /* On 5753 and variants, GPIO2 cannot be used. */
2338 no_gpio2 = tp->nic_sram_data_cfg &
2339 NIC_SRAM_DATA_CFG_NO_GPIO2;
2341 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2342 GRC_LCLCTRL_GPIO_OE1 |
2343 GRC_LCLCTRL_GPIO_OE2 |
2344 GRC_LCLCTRL_GPIO_OUTPUT1 |
2345 GRC_LCLCTRL_GPIO_OUTPUT2;
2346 if (no_gpio2) {
2347 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2348 GRC_LCLCTRL_GPIO_OUTPUT2);
2350 tw32_wait_f(GRC_LOCAL_CTRL,
2351 tp->grc_local_ctrl | grc_local_ctrl,
2352 TG3_GRC_LCLCTL_PWRSW_DELAY);
2354 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2356 tw32_wait_f(GRC_LOCAL_CTRL,
2357 tp->grc_local_ctrl | grc_local_ctrl,
2358 TG3_GRC_LCLCTL_PWRSW_DELAY);
2360 if (!no_gpio2) {
2361 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2362 tw32_wait_f(GRC_LOCAL_CTRL,
2363 tp->grc_local_ctrl | grc_local_ctrl,
2364 TG3_GRC_LCLCTL_PWRSW_DELAY);
2369 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2371 u32 msg = 0;
2373 /* Serialize power state transitions */
2374 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2375 return;
2377 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2378 msg = TG3_GPIO_MSG_NEED_VAUX;
2380 msg = tg3_set_function_status(tp, msg);
2382 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2383 goto done;
2385 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2386 tg3_pwrsrc_switch_to_vaux(tp);
2387 else
2388 tg3_pwrsrc_die_with_vmain(tp);
2390 done:
2391 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2394 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2396 bool need_vaux = false;
2398 /* The GPIOs do something completely different on 57765. */
2399 if (!tg3_flag(tp, IS_NIC) ||
2400 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2401 return;
2403 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2404 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2405 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2406 tg3_frob_aux_power_5717(tp, include_wol ?
2407 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2408 return;
2411 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2412 struct net_device *dev_peer;
2414 dev_peer = pci_get_drvdata(tp->pdev_peer);
2416 /* remove_one() may have been run on the peer. */
2417 if (dev_peer) {
2418 struct tg3 *tp_peer = netdev_priv(dev_peer);
2420 if (tg3_flag(tp_peer, INIT_COMPLETE))
2421 return;
2423 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2424 tg3_flag(tp_peer, ENABLE_ASF))
2425 need_vaux = true;
2429 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2430 tg3_flag(tp, ENABLE_ASF))
2431 need_vaux = true;
2433 if (need_vaux)
2434 tg3_pwrsrc_switch_to_vaux(tp);
2435 else
2436 tg3_pwrsrc_die_with_vmain(tp);
2439 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2441 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2442 return 1;
2443 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2444 if (speed != SPEED_10)
2445 return 1;
2446 } else if (speed == SPEED_10)
2447 return 1;
2449 return 0;
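/* Summary of the rule above: a return of 1 means the caller should
 * set MAC_MODE_LINK_POLARITY.
 *
 *	LED_CTRL_MODE_PHY_2  -> 1 at any speed
 *	BCM5411 PHY          -> 1 except at 10 Mbps
 *	otherwise            -> 1 only at 10 Mbps
 */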
2452 static int tg3_setup_phy(struct tg3 *, int);
2454 #define RESET_KIND_SHUTDOWN 0
2455 #define RESET_KIND_INIT 1
2456 #define RESET_KIND_SUSPEND 2
2458 static void tg3_write_sig_post_reset(struct tg3 *, int);
2459 static int tg3_halt_cpu(struct tg3 *, u32);
2461 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2463 u32 val;
2465 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2466 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2467 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2468 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2470 sg_dig_ctrl |=
2471 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2472 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2473 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2475 return;
2478 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2479 tg3_bmcr_reset(tp);
2480 val = tr32(GRC_MISC_CFG);
2481 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2482 udelay(40);
2483 return;
2484 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2485 u32 phytest;
2486 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2487 u32 phy;
2489 tg3_writephy(tp, MII_ADVERTISE, 0);
2490 tg3_writephy(tp, MII_BMCR,
2491 BMCR_ANENABLE | BMCR_ANRESTART);
2493 tg3_writephy(tp, MII_TG3_FET_TEST,
2494 phytest | MII_TG3_FET_SHADOW_EN);
2495 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2496 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2497 tg3_writephy(tp,
2498 MII_TG3_FET_SHDW_AUXMODE4,
2499 phy);
2501 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2503 return;
2504 } else if (do_low_power) {
2505 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2506 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2508 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2509 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2510 MII_TG3_AUXCTL_PCTL_VREG_11V;
2511 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2514 /* The PHY should not be powered down on some chips because
2515 * of bugs.
2516 */
2517 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2518 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2519 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2520 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2521 return;
2523 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2524 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2525 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2526 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2527 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2528 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2531 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2534 /* tp->lock is held. */
2535 static int tg3_nvram_lock(struct tg3 *tp)
2537 if (tg3_flag(tp, NVRAM)) {
2538 int i;
2540 if (tp->nvram_lock_cnt == 0) {
2541 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2542 for (i = 0; i < 8000; i++) {
2543 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2544 break;
2545 udelay(20);
2547 if (i == 8000) {
2548 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2549 return -ENODEV;
2552 tp->nvram_lock_cnt++;
2554 return 0;
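/* Worst-case arbitration wait above: 8000 polls at 20 usec each,
 * i.e. roughly 160 ms before giving up with -ENODEV (ignoring the
 * cost of the register reads themselves).
 */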
2557 /* tp->lock is held. */
2558 static void tg3_nvram_unlock(struct tg3 *tp)
2560 if (tg3_flag(tp, NVRAM)) {
2561 if (tp->nvram_lock_cnt > 0)
2562 tp->nvram_lock_cnt--;
2563 if (tp->nvram_lock_cnt == 0)
2564 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2568 /* tp->lock is held. */
2569 static void tg3_enable_nvram_access(struct tg3 *tp)
2571 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2572 u32 nvaccess = tr32(NVRAM_ACCESS);
2574 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2578 /* tp->lock is held. */
2579 static void tg3_disable_nvram_access(struct tg3 *tp)
2581 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2582 u32 nvaccess = tr32(NVRAM_ACCESS);
2584 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2588 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2589 u32 offset, u32 *val)
2591 u32 tmp;
2592 int i;
2594 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2595 return -EINVAL;
2597 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2598 EEPROM_ADDR_DEVID_MASK |
2599 EEPROM_ADDR_READ);
2600 tw32(GRC_EEPROM_ADDR,
2601 tmp |
2602 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2603 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2604 EEPROM_ADDR_ADDR_MASK) |
2605 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2607 for (i = 0; i < 1000; i++) {
2608 tmp = tr32(GRC_EEPROM_ADDR);
2610 if (tmp & EEPROM_ADDR_COMPLETE)
2611 break;
2612 msleep(1);
2614 if (!(tmp & EEPROM_ADDR_COMPLETE))
2615 return -EBUSY;
2617 tmp = tr32(GRC_EEPROM_DATA);
2619 /*
2620 * The data will always be opposite the native endian
2621 * format. Perform a blind byteswap to compensate.
2622 */
2623 *val = swab32(tmp);
2625 return 0;
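/* Flow of the helper above (illustrative): the offset is packed into
 * the ADDR field of GRC_EEPROM_ADDR together with device id 0 and the
 * READ and START bits, then EEPROM_ADDR_COMPLETE is polled for up to
 * 1000 iterations of msleep(1) (on the order of a second). The final
 * swab32() compensates for the EEPROM data register returning data
 * opposite the native endianness, keeping the result consistent with
 * reads done through the NVRAM interface.
 */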
2628 #define NVRAM_CMD_TIMEOUT 10000
2630 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2632 int i;
2634 tw32(NVRAM_CMD, nvram_cmd);
2635 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2636 udelay(10);
2637 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2638 udelay(10);
2639 break;
2643 if (i == NVRAM_CMD_TIMEOUT)
2644 return -EBUSY;
2646 return 0;
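/* Worst case for the poll above: NVRAM_CMD_TIMEOUT (10000) iterations
 * at 10 usec each, i.e. roughly 100 ms before the command is declared
 * stuck and -EBUSY is returned.
 */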
2649 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2651 if (tg3_flag(tp, NVRAM) &&
2652 tg3_flag(tp, NVRAM_BUFFERED) &&
2653 tg3_flag(tp, FLASH) &&
2654 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2655 (tp->nvram_jedecnum == JEDEC_ATMEL))
2657 addr = ((addr / tp->nvram_pagesize) <<
2658 ATMEL_AT45DB0X1B_PAGE_POS) +
2659 (addr % tp->nvram_pagesize);
2661 return addr;
2664 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2666 if (tg3_flag(tp, NVRAM) &&
2667 tg3_flag(tp, NVRAM_BUFFERED) &&
2668 tg3_flag(tp, FLASH) &&
2669 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2670 (tp->nvram_jedecnum == JEDEC_ATMEL))
2672 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2673 tp->nvram_pagesize) +
2674 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2676 return addr;
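/* Worked example of the two translations above (illustrative,
 * assuming the 264-byte page size and ATMEL_AT45DB0X1B_PAGE_POS of 9
 * used for these Atmel parts): linear address 1000 sits at offset 208
 * of page 3, so
 *
 *	phys    = (3 << 9) + 208 = 1744
 *	logical = (1744 >> 9) * 264 + (1744 & 511) = 1000
 *
 * i.e. the two functions are exact inverses of each other.
 */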
2679 /* NOTE: Data read in from NVRAM is byteswapped according to
2680 * the byteswapping settings for all other register accesses.
2681 * tg3 devices are BE devices, so on a BE machine, the data
2682 * returned will be exactly as it is seen in NVRAM. On a LE
2683 * machine, the 32-bit value will be byteswapped.
2684 */
2685 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2687 int ret;
2689 if (!tg3_flag(tp, NVRAM))
2690 return tg3_nvram_read_using_eeprom(tp, offset, val);
2692 offset = tg3_nvram_phys_addr(tp, offset);
2694 if (offset > NVRAM_ADDR_MSK)
2695 return -EINVAL;
2697 ret = tg3_nvram_lock(tp);
2698 if (ret)
2699 return ret;
2701 tg3_enable_nvram_access(tp);
2703 tw32(NVRAM_ADDR, offset);
2704 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2705 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2707 if (ret == 0)
2708 *val = tr32(NVRAM_RDDATA);
2710 tg3_disable_nvram_access(tp);
2712 tg3_nvram_unlock(tp);
2714 return ret;
2717 /* Ensures NVRAM data is in bytestream format. */
2718 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2720 u32 v;
2721 int res = tg3_nvram_read(tp, offset, &v);
2722 if (!res)
2723 *val = cpu_to_be32(v);
2724 return res;
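/* Endianness example (illustrative): if NVRAM holds the bytes
 * aa bb cc dd, tg3_nvram_read() returns the value 0xaabbccdd, and the
 * cpu_to_be32() above stores it so that *val holds the byte sequence
 * aa bb cc dd in memory on either endianness - a true bytestream,
 * matching the NOTE before tg3_nvram_read().
 */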
2727 /* tp->lock is held. */
2728 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2730 u32 addr_high, addr_low;
2731 int i;
2733 addr_high = ((tp->dev->dev_addr[0] << 8) |
2734 tp->dev->dev_addr[1]);
2735 addr_low = ((tp->dev->dev_addr[2] << 24) |
2736 (tp->dev->dev_addr[3] << 16) |
2737 (tp->dev->dev_addr[4] << 8) |
2738 (tp->dev->dev_addr[5] << 0));
2739 for (i = 0; i < 4; i++) {
2740 if (i == 1 && skip_mac_1)
2741 continue;
2742 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2743 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2746 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2747 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2748 for (i = 0; i < 12; i++) {
2749 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2750 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2754 addr_high = (tp->dev->dev_addr[0] +
2755 tp->dev->dev_addr[1] +
2756 tp->dev->dev_addr[2] +
2757 tp->dev->dev_addr[3] +
2758 tp->dev->dev_addr[4] +
2759 tp->dev->dev_addr[5]) &
2760 TX_BACKOFF_SEED_MASK;
2761 tw32(MAC_TX_BACKOFF_SEED, addr_high);
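/* Packing example (illustrative): for dev_addr 00:10:18:aa:bb:cc the
 * code above writes
 *
 *	addr_high = 0x00000010   (bytes 0-1)
 *	addr_low  = 0x18aabbcc   (bytes 2-5)
 *
 * and seeds the backoff generator with the byte sum (0x259) masked to
 * TX_BACKOFF_SEED_MASK.
 */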
2764 static void tg3_enable_register_access(struct tg3 *tp)
2766 /*
2767 * Make sure register accesses (indirect or otherwise) will function
2768 * correctly.
2769 */
2770 pci_write_config_dword(tp->pdev,
2771 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2774 static int tg3_power_up(struct tg3 *tp)
2776 int err;
2778 tg3_enable_register_access(tp);
2780 err = pci_set_power_state(tp->pdev, PCI_D0);
2781 if (!err) {
2782 /* Switch out of Vaux if it is a NIC */
2783 tg3_pwrsrc_switch_to_vmain(tp);
2784 } else {
2785 netdev_err(tp->dev, "Transition to D0 failed\n");
2788 return err;
2791 static int tg3_power_down_prepare(struct tg3 *tp)
2793 u32 misc_host_ctrl;
2794 bool device_should_wake, do_low_power;
2796 tg3_enable_register_access(tp);
2798 /* Restore the CLKREQ setting. */
2799 if (tg3_flag(tp, CLKREQ_BUG)) {
2800 u16 lnkctl;
2802 pci_read_config_word(tp->pdev,
2803 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2804 &lnkctl);
2805 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2806 pci_write_config_word(tp->pdev,
2807 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2808 lnkctl);
2811 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2812 tw32(TG3PCI_MISC_HOST_CTRL,
2813 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2815 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2816 tg3_flag(tp, WOL_ENABLE);
2818 if (tg3_flag(tp, USE_PHYLIB)) {
2819 do_low_power = false;
2820 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2821 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2822 struct phy_device *phydev;
2823 u32 phyid, advertising;
2825 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2827 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2829 tp->link_config.orig_speed = phydev->speed;
2830 tp->link_config.orig_duplex = phydev->duplex;
2831 tp->link_config.orig_autoneg = phydev->autoneg;
2832 tp->link_config.orig_advertising = phydev->advertising;
2834 advertising = ADVERTISED_TP |
2835 ADVERTISED_Pause |
2836 ADVERTISED_Autoneg |
2837 ADVERTISED_10baseT_Half;
2839 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2840 if (tg3_flag(tp, WOL_SPEED_100MB))
2841 advertising |=
2842 ADVERTISED_100baseT_Half |
2843 ADVERTISED_100baseT_Full |
2844 ADVERTISED_10baseT_Full;
2845 else
2846 advertising |= ADVERTISED_10baseT_Full;
2849 phydev->advertising = advertising;
2851 phy_start_aneg(phydev);
2853 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2854 if (phyid != PHY_ID_BCMAC131) {
2855 phyid &= PHY_BCM_OUI_MASK;
2856 if (phyid == PHY_BCM_OUI_1 ||
2857 phyid == PHY_BCM_OUI_2 ||
2858 phyid == PHY_BCM_OUI_3)
2859 do_low_power = true;
2862 } else {
2863 do_low_power = true;
2865 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2866 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2867 tp->link_config.orig_speed = tp->link_config.speed;
2868 tp->link_config.orig_duplex = tp->link_config.duplex;
2869 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2872 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2873 tp->link_config.speed = SPEED_10;
2874 tp->link_config.duplex = DUPLEX_HALF;
2875 tp->link_config.autoneg = AUTONEG_ENABLE;
2876 tg3_setup_phy(tp, 0);
2880 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2881 u32 val;
2883 val = tr32(GRC_VCPU_EXT_CTRL);
2884 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2885 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2886 int i;
2887 u32 val;
2889 for (i = 0; i < 200; i++) {
2890 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2891 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2892 break;
2893 msleep(1);
2896 if (tg3_flag(tp, WOL_CAP))
2897 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2898 WOL_DRV_STATE_SHUTDOWN |
2899 WOL_DRV_WOL |
2900 WOL_SET_MAGIC_PKT);
2902 if (device_should_wake) {
2903 u32 mac_mode;
2905 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2906 if (do_low_power &&
2907 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2908 tg3_phy_auxctl_write(tp,
2909 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2910 MII_TG3_AUXCTL_PCTL_WOL_EN |
2911 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2912 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2913 udelay(40);
2916 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2917 mac_mode = MAC_MODE_PORT_MODE_GMII;
2918 else
2919 mac_mode = MAC_MODE_PORT_MODE_MII;
2921 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2922 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2923 ASIC_REV_5700) {
2924 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2925 SPEED_100 : SPEED_10;
2926 if (tg3_5700_link_polarity(tp, speed))
2927 mac_mode |= MAC_MODE_LINK_POLARITY;
2928 else
2929 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2931 } else {
2932 mac_mode = MAC_MODE_PORT_MODE_TBI;
2935 if (!tg3_flag(tp, 5750_PLUS))
2936 tw32(MAC_LED_CTRL, tp->led_ctrl);
2938 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2939 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2940 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2941 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2943 if (tg3_flag(tp, ENABLE_APE))
2944 mac_mode |= MAC_MODE_APE_TX_EN |
2945 MAC_MODE_APE_RX_EN |
2946 MAC_MODE_TDE_ENABLE;
2948 tw32_f(MAC_MODE, mac_mode);
2949 udelay(100);
2951 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2952 udelay(10);
2955 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2956 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2957 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2958 u32 base_val;
2960 base_val = tp->pci_clock_ctrl;
2961 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2962 CLOCK_CTRL_TXCLK_DISABLE);
2964 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2965 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2966 } else if (tg3_flag(tp, 5780_CLASS) ||
2967 tg3_flag(tp, CPMU_PRESENT) ||
2968 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2969 /* do nothing */
2970 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2971 u32 newbits1, newbits2;
2973 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2974 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2975 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2976 CLOCK_CTRL_TXCLK_DISABLE |
2977 CLOCK_CTRL_ALTCLK);
2978 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2979 } else if (tg3_flag(tp, 5705_PLUS)) {
2980 newbits1 = CLOCK_CTRL_625_CORE;
2981 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2982 } else {
2983 newbits1 = CLOCK_CTRL_ALTCLK;
2984 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2987 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2988 40);
2990 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2991 40);
2993 if (!tg3_flag(tp, 5705_PLUS)) {
2994 u32 newbits3;
2996 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2997 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2998 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2999 CLOCK_CTRL_TXCLK_DISABLE |
3000 CLOCK_CTRL_44MHZ_CORE);
3001 } else {
3002 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3005 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3006 tp->pci_clock_ctrl | newbits3, 40);
3010 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3011 tg3_power_down_phy(tp, do_low_power);
3013 tg3_frob_aux_power(tp, true);
3015 /* Workaround for unstable PLL clock */
3016 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3017 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3018 u32 val = tr32(0x7d00);
3020 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3021 tw32(0x7d00, val);
3022 if (!tg3_flag(tp, ENABLE_ASF)) {
3023 int err;
3025 err = tg3_nvram_lock(tp);
3026 tg3_halt_cpu(tp, RX_CPU_BASE);
3027 if (!err)
3028 tg3_nvram_unlock(tp);
3032 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3034 return 0;
3037 static void tg3_power_down(struct tg3 *tp)
3039 tg3_power_down_prepare(tp);
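/* Arm PME for wake-on-LAN (if enabled) before dropping to D3hot. */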
3041 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3042 pci_set_power_state(tp->pdev, PCI_D3hot);
3045 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3047 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3048 case MII_TG3_AUX_STAT_10HALF:
3049 *speed = SPEED_10;
3050 *duplex = DUPLEX_HALF;
3051 break;
3053 case MII_TG3_AUX_STAT_10FULL:
3054 *speed = SPEED_10;
3055 *duplex = DUPLEX_FULL;
3056 break;
3058 case MII_TG3_AUX_STAT_100HALF:
3059 *speed = SPEED_100;
3060 *duplex = DUPLEX_HALF;
3061 break;
3063 case MII_TG3_AUX_STAT_100FULL:
3064 *speed = SPEED_100;
3065 *duplex = DUPLEX_FULL;
3066 break;
3068 case MII_TG3_AUX_STAT_1000HALF:
3069 *speed = SPEED_1000;
3070 *duplex = DUPLEX_HALF;
3071 break;
3073 case MII_TG3_AUX_STAT_1000FULL:
3074 *speed = SPEED_1000;
3075 *duplex = DUPLEX_FULL;
3076 break;
3078 default:
3079 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3080 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3081 SPEED_10;
3082 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3083 DUPLEX_HALF;
3084 break;
3086 *speed = SPEED_INVALID;
3087 *duplex = DUPLEX_INVALID;
3088 break;
3092 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3094 int err = 0;
3095 u32 val, new_adv;
3097 new_adv = ADVERTISE_CSMA;
3098 if (advertise & ADVERTISED_10baseT_Half)
3099 new_adv |= ADVERTISE_10HALF;
3100 if (advertise & ADVERTISED_10baseT_Full)
3101 new_adv |= ADVERTISE_10FULL;
3102 if (advertise & ADVERTISED_100baseT_Half)
3103 new_adv |= ADVERTISE_100HALF;
3104 if (advertise & ADVERTISED_100baseT_Full)
3105 new_adv |= ADVERTISE_100FULL;
3107 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3109 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3110 if (err)
3111 goto done;
3113 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3114 goto done;
3116 new_adv = 0;
3117 if (advertise & ADVERTISED_1000baseT_Half)
3118 new_adv |= ADVERTISE_1000HALF;
3119 if (advertise & ADVERTISED_1000baseT_Full)
3120 new_adv |= ADVERTISE_1000FULL;
3122 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3123 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3124 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3126 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3127 if (err)
3128 goto done;
3130 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3131 goto done;
3133 tw32(TG3_CPMU_EEE_MODE,
3134 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3136 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3137 if (!err) {
3138 u32 err2;
3140 val = 0;
3141 /* Advertise 100-BaseTX EEE ability */
3142 if (advertise & ADVERTISED_100baseT_Full)
3143 val |= MDIO_AN_EEE_ADV_100TX;
3144 /* Advertise 1000-BaseT EEE ability */
3145 if (advertise & ADVERTISED_1000baseT_Full)
3146 val |= MDIO_AN_EEE_ADV_1000T;
3147 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3148 if (err)
3149 val = 0;
3151 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3152 case ASIC_REV_5717:
3153 case ASIC_REV_57765:
3154 case ASIC_REV_5719:
3155 /* If we advertised any EEE abilities above... */
3156 if (val)
3157 val = MII_TG3_DSP_TAP26_ALNOKO |
3158 MII_TG3_DSP_TAP26_RMRXSTO |
3159 MII_TG3_DSP_TAP26_OPCSINPT;
3160 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3161 /* Fall through */
3162 case ASIC_REV_5720:
3163 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3164 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3165 MII_TG3_DSP_CH34TP2_HIBW01);
3168 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3169 if (!err)
3170 err = err2;
3173 done:
3174 return err;
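/* Typical call (illustrative), advertising all 10/100 modes with
 * symmetric flow control, as tg3_phy_copper_begin() does in low-power
 * mode when WOL_SPEED_100MB is set:
 *
 *	tg3_phy_autoneg_cfg(tp,
 *			    ADVERTISED_10baseT_Half |
 *			    ADVERTISED_10baseT_Full |
 *			    ADVERTISED_100baseT_Half |
 *			    ADVERTISED_100baseT_Full,
 *			    FLOW_CTRL_TX | FLOW_CTRL_RX);
 */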
3177 static void tg3_phy_copper_begin(struct tg3 *tp)
3179 u32 new_adv;
3180 int i;
3182 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3183 new_adv = ADVERTISED_10baseT_Half |
3184 ADVERTISED_10baseT_Full;
3185 if (tg3_flag(tp, WOL_SPEED_100MB))
3186 new_adv |= ADVERTISED_100baseT_Half |
3187 ADVERTISED_100baseT_Full;
3189 tg3_phy_autoneg_cfg(tp, new_adv,
3190 FLOW_CTRL_TX | FLOW_CTRL_RX);
3191 } else if (tp->link_config.speed == SPEED_INVALID) {
3192 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3193 tp->link_config.advertising &=
3194 ~(ADVERTISED_1000baseT_Half |
3195 ADVERTISED_1000baseT_Full);
3197 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3198 tp->link_config.flowctrl);
3199 } else {
3200 /* Asking for a specific link mode. */
3201 if (tp->link_config.speed == SPEED_1000) {
3202 if (tp->link_config.duplex == DUPLEX_FULL)
3203 new_adv = ADVERTISED_1000baseT_Full;
3204 else
3205 new_adv = ADVERTISED_1000baseT_Half;
3206 } else if (tp->link_config.speed == SPEED_100) {
3207 if (tp->link_config.duplex == DUPLEX_FULL)
3208 new_adv = ADVERTISED_100baseT_Full;
3209 else
3210 new_adv = ADVERTISED_100baseT_Half;
3211 } else {
3212 if (tp->link_config.duplex == DUPLEX_FULL)
3213 new_adv = ADVERTISED_10baseT_Full;
3214 else
3215 new_adv = ADVERTISED_10baseT_Half;
3218 tg3_phy_autoneg_cfg(tp, new_adv,
3219 tp->link_config.flowctrl);
3222 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3223 tp->link_config.speed != SPEED_INVALID) {
3224 u32 bmcr, orig_bmcr;
3226 tp->link_config.active_speed = tp->link_config.speed;
3227 tp->link_config.active_duplex = tp->link_config.duplex;
3229 bmcr = 0;
3230 switch (tp->link_config.speed) {
3231 default:
3232 case SPEED_10:
3233 break;
3235 case SPEED_100:
3236 bmcr |= BMCR_SPEED100;
3237 break;
3239 case SPEED_1000:
3240 bmcr |= BMCR_SPEED1000;
3241 break;
3244 if (tp->link_config.duplex == DUPLEX_FULL)
3245 bmcr |= BMCR_FULLDPLX;
3247 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3248 (bmcr != orig_bmcr)) {
3249 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3250 for (i = 0; i < 1500; i++) {
3251 u32 tmp;
3253 udelay(10);
3254 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3255 tg3_readphy(tp, MII_BMSR, &tmp))
3256 continue;
3257 if (!(tmp & BMSR_LSTATUS)) {
3258 udelay(40);
3259 break;
3262 tg3_writephy(tp, MII_BMCR, bmcr);
3263 udelay(40);
3265 } else {
3266 tg3_writephy(tp, MII_BMCR,
3267 BMCR_ANENABLE | BMCR_ANRESTART);
3271 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3273 int err;
3275 /* Turn off tap power management. */
3276 /* Set Extended packet length bit */
3277 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3279 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3280 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3281 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3282 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3283 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3285 udelay(40);
3287 return err;
3290 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3292 u32 adv_reg, all_mask = 0;
3294 if (mask & ADVERTISED_10baseT_Half)
3295 all_mask |= ADVERTISE_10HALF;
3296 if (mask & ADVERTISED_10baseT_Full)
3297 all_mask |= ADVERTISE_10FULL;
3298 if (mask & ADVERTISED_100baseT_Half)
3299 all_mask |= ADVERTISE_100HALF;
3300 if (mask & ADVERTISED_100baseT_Full)
3301 all_mask |= ADVERTISE_100FULL;
3303 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3304 return 0;
3306 if ((adv_reg & all_mask) != all_mask)
3307 return 0;
3308 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3309 u32 tg3_ctrl;
3311 all_mask = 0;
3312 if (mask & ADVERTISED_1000baseT_Half)
3313 all_mask |= ADVERTISE_1000HALF;
3314 if (mask & ADVERTISED_1000baseT_Full)
3315 all_mask |= ADVERTISE_1000FULL;
3317 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3318 return 0;
3320 if ((tg3_ctrl & all_mask) != all_mask)
3321 return 0;
3323 return 1;
3326 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3328 u32 curadv, reqadv;
3330 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3331 return 1;
3333 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3334 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3336 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3337 if (curadv != reqadv)
3338 return 0;
3340 if (tg3_flag(tp, PAUSE_AUTONEG))
3341 tg3_readphy(tp, MII_LPA, rmtadv);
3342 } else {
3343 /* Reprogram the advertisement register, even if it
3344 * does not affect the current link. If the link
3345 * gets renegotiated in the future, we can save an
3346 * additional renegotiation cycle by advertising
3347 * it correctly in the first place.
3348 */
3349 if (curadv != reqadv) {
3350 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3351 ADVERTISE_PAUSE_ASYM);
3352 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3356 return 1;
3359 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3361 int current_link_up;
3362 u32 bmsr, val;
3363 u32 lcl_adv, rmt_adv;
3364 u16 current_speed;
3365 u8 current_duplex;
3366 int i, err;
3368 tw32(MAC_EVENT, 0);
3370 tw32_f(MAC_STATUS,
3371 (MAC_STATUS_SYNC_CHANGED |
3372 MAC_STATUS_CFG_CHANGED |
3373 MAC_STATUS_MI_COMPLETION |
3374 MAC_STATUS_LNKSTATE_CHANGED));
3375 udelay(40);
3377 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3378 tw32_f(MAC_MI_MODE,
3379 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3380 udelay(80);
3383 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3385 /* Some third-party PHYs need to be reset on link going
3386 * down.
3387 */
3388 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3389 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3390 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3391 netif_carrier_ok(tp->dev)) {
3392 tg3_readphy(tp, MII_BMSR, &bmsr);
3393 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3394 !(bmsr & BMSR_LSTATUS))
3395 force_reset = 1;
3397 if (force_reset)
3398 tg3_phy_reset(tp);
3400 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3401 tg3_readphy(tp, MII_BMSR, &bmsr);
3402 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3403 !tg3_flag(tp, INIT_COMPLETE))
3404 bmsr = 0;
3406 if (!(bmsr & BMSR_LSTATUS)) {
3407 err = tg3_init_5401phy_dsp(tp);
3408 if (err)
3409 return err;
3411 tg3_readphy(tp, MII_BMSR, &bmsr);
3412 for (i = 0; i < 1000; i++) {
3413 udelay(10);
3414 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3415 (bmsr & BMSR_LSTATUS)) {
3416 udelay(40);
3417 break;
3421 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3422 TG3_PHY_REV_BCM5401_B0 &&
3423 !(bmsr & BMSR_LSTATUS) &&
3424 tp->link_config.active_speed == SPEED_1000) {
3425 err = tg3_phy_reset(tp);
3426 if (!err)
3427 err = tg3_init_5401phy_dsp(tp);
3428 if (err)
3429 return err;
3432 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3433 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3434 /* 5701 {A0,B0} CRC bug workaround */
3435 tg3_writephy(tp, 0x15, 0x0a75);
3436 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3437 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3438 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3441 /* Clear pending interrupts... */
3442 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3443 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3445 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3446 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3447 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3448 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3450 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3451 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3452 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3453 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3454 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3455 else
3456 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3459 current_link_up = 0;
3460 current_speed = SPEED_INVALID;
3461 current_duplex = DUPLEX_INVALID;
3463 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3464 err = tg3_phy_auxctl_read(tp,
3465 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3466 &val);
3467 if (!err && !(val & (1 << 10))) {
3468 tg3_phy_auxctl_write(tp,
3469 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3470 val | (1 << 10));
3471 goto relink;
3475 bmsr = 0;
3476 for (i = 0; i < 100; i++) {
3477 tg3_readphy(tp, MII_BMSR, &bmsr);
3478 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3479 (bmsr & BMSR_LSTATUS))
3480 break;
3481 udelay(40);
3484 if (bmsr & BMSR_LSTATUS) {
3485 u32 aux_stat, bmcr;
3487 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3488 for (i = 0; i < 2000; i++) {
3489 udelay(10);
3490 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3491 aux_stat)
3492 break;
3495 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3496 &current_speed,
3497 &current_duplex);
3499 bmcr = 0;
3500 for (i = 0; i < 200; i++) {
3501 tg3_readphy(tp, MII_BMCR, &bmcr);
3502 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3503 continue;
3504 if (bmcr && bmcr != 0x7fff)
3505 break;
3506 udelay(10);
3509 lcl_adv = 0;
3510 rmt_adv = 0;
3512 tp->link_config.active_speed = current_speed;
3513 tp->link_config.active_duplex = current_duplex;
3515 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3516 if ((bmcr & BMCR_ANENABLE) &&
3517 tg3_copper_is_advertising_all(tp,
3518 tp->link_config.advertising)) {
3519 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3520 &rmt_adv))
3521 current_link_up = 1;
3523 } else {
3524 if (!(bmcr & BMCR_ANENABLE) &&
3525 tp->link_config.speed == current_speed &&
3526 tp->link_config.duplex == current_duplex &&
3527 tp->link_config.flowctrl ==
3528 tp->link_config.active_flowctrl) {
3529 current_link_up = 1;
3533 if (current_link_up == 1 &&
3534 tp->link_config.active_duplex == DUPLEX_FULL)
3535 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3538 relink:
3539 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3540 tg3_phy_copper_begin(tp);
3542 tg3_readphy(tp, MII_BMSR, &bmsr);
3543 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3544 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3545 current_link_up = 1;
3548 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3549 if (current_link_up == 1) {
3550 if (tp->link_config.active_speed == SPEED_100 ||
3551 tp->link_config.active_speed == SPEED_10)
3552 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3553 else
3554 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3555 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3556 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3557 else
3558 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3560 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3561 if (tp->link_config.active_duplex == DUPLEX_HALF)
3562 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3564 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3565 if (current_link_up == 1 &&
3566 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3567 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3568 else
3569 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3572 /* ??? Without this setting Netgear GA302T PHY does not
3573 * ??? send/receive packets...
3574 */
3575 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3576 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3577 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3578 tw32_f(MAC_MI_MODE, tp->mi_mode);
3579 udelay(80);
3582 tw32_f(MAC_MODE, tp->mac_mode);
3583 udelay(40);
3585 tg3_phy_eee_adjust(tp, current_link_up);
3587 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3588 /* Polled via timer. */
3589 tw32_f(MAC_EVENT, 0);
3590 } else {
3591 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3593 udelay(40);
3595 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3596 current_link_up == 1 &&
3597 tp->link_config.active_speed == SPEED_1000 &&
3598 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3599 udelay(120);
3600 tw32_f(MAC_STATUS,
3601 (MAC_STATUS_SYNC_CHANGED |
3602 MAC_STATUS_CFG_CHANGED));
3603 udelay(40);
3604 tg3_write_mem(tp,
3605 NIC_SRAM_FIRMWARE_MBOX,
3606 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3609 /* Prevent send BD corruption. */
3610 if (tg3_flag(tp, CLKREQ_BUG)) {
3611 u16 oldlnkctl, newlnkctl;
3613 pci_read_config_word(tp->pdev,
3614 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3615 &oldlnkctl);
3616 if (tp->link_config.active_speed == SPEED_100 ||
3617 tp->link_config.active_speed == SPEED_10)
3618 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3619 else
3620 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3621 if (newlnkctl != oldlnkctl)
3622 pci_write_config_word(tp->pdev,
3623 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3624 newlnkctl);
3627 if (current_link_up != netif_carrier_ok(tp->dev)) {
3628 if (current_link_up)
3629 netif_carrier_on(tp->dev);
3630 else
3631 netif_carrier_off(tp->dev);
3632 tg3_link_report(tp);
3635 return 0;
3638 struct tg3_fiber_aneginfo {
3639 int state;
3640 #define ANEG_STATE_UNKNOWN 0
3641 #define ANEG_STATE_AN_ENABLE 1
3642 #define ANEG_STATE_RESTART_INIT 2
3643 #define ANEG_STATE_RESTART 3
3644 #define ANEG_STATE_DISABLE_LINK_OK 4
3645 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3646 #define ANEG_STATE_ABILITY_DETECT 6
3647 #define ANEG_STATE_ACK_DETECT_INIT 7
3648 #define ANEG_STATE_ACK_DETECT 8
3649 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3650 #define ANEG_STATE_COMPLETE_ACK 10
3651 #define ANEG_STATE_IDLE_DETECT_INIT 11
3652 #define ANEG_STATE_IDLE_DETECT 12
3653 #define ANEG_STATE_LINK_OK 13
3654 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3655 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3657 u32 flags;
3658 #define MR_AN_ENABLE 0x00000001
3659 #define MR_RESTART_AN 0x00000002
3660 #define MR_AN_COMPLETE 0x00000004
3661 #define MR_PAGE_RX 0x00000008
3662 #define MR_NP_LOADED 0x00000010
3663 #define MR_TOGGLE_TX 0x00000020
3664 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3665 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3666 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3667 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3668 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3669 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3670 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3671 #define MR_TOGGLE_RX 0x00002000
3672 #define MR_NP_RX 0x00004000
3674 #define MR_LINK_OK 0x80000000
3676 unsigned long link_time, cur_time;
3678 u32 ability_match_cfg;
3679 int ability_match_count;
3681 char ability_match, idle_match, ack_match;
3683 u32 txconfig, rxconfig;
3684 #define ANEG_CFG_NP 0x00000080
3685 #define ANEG_CFG_ACK 0x00000040
3686 #define ANEG_CFG_RF2 0x00000020
3687 #define ANEG_CFG_RF1 0x00000010
3688 #define ANEG_CFG_PS2 0x00000001
3689 #define ANEG_CFG_PS1 0x00008000
3690 #define ANEG_CFG_HD 0x00004000
3691 #define ANEG_CFG_FD 0x00002000
3692 #define ANEG_CFG_INVAL 0x00001f06
3695 #define ANEG_OK 0
3696 #define ANEG_DONE 1
3697 #define ANEG_TIMER_ENAB 2
3698 #define ANEG_FAILED -1
3700 #define ANEG_STATE_SETTLE_TIME 10000
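/* Timing note: fiber_autoneg() below steps this state machine once
 * per udelay(1) tick, so ANEG_STATE_SETTLE_TIME (10000 ticks) is
 * roughly 10 ms and the 195000-tick bound is roughly 195 ms, ignoring
 * per-iteration overhead.
 */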
3702 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3703 struct tg3_fiber_aneginfo *ap)
3705 u16 flowctrl;
3706 unsigned long delta;
3707 u32 rx_cfg_reg;
3708 int ret;
3710 if (ap->state == ANEG_STATE_UNKNOWN) {
3711 ap->rxconfig = 0;
3712 ap->link_time = 0;
3713 ap->cur_time = 0;
3714 ap->ability_match_cfg = 0;
3715 ap->ability_match_count = 0;
3716 ap->ability_match = 0;
3717 ap->idle_match = 0;
3718 ap->ack_match = 0;
3720 ap->cur_time++;
3722 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3723 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3725 if (rx_cfg_reg != ap->ability_match_cfg) {
3726 ap->ability_match_cfg = rx_cfg_reg;
3727 ap->ability_match = 0;
3728 ap->ability_match_count = 0;
3729 } else {
3730 if (++ap->ability_match_count > 1) {
3731 ap->ability_match = 1;
3732 ap->ability_match_cfg = rx_cfg_reg;
3735 if (rx_cfg_reg & ANEG_CFG_ACK)
3736 ap->ack_match = 1;
3737 else
3738 ap->ack_match = 0;
3740 ap->idle_match = 0;
3741 } else {
3742 ap->idle_match = 1;
3743 ap->ability_match_cfg = 0;
3744 ap->ability_match_count = 0;
3745 ap->ability_match = 0;
3746 ap->ack_match = 0;
3748 rx_cfg_reg = 0;
3751 ap->rxconfig = rx_cfg_reg;
3752 ret = ANEG_OK;
3754 switch (ap->state) {
3755 case ANEG_STATE_UNKNOWN:
3756 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3757 ap->state = ANEG_STATE_AN_ENABLE;
3759 /* fallthru */
3760 case ANEG_STATE_AN_ENABLE:
3761 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3762 if (ap->flags & MR_AN_ENABLE) {
3763 ap->link_time = 0;
3764 ap->cur_time = 0;
3765 ap->ability_match_cfg = 0;
3766 ap->ability_match_count = 0;
3767 ap->ability_match = 0;
3768 ap->idle_match = 0;
3769 ap->ack_match = 0;
3771 ap->state = ANEG_STATE_RESTART_INIT;
3772 } else {
3773 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3775 break;
3777 case ANEG_STATE_RESTART_INIT:
3778 ap->link_time = ap->cur_time;
3779 ap->flags &= ~(MR_NP_LOADED);
3780 ap->txconfig = 0;
3781 tw32(MAC_TX_AUTO_NEG, 0);
3782 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3783 tw32_f(MAC_MODE, tp->mac_mode);
3784 udelay(40);
3786 ret = ANEG_TIMER_ENAB;
3787 ap->state = ANEG_STATE_RESTART;
3789 /* fallthru */
3790 case ANEG_STATE_RESTART:
3791 delta = ap->cur_time - ap->link_time;
3792 if (delta > ANEG_STATE_SETTLE_TIME)
3793 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3794 else
3795 ret = ANEG_TIMER_ENAB;
3796 break;
3798 case ANEG_STATE_DISABLE_LINK_OK:
3799 ret = ANEG_DONE;
3800 break;
3802 case ANEG_STATE_ABILITY_DETECT_INIT:
3803 ap->flags &= ~(MR_TOGGLE_TX);
3804 ap->txconfig = ANEG_CFG_FD;
3805 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3806 if (flowctrl & ADVERTISE_1000XPAUSE)
3807 ap->txconfig |= ANEG_CFG_PS1;
3808 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3809 ap->txconfig |= ANEG_CFG_PS2;
3810 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3811 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3812 tw32_f(MAC_MODE, tp->mac_mode);
3813 udelay(40);
3815 ap->state = ANEG_STATE_ABILITY_DETECT;
3816 break;
3818 case ANEG_STATE_ABILITY_DETECT:
3819 if (ap->ability_match != 0 && ap->rxconfig != 0)
3820 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3821 break;
3823 case ANEG_STATE_ACK_DETECT_INIT:
3824 ap->txconfig |= ANEG_CFG_ACK;
3825 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3826 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3827 tw32_f(MAC_MODE, tp->mac_mode);
3828 udelay(40);
3830 ap->state = ANEG_STATE_ACK_DETECT;
3832 /* fallthru */
3833 case ANEG_STATE_ACK_DETECT:
3834 if (ap->ack_match != 0) {
3835 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3836 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3837 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3838 } else {
3839 ap->state = ANEG_STATE_AN_ENABLE;
3841 } else if (ap->ability_match != 0 &&
3842 ap->rxconfig == 0) {
3843 ap->state = ANEG_STATE_AN_ENABLE;
3845 break;
3847 case ANEG_STATE_COMPLETE_ACK_INIT:
3848 if (ap->rxconfig & ANEG_CFG_INVAL) {
3849 ret = ANEG_FAILED;
3850 break;
3852 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3853 MR_LP_ADV_HALF_DUPLEX |
3854 MR_LP_ADV_SYM_PAUSE |
3855 MR_LP_ADV_ASYM_PAUSE |
3856 MR_LP_ADV_REMOTE_FAULT1 |
3857 MR_LP_ADV_REMOTE_FAULT2 |
3858 MR_LP_ADV_NEXT_PAGE |
3859 MR_TOGGLE_RX |
3860 MR_NP_RX);
3861 if (ap->rxconfig & ANEG_CFG_FD)
3862 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3863 if (ap->rxconfig & ANEG_CFG_HD)
3864 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3865 if (ap->rxconfig & ANEG_CFG_PS1)
3866 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3867 if (ap->rxconfig & ANEG_CFG_PS2)
3868 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3869 if (ap->rxconfig & ANEG_CFG_RF1)
3870 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3871 if (ap->rxconfig & ANEG_CFG_RF2)
3872 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3873 if (ap->rxconfig & ANEG_CFG_NP)
3874 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3876 ap->link_time = ap->cur_time;
3878 ap->flags ^= (MR_TOGGLE_TX);
3879 if (ap->rxconfig & 0x0008)
3880 ap->flags |= MR_TOGGLE_RX;
3881 if (ap->rxconfig & ANEG_CFG_NP)
3882 ap->flags |= MR_NP_RX;
3883 ap->flags |= MR_PAGE_RX;
3885 ap->state = ANEG_STATE_COMPLETE_ACK;
3886 ret = ANEG_TIMER_ENAB;
3887 break;
3889 case ANEG_STATE_COMPLETE_ACK:
3890 if (ap->ability_match != 0 &&
3891 ap->rxconfig == 0) {
3892 ap->state = ANEG_STATE_AN_ENABLE;
3893 break;
3895 delta = ap->cur_time - ap->link_time;
3896 if (delta > ANEG_STATE_SETTLE_TIME) {
3897 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3898 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3899 } else {
3900 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3901 !(ap->flags & MR_NP_RX)) {
3902 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3903 } else {
3904 ret = ANEG_FAILED;
3908 break;
3910 case ANEG_STATE_IDLE_DETECT_INIT:
3911 ap->link_time = ap->cur_time;
3912 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3913 tw32_f(MAC_MODE, tp->mac_mode);
3914 udelay(40);
3916 ap->state = ANEG_STATE_IDLE_DETECT;
3917 ret = ANEG_TIMER_ENAB;
3918 break;
3920 case ANEG_STATE_IDLE_DETECT:
3921 if (ap->ability_match != 0 &&
3922 ap->rxconfig == 0) {
3923 ap->state = ANEG_STATE_AN_ENABLE;
3924 break;
3926 delta = ap->cur_time - ap->link_time;
3927 if (delta > ANEG_STATE_SETTLE_TIME) {
3928 /* XXX another gem from the Broadcom driver :( */
3929 ap->state = ANEG_STATE_LINK_OK;
3931 break;
3933 case ANEG_STATE_LINK_OK:
3934 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3935 ret = ANEG_DONE;
3936 break;
3938 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3939 /* ??? unimplemented */
3940 break;
3942 case ANEG_STATE_NEXT_PAGE_WAIT:
3943 /* ??? unimplemented */
3944 break;
3946 default:
3947 ret = ANEG_FAILED;
3948 break;
3951 return ret;
3954 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3956 int res = 0;
3957 struct tg3_fiber_aneginfo aninfo;
3958 int status = ANEG_FAILED;
3959 unsigned int tick;
3960 u32 tmp;
3962 tw32_f(MAC_TX_AUTO_NEG, 0);
3964 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3965 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3966 udelay(40);
3968 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3969 udelay(40);
3971 memset(&aninfo, 0, sizeof(aninfo));
3972 aninfo.flags |= MR_AN_ENABLE;
3973 aninfo.state = ANEG_STATE_UNKNOWN;
3974 aninfo.cur_time = 0;
3975 tick = 0;
3976 while (++tick < 195000) {
3977 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3978 if (status == ANEG_DONE || status == ANEG_FAILED)
3979 break;
3981 udelay(1);
3984 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3985 tw32_f(MAC_MODE, tp->mac_mode);
3986 udelay(40);
3988 *txflags = aninfo.txconfig;
3989 *rxflags = aninfo.flags;
3991 if (status == ANEG_DONE &&
3992 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3993 MR_LP_ADV_FULL_DUPLEX)))
3994 res = 1;
3996 return res;
3999 static void tg3_init_bcm8002(struct tg3 *tp)
4001 u32 mac_status = tr32(MAC_STATUS);
4002 int i;
4004 /* Reset when initializing for the first time or when we have a link. */
4005 if (tg3_flag(tp, INIT_COMPLETE) &&
4006 !(mac_status & MAC_STATUS_PCS_SYNCED))
4007 return;
4009 /* Set PLL lock range. */
4010 tg3_writephy(tp, 0x16, 0x8007);
4012 /* SW reset */
4013 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4015 /* Wait for reset to complete. */
4016 /* XXX schedule_timeout() ... */
4017 for (i = 0; i < 500; i++)
4018 udelay(10);
4020 /* Config mode; select PMA/Ch 1 regs. */
4021 tg3_writephy(tp, 0x10, 0x8411);
4023 /* Enable auto-lock and comdet, select txclk for tx. */
4024 tg3_writephy(tp, 0x11, 0x0a10);
4026 tg3_writephy(tp, 0x18, 0x00a0);
4027 tg3_writephy(tp, 0x16, 0x41ff);
4029 /* Assert and deassert POR. */
4030 tg3_writephy(tp, 0x13, 0x0400);
4031 udelay(40);
4032 tg3_writephy(tp, 0x13, 0x0000);
4034 tg3_writephy(tp, 0x11, 0x0a50);
4035 udelay(40);
4036 tg3_writephy(tp, 0x11, 0x0a10);
4038 /* Wait for signal to stabilize */
4039 /* XXX schedule_timeout() ... */
4040 for (i = 0; i < 15000; i++)
4041 udelay(10);
4043 /* Deselect the channel register so we can read the PHYID
4044 * later.
4045 */
4046 tg3_writephy(tp, 0x10, 0x8011);
4049 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4051 u16 flowctrl;
4052 u32 sg_dig_ctrl, sg_dig_status;
4053 u32 serdes_cfg, expected_sg_dig_ctrl;
4054 int workaround, port_a;
4055 int current_link_up;
4057 serdes_cfg = 0;
4058 expected_sg_dig_ctrl = 0;
4059 workaround = 0;
4060 port_a = 1;
4061 current_link_up = 0;
4063 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4064 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4065 workaround = 1;
4066 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4067 port_a = 0;
4069 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4070 /* preserve bits 20-23 for voltage regulator */
4071 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4074 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4076 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4077 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4078 if (workaround) {
4079 u32 val = serdes_cfg;
4081 if (port_a)
4082 val |= 0xc010000;
4083 else
4084 val |= 0x4010000;
4085 tw32_f(MAC_SERDES_CFG, val);
4088 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4090 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4091 tg3_setup_flow_control(tp, 0, 0);
4092 current_link_up = 1;
4094 goto out;
4097 /* Want auto-negotiation. */
4098 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4100 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4101 if (flowctrl & ADVERTISE_1000XPAUSE)
4102 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4103 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4104 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4106 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4107 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4108 tp->serdes_counter &&
4109 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4110 MAC_STATUS_RCVD_CFG)) ==
4111 MAC_STATUS_PCS_SYNCED)) {
4112 tp->serdes_counter--;
4113 current_link_up = 1;
4114 goto out;
4116 restart_autoneg:
4117 if (workaround)
4118 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4119 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4120 udelay(5);
4121 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4123 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4124 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4125 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4126 MAC_STATUS_SIGNAL_DET)) {
4127 sg_dig_status = tr32(SG_DIG_STATUS);
4128 mac_status = tr32(MAC_STATUS);
4130 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4131 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4132 u32 local_adv = 0, remote_adv = 0;
4134 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4135 local_adv |= ADVERTISE_1000XPAUSE;
4136 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4137 local_adv |= ADVERTISE_1000XPSE_ASYM;
4139 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4140 remote_adv |= LPA_1000XPAUSE;
4141 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4142 remote_adv |= LPA_1000XPAUSE_ASYM;
4144 tg3_setup_flow_control(tp, local_adv, remote_adv);
4145 current_link_up = 1;
4146 tp->serdes_counter = 0;
4147 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4148 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4149 if (tp->serdes_counter)
4150 tp->serdes_counter--;
4151 else {
4152 if (workaround) {
4153 u32 val = serdes_cfg;
4155 if (port_a)
4156 val |= 0xc010000;
4157 else
4158 val |= 0x4010000;
4160 tw32_f(MAC_SERDES_CFG, val);
4163 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4164 udelay(40);
4166 /* Link parallel detection: the link is up only
4167 * if we have PCS_SYNC and are not receiving
4168 * config code words. */
4169 mac_status = tr32(MAC_STATUS);
4170 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4171 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4172 tg3_setup_flow_control(tp, 0, 0);
4173 current_link_up = 1;
4174 tp->phy_flags |=
4175 TG3_PHYFLG_PARALLEL_DETECT;
4176 tp->serdes_counter =
4177 SERDES_PARALLEL_DET_TIMEOUT;
4178 } else
4179 goto restart_autoneg;
4182 } else {
4183 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4184 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4187 out:
4188 return current_link_up;
4191 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4193 int current_link_up = 0;
4195 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4196 goto out;
4198 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4199 u32 txflags, rxflags;
4200 int i;
4202 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4203 u32 local_adv = 0, remote_adv = 0;
4205 if (txflags & ANEG_CFG_PS1)
4206 local_adv |= ADVERTISE_1000XPAUSE;
4207 if (txflags & ANEG_CFG_PS2)
4208 local_adv |= ADVERTISE_1000XPSE_ASYM;
4210 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4211 remote_adv |= LPA_1000XPAUSE;
4212 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4213 remote_adv |= LPA_1000XPAUSE_ASYM;
4215 tg3_setup_flow_control(tp, local_adv, remote_adv);
4217 current_link_up = 1;
4219 for (i = 0; i < 30; i++) {
4220 udelay(20);
4221 tw32_f(MAC_STATUS,
4222 (MAC_STATUS_SYNC_CHANGED |
4223 MAC_STATUS_CFG_CHANGED));
4224 udelay(40);
4225 if ((tr32(MAC_STATUS) &
4226 (MAC_STATUS_SYNC_CHANGED |
4227 MAC_STATUS_CFG_CHANGED)) == 0)
4228 break;
4231 mac_status = tr32(MAC_STATUS);
4232 if (current_link_up == 0 &&
4233 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4234 !(mac_status & MAC_STATUS_RCVD_CFG))
4235 current_link_up = 1;
4236 } else {
4237 tg3_setup_flow_control(tp, 0, 0);
4239 /* Forcing 1000FD link up. */
4240 current_link_up = 1;
4242 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4243 udelay(40);
4245 tw32_f(MAC_MODE, tp->mac_mode);
4246 udelay(40);
4249 out:
4250 return current_link_up;
4253 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4255 u32 orig_pause_cfg;
4256 u16 orig_active_speed;
4257 u8 orig_active_duplex;
4258 u32 mac_status;
4259 int current_link_up;
4260 int i;
4262 orig_pause_cfg = tp->link_config.active_flowctrl;
4263 orig_active_speed = tp->link_config.active_speed;
4264 orig_active_duplex = tp->link_config.active_duplex;
4266 if (!tg3_flag(tp, HW_AUTONEG) &&
4267 netif_carrier_ok(tp->dev) &&
4268 tg3_flag(tp, INIT_COMPLETE)) {
4269 mac_status = tr32(MAC_STATUS);
4270 mac_status &= (MAC_STATUS_PCS_SYNCED |
4271 MAC_STATUS_SIGNAL_DET |
4272 MAC_STATUS_CFG_CHANGED |
4273 MAC_STATUS_RCVD_CFG);
4274 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4275 MAC_STATUS_SIGNAL_DET)) {
4276 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4277 MAC_STATUS_CFG_CHANGED));
4278 return 0;
4282 tw32_f(MAC_TX_AUTO_NEG, 0);
4284 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4285 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4286 tw32_f(MAC_MODE, tp->mac_mode);
4287 udelay(40);
4289 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4290 tg3_init_bcm8002(tp);
4292 /* Enable link change event even when serdes polling. */
4293 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4294 udelay(40);
4296 current_link_up = 0;
4297 mac_status = tr32(MAC_STATUS);
4299 if (tg3_flag(tp, HW_AUTONEG))
4300 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4301 else
4302 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4304 tp->napi[0].hw_status->status =
4305 (SD_STATUS_UPDATED |
4306 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4308 for (i = 0; i < 100; i++) {
4309 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4310 MAC_STATUS_CFG_CHANGED));
4311 udelay(5);
4312 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4313 MAC_STATUS_CFG_CHANGED |
4314 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4315 break;
4318 mac_status = tr32(MAC_STATUS);
4319 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4320 current_link_up = 0;
4321 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4322 tp->serdes_counter == 0) {
4323 tw32_f(MAC_MODE, (tp->mac_mode |
4324 MAC_MODE_SEND_CONFIGS));
4325 udelay(1);
4326 tw32_f(MAC_MODE, tp->mac_mode);
4330 if (current_link_up == 1) {
4331 tp->link_config.active_speed = SPEED_1000;
4332 tp->link_config.active_duplex = DUPLEX_FULL;
4333 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4334 LED_CTRL_LNKLED_OVERRIDE |
4335 LED_CTRL_1000MBPS_ON));
4336 } else {
4337 tp->link_config.active_speed = SPEED_INVALID;
4338 tp->link_config.active_duplex = DUPLEX_INVALID;
4339 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4340 LED_CTRL_LNKLED_OVERRIDE |
4341 LED_CTRL_TRAFFIC_OVERRIDE));
4344 if (current_link_up != netif_carrier_ok(tp->dev)) {
4345 if (current_link_up)
4346 netif_carrier_on(tp->dev);
4347 else
4348 netif_carrier_off(tp->dev);
4349 tg3_link_report(tp);
4350 } else {
4351 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4352 if (orig_pause_cfg != now_pause_cfg ||
4353 orig_active_speed != tp->link_config.active_speed ||
4354 orig_active_duplex != tp->link_config.active_duplex)
4355 tg3_link_report(tp);
4358 return 0;
4361 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4363 int current_link_up, err = 0;
4364 u32 bmsr, bmcr;
4365 u16 current_speed;
4366 u8 current_duplex;
4367 u32 local_adv, remote_adv;
4369 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4370 tw32_f(MAC_MODE, tp->mac_mode);
4371 udelay(40);
4373 tw32(MAC_EVENT, 0);
4375 tw32_f(MAC_STATUS,
4376 (MAC_STATUS_SYNC_CHANGED |
4377 MAC_STATUS_CFG_CHANGED |
4378 MAC_STATUS_MI_COMPLETION |
4379 MAC_STATUS_LNKSTATE_CHANGED));
4380 udelay(40);
4382 if (force_reset)
4383 tg3_phy_reset(tp);
4385 current_link_up = 0;
4386 current_speed = SPEED_INVALID;
4387 current_duplex = DUPLEX_INVALID;
4389 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4390 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4391 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4392 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4393 bmsr |= BMSR_LSTATUS;
4394 else
4395 bmsr &= ~BMSR_LSTATUS;
4398 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4400 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4401 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4402 /* do nothing, just check for link up at the end */
4403 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4404 u32 adv, new_adv;
4406 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4407 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4408 ADVERTISE_1000XPAUSE |
4409 ADVERTISE_1000XPSE_ASYM |
4410 ADVERTISE_SLCT);
4412 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4414 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4415 new_adv |= ADVERTISE_1000XHALF;
4416 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4417 new_adv |= ADVERTISE_1000XFULL;
4419 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4420 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4421 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4422 tg3_writephy(tp, MII_BMCR, bmcr);
4424 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4425 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4426 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4428 return err;
4430 } else {
4431 u32 new_bmcr;
4433 bmcr &= ~BMCR_SPEED1000;
4434 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4436 if (tp->link_config.duplex == DUPLEX_FULL)
4437 new_bmcr |= BMCR_FULLDPLX;
4439 if (new_bmcr != bmcr) {
4440 /* BMCR_SPEED1000 is a reserved bit that needs
4441 * to be set on write.
4442 */
4443 new_bmcr |= BMCR_SPEED1000;
4445 /* Force a linkdown */
4446 if (netif_carrier_ok(tp->dev)) {
4447 u32 adv;
4449 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4450 adv &= ~(ADVERTISE_1000XFULL |
4451 ADVERTISE_1000XHALF |
4452 ADVERTISE_SLCT);
4453 tg3_writephy(tp, MII_ADVERTISE, adv);
4454 tg3_writephy(tp, MII_BMCR, bmcr |
4455 BMCR_ANRESTART |
4456 BMCR_ANENABLE);
4457 udelay(10);
4458 netif_carrier_off(tp->dev);
4460 tg3_writephy(tp, MII_BMCR, new_bmcr);
4461 bmcr = new_bmcr;
4462 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4463 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4464 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4465 ASIC_REV_5714) {
4466 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4467 bmsr |= BMSR_LSTATUS;
4468 else
4469 bmsr &= ~BMSR_LSTATUS;
4471 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4475 if (bmsr & BMSR_LSTATUS) {
4476 current_speed = SPEED_1000;
4477 current_link_up = 1;
4478 if (bmcr & BMCR_FULLDPLX)
4479 current_duplex = DUPLEX_FULL;
4480 else
4481 current_duplex = DUPLEX_HALF;
4483 local_adv = 0;
4484 remote_adv = 0;
4486 if (bmcr & BMCR_ANENABLE) {
4487 u32 common;
4489 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4490 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4491 common = local_adv & remote_adv;
4492 if (common & (ADVERTISE_1000XHALF |
4493 ADVERTISE_1000XFULL)) {
4494 if (common & ADVERTISE_1000XFULL)
4495 current_duplex = DUPLEX_FULL;
4496 else
4497 current_duplex = DUPLEX_HALF;
4498 } else if (!tg3_flag(tp, 5780_CLASS)) {
4499 /* Link is up via parallel detect */
4500 } else {
4501 current_link_up = 0;
4506 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4507 tg3_setup_flow_control(tp, local_adv, remote_adv);
4509 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4510 if (tp->link_config.active_duplex == DUPLEX_HALF)
4511 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4513 tw32_f(MAC_MODE, tp->mac_mode);
4514 udelay(40);
4516 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4518 tp->link_config.active_speed = current_speed;
4519 tp->link_config.active_duplex = current_duplex;
4521 if (current_link_up != netif_carrier_ok(tp->dev)) {
4522 if (current_link_up)
4523 netif_carrier_on(tp->dev);
4524 else {
4525 netif_carrier_off(tp->dev);
4526 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4528 tg3_link_report(tp);
4530 return err;
4533 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4535 if (tp->serdes_counter) {
4536 /* Give autoneg time to complete. */
4537 tp->serdes_counter--;
4538 return;
4541 if (!netif_carrier_ok(tp->dev) &&
4542 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4543 u32 bmcr;
4545 tg3_readphy(tp, MII_BMCR, &bmcr);
4546 if (bmcr & BMCR_ANENABLE) {
4547 u32 phy1, phy2;
4549 /* Select shadow register 0x1f */
4550 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4551 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4553 /* Select expansion interrupt status register */
4554 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4555 MII_TG3_DSP_EXP1_INT_STAT);
4556 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4557 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4559 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4560 /* We have signal detect and not receiving
4561 * config code words, link is up by parallel
4562 * detection.
4563 */
4565 bmcr &= ~BMCR_ANENABLE;
4566 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4567 tg3_writephy(tp, MII_BMCR, bmcr);
4568 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4571 } else if (netif_carrier_ok(tp->dev) &&
4572 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4573 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4574 u32 phy2;
4576 /* Select expansion interrupt status register */
4577 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4578 MII_TG3_DSP_EXP1_INT_STAT);
4579 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4580 if (phy2 & 0x20) {
4581 u32 bmcr;
4583 /* Config code words received, turn on autoneg. */
4584 tg3_readphy(tp, MII_BMCR, &bmcr);
4585 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4587 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4593 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4595 u32 val;
4596 int err;
4598 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4599 err = tg3_setup_fiber_phy(tp, force_reset);
4600 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4601 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4602 else
4603 err = tg3_setup_copper_phy(tp, force_reset);
4605 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4606 u32 scale;
4608 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4609 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4610 scale = 65;
4611 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4612 scale = 6;
4613 else
4614 scale = 12;
4616 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4617 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4618 tw32(GRC_MISC_CFG, val);
4621 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4622 (6 << TX_LENGTHS_IPG_SHIFT);
4623 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4624 val |= tr32(MAC_TX_LENGTHS) &
4625 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4626 TX_LENGTHS_CNT_DWN_VAL_MSK);
4628 if (tp->link_config.active_speed == SPEED_1000 &&
4629 tp->link_config.active_duplex == DUPLEX_HALF)
4630 tw32(MAC_TX_LENGTHS, val |
4631 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4632 else
4633 tw32(MAC_TX_LENGTHS, val |
4634 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4636 if (!tg3_flag(tp, 5705_PLUS)) {
4637 if (netif_carrier_ok(tp->dev)) {
4638 tw32(HOSTCC_STAT_COAL_TICKS,
4639 tp->coal.stats_block_coalesce_usecs);
4640 } else {
4641 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4645 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4646 val = tr32(PCIE_PWR_MGMT_THRESH);
4647 if (!netif_carrier_ok(tp->dev))
4648 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4649 tp->pwrmgmt_thresh;
4650 else
4651 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4652 tw32(PCIE_PWR_MGMT_THRESH, val);
4655 return err;
4658 static inline int tg3_irq_sync(struct tg3 *tp)
4660 return tp->irq_sync;
4663 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4665 int i;
4667 dst = (u32 *)((u8 *)dst + off);
4668 for (i = 0; i < len; i += sizeof(u32))
4669 *dst++ = tr32(off + i);
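/* Note: dst is first biased by 'off' bytes so that regs[off / 4]
 * always holds the register at offset 'off'.  tg3_dump_state() below
 * relies on this to print absolute register offsets from one flat
 * buffer. */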
4672 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4674 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4675 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4676 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4677 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4678 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4679 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4680 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4681 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4682 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4683 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4684 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4685 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4686 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4687 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4688 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4689 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4690 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4691 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4692 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4694 if (tg3_flag(tp, SUPPORT_MSIX))
4695 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4697 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4698 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4699 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4700 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4701 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4702 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4703 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4704 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4706 if (!tg3_flag(tp, 5705_PLUS)) {
4707 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4708 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4709 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4712 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4713 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4714 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4715 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4716 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4718 if (tg3_flag(tp, NVRAM))
4719 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4722 static void tg3_dump_state(struct tg3 *tp)
4724 int i;
4725 u32 *regs;
4727 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4728 if (!regs) {
4729 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4730 return;
4733 if (tg3_flag(tp, PCI_EXPRESS)) {
4734 /* Read up to but not including private PCI registers */
4735 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4736 regs[i / sizeof(u32)] = tr32(i);
4737 } else
4738 tg3_dump_legacy_regs(tp, regs);
4740 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4741 if (!regs[i + 0] && !regs[i + 1] &&
4742 !regs[i + 2] && !regs[i + 3])
4743 continue;
4745 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4746 i * 4,
4747 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4750 kfree(regs);
4752 for (i = 0; i < tp->irq_cnt; i++) {
4753 struct tg3_napi *tnapi = &tp->napi[i];
4755 /* SW status block */
4756 netdev_err(tp->dev,
4757 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4759 tnapi->hw_status->status,
4760 tnapi->hw_status->status_tag,
4761 tnapi->hw_status->rx_jumbo_consumer,
4762 tnapi->hw_status->rx_consumer,
4763 tnapi->hw_status->rx_mini_consumer,
4764 tnapi->hw_status->idx[0].rx_producer,
4765 tnapi->hw_status->idx[0].tx_consumer);
4767 netdev_err(tp->dev,
4768 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4770 tnapi->last_tag, tnapi->last_irq_tag,
4771 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4772 tnapi->rx_rcb_ptr,
4773 tnapi->prodring.rx_std_prod_idx,
4774 tnapi->prodring.rx_std_cons_idx,
4775 tnapi->prodring.rx_jmb_prod_idx,
4776 tnapi->prodring.rx_jmb_cons_idx);
4780 /* This is called whenever we suspect that the system chipset is re-
4781 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4782 * is bogus tx completions. We try to recover by setting the
4783 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4784 * in the workqueue.
4785 */
4786 static void tg3_tx_recover(struct tg3 *tp)
4788 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4789 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4791 netdev_warn(tp->dev,
4792 "The system may be re-ordering memory-mapped I/O "
4793 "cycles to the network device, attempting to recover. "
4794 "Please report the problem to the driver maintainer "
4795 "and include system chipset information.\n");
4797 spin_lock(&tp->lock);
4798 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4799 spin_unlock(&tp->lock);
4802 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4804 /* Tell compiler to fetch tx indices from memory. */
4805 barrier();
4806 return tnapi->tx_pending -
4807 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
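/* Worked example (illustrative numbers only): with tx_pending == 511,
 * tx_prod == 10 and tx_cons == 500 on a 512-entry ring, the in-flight
 * count is (10 - 500) & 511 == 22, leaving 511 - 22 == 489 free
 * descriptors.  The mask makes the unsigned subtraction wrap safely.
 */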
4810 /* Tigon3 never reports partial packet sends. So we do not
4811 * need special logic to handle SKBs that have not had all
4812 * of their frags sent yet, like SunGEM does.
4813 */
4814 static void tg3_tx(struct tg3_napi *tnapi)
4816 struct tg3 *tp = tnapi->tp;
4817 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4818 u32 sw_idx = tnapi->tx_cons;
4819 struct netdev_queue *txq;
4820 int index = tnapi - tp->napi;
4822 if (tg3_flag(tp, ENABLE_TSS))
4823 index--;
4825 txq = netdev_get_tx_queue(tp->dev, index);
4827 while (sw_idx != hw_idx) {
4828 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
4829 struct sk_buff *skb = ri->skb;
4830 int i, tx_bug = 0;
4832 if (unlikely(skb == NULL)) {
4833 tg3_tx_recover(tp);
4834 return;
4837 pci_unmap_single(tp->pdev,
4838 dma_unmap_addr(ri, mapping),
4839 skb_headlen(skb),
4840 PCI_DMA_TODEVICE);
4842 ri->skb = NULL;
4844 while (ri->fragmented) {
4845 ri->fragmented = false;
4846 sw_idx = NEXT_TX(sw_idx);
4847 ri = &tnapi->tx_buffers[sw_idx];
4850 sw_idx = NEXT_TX(sw_idx);
4852 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4853 ri = &tnapi->tx_buffers[sw_idx];
4854 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4855 tx_bug = 1;
4857 pci_unmap_page(tp->pdev,
4858 dma_unmap_addr(ri, mapping),
4859 skb_shinfo(skb)->frags[i].size,
4860 PCI_DMA_TODEVICE);
4862 while (ri->fragmented) {
4863 ri->fragmented = false;
4864 sw_idx = NEXT_TX(sw_idx);
4865 ri = &tnapi->tx_buffers[sw_idx];
4868 sw_idx = NEXT_TX(sw_idx);
4871 dev_kfree_skb(skb);
4873 if (unlikely(tx_bug)) {
4874 tg3_tx_recover(tp);
4875 return;
4879 tnapi->tx_cons = sw_idx;
4881 /* Need to make the tx_cons update visible to tg3_start_xmit()
4882 * before checking for netif_queue_stopped(). Without the
4883 * memory barrier, there is a small possibility that tg3_start_xmit()
4884 * will miss it and cause the queue to be stopped forever.
4885 */
4886 smp_mb();
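/* Roughly, the interleaving being prevented (illustrative sketch):
 *
 *   tg3_tx() (this CPU)           tg3_start_xmit() (other CPU)
 *   tnapi->tx_cons = sw_idx;      sees a full ring, stops queue
 *   checks netif_tx_queue_...     re-checks tg3_tx_avail()
 *
 * Without smp_mb() the tx_cons store may not yet be visible to the
 * re-check, while the stopped-queue check below sees a stale
 * "running" state, so neither side ever wakes the queue. */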
4888 if (unlikely(netif_tx_queue_stopped(txq) &&
4889 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4890 __netif_tx_lock(txq, smp_processor_id());
4891 if (netif_tx_queue_stopped(txq) &&
4892 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4893 netif_tx_wake_queue(txq);
4894 __netif_tx_unlock(txq);
4898 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4900 if (!ri->skb)
4901 return;
4903 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4904 map_sz, PCI_DMA_FROMDEVICE);
4905 dev_kfree_skb_any(ri->skb);
4906 ri->skb = NULL;
4909 /* Returns size of skb allocated or < 0 on error.
4911 * We only need to fill in the address because the other members
4912 * of the RX descriptor are invariant, see tg3_init_rings.
4914 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4915 * posting buffers we only dirty the first cache line of the RX
4916 * descriptor (containing the address). Whereas for the RX status
4917 * buffers the cpu only reads the last cacheline of the RX descriptor
4918 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4919 */
4920 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4921 u32 opaque_key, u32 dest_idx_unmasked)
4923 struct tg3_rx_buffer_desc *desc;
4924 struct ring_info *map;
4925 struct sk_buff *skb;
4926 dma_addr_t mapping;
4927 int skb_size, dest_idx;
4929 switch (opaque_key) {
4930 case RXD_OPAQUE_RING_STD:
4931 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4932 desc = &tpr->rx_std[dest_idx];
4933 map = &tpr->rx_std_buffers[dest_idx];
4934 skb_size = tp->rx_pkt_map_sz;
4935 break;
4937 case RXD_OPAQUE_RING_JUMBO:
4938 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4939 desc = &tpr->rx_jmb[dest_idx].std;
4940 map = &tpr->rx_jmb_buffers[dest_idx];
4941 skb_size = TG3_RX_JMB_MAP_SZ;
4942 break;
4944 default:
4945 return -EINVAL;
4948 /* Do not overwrite any of the map or rp information
4949 * until we are sure we can commit to a new buffer.
4951 * Callers depend upon this behavior and assume that
4952 * we leave everything unchanged if we fail.
4953 */
4954 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4955 if (skb == NULL)
4956 return -ENOMEM;
4958 skb_reserve(skb, tp->rx_offset);
4960 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4961 PCI_DMA_FROMDEVICE);
4962 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4963 dev_kfree_skb(skb);
4964 return -EIO;
4967 map->skb = skb;
4968 dma_unmap_addr_set(map, mapping, mapping);
4970 desc->addr_hi = ((u64)mapping >> 32);
4971 desc->addr_lo = ((u64)mapping & 0xffffffff);
4973 return skb_size;
4976 /* We only need to copy over the address because the other
4977 * members of the RX descriptor are invariant. See notes above
4978 * tg3_alloc_rx_skb for full details.
4979 */
4980 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4981 struct tg3_rx_prodring_set *dpr,
4982 u32 opaque_key, int src_idx,
4983 u32 dest_idx_unmasked)
4985 struct tg3 *tp = tnapi->tp;
4986 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4987 struct ring_info *src_map, *dest_map;
4988 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4989 int dest_idx;
4991 switch (opaque_key) {
4992 case RXD_OPAQUE_RING_STD:
4993 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4994 dest_desc = &dpr->rx_std[dest_idx];
4995 dest_map = &dpr->rx_std_buffers[dest_idx];
4996 src_desc = &spr->rx_std[src_idx];
4997 src_map = &spr->rx_std_buffers[src_idx];
4998 break;
5000 case RXD_OPAQUE_RING_JUMBO:
5001 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5002 dest_desc = &dpr->rx_jmb[dest_idx].std;
5003 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5004 src_desc = &spr->rx_jmb[src_idx].std;
5005 src_map = &spr->rx_jmb_buffers[src_idx];
5006 break;
5008 default:
5009 return;
5012 dest_map->skb = src_map->skb;
5013 dma_unmap_addr_set(dest_map, mapping,
5014 dma_unmap_addr(src_map, mapping));
5015 dest_desc->addr_hi = src_desc->addr_hi;
5016 dest_desc->addr_lo = src_desc->addr_lo;
5018 /* Ensure that the update to the skb happens after the physical
5019 * addresses have been transferred to the new BD location.
5020 */
5021 smp_wmb();
5023 src_map->skb = NULL;
5026 /* The RX ring scheme is composed of multiple rings which post fresh
5027 * buffers to the chip, and one special ring the chip uses to report
5028 * status back to the host.
5029 *
5030 * The special ring reports the status of received packets to the
5031 * host. The chip does not write into the original descriptor the
5032 * RX buffer was obtained from. The chip simply takes the original
5033 * descriptor as provided by the host, updates the status and length
5034 * fields, then writes this into the next status ring entry.
5035 *
5036 * Each ring the host uses to post buffers to the chip is described
5037 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
5038 * it is first placed into the on-chip ram. When the packet's length
5039 * is known, the chip walks down the TG3_BDINFO entries; the first
5040 * TG3_BDINFO whose MAXLEN field covers the new packet's length is
5041 * chosen.
5042 *
5043 * The "separate ring for rx status" scheme may sound queer, but it makes
5044 * sense from a cache coherency perspective. If only the host writes
5045 * to the buffer post rings, and only the chip writes to the rx status
5046 * rings, then cache lines never move beyond the shared-modified state.
5047 * If both the host and chip were to write into the same ring, cache line
5048 * eviction could occur since both entities want it in an exclusive state.
5049 */
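/* Rough sketch of one packet's journey through this scheme
 * (illustrative, not a timing diagram):
 *
 *   host : posts empty buffers to the std/jumbo producer rings
 *   chip : receives the frame into on-chip RAM, picks a ring via
 *          MAXLEN, DMAs the data, writes a status-ring entry
 *   host : tg3_rx() below consumes the status entry, hands the skb
 *          to the stack, and reposts a fresh buffer via the mailbox
 */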
5050 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5052 struct tg3 *tp = tnapi->tp;
5053 u32 work_mask, rx_std_posted = 0;
5054 u32 std_prod_idx, jmb_prod_idx;
5055 u32 sw_idx = tnapi->rx_rcb_ptr;
5056 u16 hw_idx;
5057 int received;
5058 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5060 hw_idx = *(tnapi->rx_rcb_prod_idx);
5061 /*
5062 * We need to order the read of hw_idx and the read of
5063 * the opaque cookie.
5064 */
5065 rmb();
5066 work_mask = 0;
5067 received = 0;
5068 std_prod_idx = tpr->rx_std_prod_idx;
5069 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5070 while (sw_idx != hw_idx && budget > 0) {
5071 struct ring_info *ri;
5072 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5073 unsigned int len;
5074 struct sk_buff *skb;
5075 dma_addr_t dma_addr;
5076 u32 opaque_key, desc_idx, *post_ptr;
5078 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5079 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5080 if (opaque_key == RXD_OPAQUE_RING_STD) {
5081 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5082 dma_addr = dma_unmap_addr(ri, mapping);
5083 skb = ri->skb;
5084 post_ptr = &std_prod_idx;
5085 rx_std_posted++;
5086 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5087 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5088 dma_addr = dma_unmap_addr(ri, mapping);
5089 skb = ri->skb;
5090 post_ptr = &jmb_prod_idx;
5091 } else
5092 goto next_pkt_nopost;
5094 work_mask |= opaque_key;
5096 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5097 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5098 drop_it:
5099 tg3_recycle_rx(tnapi, tpr, opaque_key,
5100 desc_idx, *post_ptr);
5101 drop_it_no_recycle:
5102 /* Other statistics are tracked by the card. */
5103 tp->rx_dropped++;
5104 goto next_pkt;
5107 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5108 ETH_FCS_LEN;
5110 if (len > TG3_RX_COPY_THRESH(tp)) {
5111 int skb_size;
5113 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5114 *post_ptr);
5115 if (skb_size < 0)
5116 goto drop_it;
5118 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5119 PCI_DMA_FROMDEVICE);
5121 /* Ensure that the update to the skb happens
5122 * after the usage of the old DMA mapping.
5123 */
5124 smp_wmb();
5126 ri->skb = NULL;
5128 skb_put(skb, len);
5129 } else {
5130 struct sk_buff *copy_skb;
5132 tg3_recycle_rx(tnapi, tpr, opaque_key,
5133 desc_idx, *post_ptr);
5135 copy_skb = netdev_alloc_skb(tp->dev, len +
5136 TG3_RAW_IP_ALIGN);
5137 if (copy_skb == NULL)
5138 goto drop_it_no_recycle;
5140 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5141 skb_put(copy_skb, len);
5142 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5143 skb_copy_from_linear_data(skb, copy_skb->data, len);
5144 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5146 /* We'll reuse the original ring buffer. */
5147 skb = copy_skb;
5150 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5151 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5152 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5153 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5154 skb->ip_summed = CHECKSUM_UNNECESSARY;
5155 else
5156 skb_checksum_none_assert(skb);
5158 skb->protocol = eth_type_trans(skb, tp->dev);
5160 if (len > (tp->dev->mtu + ETH_HLEN) &&
5161 skb->protocol != htons(ETH_P_8021Q)) {
5162 dev_kfree_skb(skb);
5163 goto drop_it_no_recycle;
5166 if (desc->type_flags & RXD_FLAG_VLAN &&
5167 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5168 __vlan_hwaccel_put_tag(skb,
5169 desc->err_vlan & RXD_VLAN_MASK);
5171 napi_gro_receive(&tnapi->napi, skb);
5173 received++;
5174 budget--;
5176 next_pkt:
5177 (*post_ptr)++;
5179 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5180 tpr->rx_std_prod_idx = std_prod_idx &
5181 tp->rx_std_ring_mask;
5182 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5183 tpr->rx_std_prod_idx);
5184 work_mask &= ~RXD_OPAQUE_RING_STD;
5185 rx_std_posted = 0;
5187 next_pkt_nopost:
5188 sw_idx++;
5189 sw_idx &= tp->rx_ret_ring_mask;
5191 /* Refresh hw_idx to see if there is new work */
5192 if (sw_idx == hw_idx) {
5193 hw_idx = *(tnapi->rx_rcb_prod_idx);
5194 rmb();
5198 /* ACK the status ring. */
5199 tnapi->rx_rcb_ptr = sw_idx;
5200 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5202 /* Refill RX ring(s). */
5203 if (!tg3_flag(tp, ENABLE_RSS)) {
5204 if (work_mask & RXD_OPAQUE_RING_STD) {
5205 tpr->rx_std_prod_idx = std_prod_idx &
5206 tp->rx_std_ring_mask;
5207 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5208 tpr->rx_std_prod_idx);
5210 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5211 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5212 tp->rx_jmb_ring_mask;
5213 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5214 tpr->rx_jmb_prod_idx);
5216 mmiowb();
5217 } else if (work_mask) {
5218 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5219 * updated before the producer indices can be updated.
5220 */
5221 smp_wmb();
5223 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5224 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5226 if (tnapi != &tp->napi[1])
5227 napi_schedule(&tp->napi[1].napi);
5230 return received;
5233 static void tg3_poll_link(struct tg3 *tp)
5235 /* handle link change and other phy events */
5236 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5237 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5239 if (sblk->status & SD_STATUS_LINK_CHG) {
5240 sblk->status = SD_STATUS_UPDATED |
5241 (sblk->status & ~SD_STATUS_LINK_CHG);
5242 spin_lock(&tp->lock);
5243 if (tg3_flag(tp, USE_PHYLIB)) {
5244 tw32_f(MAC_STATUS,
5245 (MAC_STATUS_SYNC_CHANGED |
5246 MAC_STATUS_CFG_CHANGED |
5247 MAC_STATUS_MI_COMPLETION |
5248 MAC_STATUS_LNKSTATE_CHANGED));
5249 udelay(40);
5250 } else
5251 tg3_setup_phy(tp, 0);
5252 spin_unlock(&tp->lock);
5257 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5258 struct tg3_rx_prodring_set *dpr,
5259 struct tg3_rx_prodring_set *spr)
5261 u32 si, di, cpycnt, src_prod_idx;
5262 int i, err = 0;
5264 while (1) {
5265 src_prod_idx = spr->rx_std_prod_idx;
5267 /* Make sure updates to the rx_std_buffers[] entries and the
5268 * standard producer index are seen in the correct order.
5269 */
5270 smp_rmb();
5272 if (spr->rx_std_cons_idx == src_prod_idx)
5273 break;
5275 if (spr->rx_std_cons_idx < src_prod_idx)
5276 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5277 else
5278 cpycnt = tp->rx_std_ring_mask + 1 -
5279 spr->rx_std_cons_idx;
5281 cpycnt = min(cpycnt,
5282 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5284 si = spr->rx_std_cons_idx;
5285 di = dpr->rx_std_prod_idx;
5287 for (i = di; i < di + cpycnt; i++) {
5288 if (dpr->rx_std_buffers[i].skb) {
5289 cpycnt = i - di;
5290 err = -ENOSPC;
5291 break;
5295 if (!cpycnt)
5296 break;
5298 /* Ensure that updates to the rx_std_buffers ring and the
5299 * shadowed hardware producer ring from tg3_recycle_skb() are
5300 * ordered correctly WRT the skb check above.
5301 */
5302 smp_rmb();
5304 memcpy(&dpr->rx_std_buffers[di],
5305 &spr->rx_std_buffers[si],
5306 cpycnt * sizeof(struct ring_info));
5308 for (i = 0; i < cpycnt; i++, di++, si++) {
5309 struct tg3_rx_buffer_desc *sbd, *dbd;
5310 sbd = &spr->rx_std[si];
5311 dbd = &dpr->rx_std[di];
5312 dbd->addr_hi = sbd->addr_hi;
5313 dbd->addr_lo = sbd->addr_lo;
5316 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5317 tp->rx_std_ring_mask;
5318 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5319 tp->rx_std_ring_mask;
5322 while (1) {
5323 src_prod_idx = spr->rx_jmb_prod_idx;
5325 /* Make sure updates to the rx_jmb_buffers[] entries and
5326 * the jumbo producer index are seen in the correct order.
5327 */
5328 smp_rmb();
5330 if (spr->rx_jmb_cons_idx == src_prod_idx)
5331 break;
5333 if (spr->rx_jmb_cons_idx < src_prod_idx)
5334 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5335 else
5336 cpycnt = tp->rx_jmb_ring_mask + 1 -
5337 spr->rx_jmb_cons_idx;
5339 cpycnt = min(cpycnt,
5340 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5342 si = spr->rx_jmb_cons_idx;
5343 di = dpr->rx_jmb_prod_idx;
5345 for (i = di; i < di + cpycnt; i++) {
5346 if (dpr->rx_jmb_buffers[i].skb) {
5347 cpycnt = i - di;
5348 err = -ENOSPC;
5349 break;
5353 if (!cpycnt)
5354 break;
5356 /* Ensure that updates to the rx_jmb_buffers ring and the
5357 * shadowed hardware producer ring from tg3_recycle_skb() are
5358 * ordered correctly WRT the skb check above.
5359 */
5360 smp_rmb();
5362 memcpy(&dpr->rx_jmb_buffers[di],
5363 &spr->rx_jmb_buffers[si],
5364 cpycnt * sizeof(struct ring_info));
5366 for (i = 0; i < cpycnt; i++, di++, si++) {
5367 struct tg3_rx_buffer_desc *sbd, *dbd;
5368 sbd = &spr->rx_jmb[si].std;
5369 dbd = &dpr->rx_jmb[di].std;
5370 dbd->addr_hi = sbd->addr_hi;
5371 dbd->addr_lo = sbd->addr_lo;
5374 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5375 tp->rx_jmb_ring_mask;
5376 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5377 tp->rx_jmb_ring_mask;
5380 return err;
5383 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5385 struct tg3 *tp = tnapi->tp;
5387 /* run TX completion thread */
5388 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5389 tg3_tx(tnapi);
5390 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5391 return work_done;
5394 /* run RX thread, within the bounds set by NAPI.
5395 * All RX "locking" is done by ensuring outside
5396 * code synchronizes with tg3->napi.poll().
5397 */
5398 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5399 work_done += tg3_rx(tnapi, budget - work_done);
5401 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5402 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5403 int i, err = 0;
5404 u32 std_prod_idx = dpr->rx_std_prod_idx;
5405 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5407 for (i = 1; i < tp->irq_cnt; i++)
5408 err |= tg3_rx_prodring_xfer(tp, dpr,
5409 &tp->napi[i].prodring);
5411 wmb();
5413 if (std_prod_idx != dpr->rx_std_prod_idx)
5414 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5415 dpr->rx_std_prod_idx);
5417 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5418 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5419 dpr->rx_jmb_prod_idx);
5421 mmiowb();
5423 if (err)
5424 tw32_f(HOSTCC_MODE, tp->coal_now);
5427 return work_done;
5430 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5432 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5433 struct tg3 *tp = tnapi->tp;
5434 int work_done = 0;
5435 struct tg3_hw_status *sblk = tnapi->hw_status;
5437 while (1) {
5438 work_done = tg3_poll_work(tnapi, work_done, budget);
5440 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5441 goto tx_recovery;
5443 if (unlikely(work_done >= budget))
5444 break;
5446 /* tp->last_tag is used in tg3_int_reenable() below
5447 * to tell the hw how much work has been processed,
5448 * so we must read it before checking for more work.
5449 */
5450 tnapi->last_tag = sblk->status_tag;
5451 tnapi->last_irq_tag = tnapi->last_tag;
5452 rmb();
5454 /* check for RX/TX work to do */
5455 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5456 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5457 napi_complete(napi);
5458 /* Reenable interrupts. */
5459 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5460 mmiowb();
5461 break;
5465 return work_done;
5467 tx_recovery:
5468 /* work_done is guaranteed to be less than budget. */
5469 napi_complete(napi);
5470 schedule_work(&tp->reset_task);
5471 return work_done;
5474 static void tg3_process_error(struct tg3 *tp)
5476 u32 val;
5477 bool real_error = false;
5479 if (tg3_flag(tp, ERROR_PROCESSED))
5480 return;
5482 /* Check Flow Attention register */
5483 val = tr32(HOSTCC_FLOW_ATTN);
5484 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5485 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5486 real_error = true;
5489 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5490 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5491 real_error = true;
5494 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5495 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5496 real_error = true;
5499 if (!real_error)
5500 return;
5502 tg3_dump_state(tp);
5504 tg3_flag_set(tp, ERROR_PROCESSED);
5505 schedule_work(&tp->reset_task);
5508 static int tg3_poll(struct napi_struct *napi, int budget)
5510 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5511 struct tg3 *tp = tnapi->tp;
5512 int work_done = 0;
5513 struct tg3_hw_status *sblk = tnapi->hw_status;
5515 while (1) {
5516 if (sblk->status & SD_STATUS_ERROR)
5517 tg3_process_error(tp);
5519 tg3_poll_link(tp);
5521 work_done = tg3_poll_work(tnapi, work_done, budget);
5523 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5524 goto tx_recovery;
5526 if (unlikely(work_done >= budget))
5527 break;
5529 if (tg3_flag(tp, TAGGED_STATUS)) {
5530 /* tp->last_tag is used in tg3_int_reenable() below
5531 * to tell the hw how much work has been processed,
5532 * so we must read it before checking for more work.
5533 */
5534 tnapi->last_tag = sblk->status_tag;
5535 tnapi->last_irq_tag = tnapi->last_tag;
5536 rmb();
5537 } else
5538 sblk->status &= ~SD_STATUS_UPDATED;
5540 if (likely(!tg3_has_work(tnapi))) {
5541 napi_complete(napi);
5542 tg3_int_reenable(tnapi);
5543 break;
5547 return work_done;
5549 tx_recovery:
5550 /* work_done is guaranteed to be less than budget. */
5551 napi_complete(napi);
5552 schedule_work(&tp->reset_task);
5553 return work_done;
5556 static void tg3_napi_disable(struct tg3 *tp)
5558 int i;
5560 for (i = tp->irq_cnt - 1; i >= 0; i--)
5561 napi_disable(&tp->napi[i].napi);
5564 static void tg3_napi_enable(struct tg3 *tp)
5566 int i;
5568 for (i = 0; i < tp->irq_cnt; i++)
5569 napi_enable(&tp->napi[i].napi);
5572 static void tg3_napi_init(struct tg3 *tp)
5574 int i;
5576 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5577 for (i = 1; i < tp->irq_cnt; i++)
5578 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5581 static void tg3_napi_fini(struct tg3 *tp)
5583 int i;
5585 for (i = 0; i < tp->irq_cnt; i++)
5586 netif_napi_del(&tp->napi[i].napi);
5589 static inline void tg3_netif_stop(struct tg3 *tp)
5591 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5592 tg3_napi_disable(tp);
5593 netif_tx_disable(tp->dev);
5596 static inline void tg3_netif_start(struct tg3 *tp)
5598 /* NOTE: unconditional netif_tx_wake_all_queues is only
5599 * appropriate so long as all callers are assured to
5600 * have free tx slots (such as after tg3_init_hw).
5601 */
5602 netif_tx_wake_all_queues(tp->dev);
5604 tg3_napi_enable(tp);
5605 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5606 tg3_enable_ints(tp);
5609 static void tg3_irq_quiesce(struct tg3 *tp)
5611 int i;
5613 BUG_ON(tp->irq_sync);
5615 tp->irq_sync = 1;
5616 smp_mb();
5618 for (i = 0; i < tp->irq_cnt; i++)
5619 synchronize_irq(tp->napi[i].irq_vec);
5622 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5623 * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
5624 * Most of the time this is not necessary, except when shutting
5625 * down the device.
5626 */
5627 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5629 spin_lock_bh(&tp->lock);
5630 if (irq_sync)
5631 tg3_irq_quiesce(tp);
5634 static inline void tg3_full_unlock(struct tg3 *tp)
5636 spin_unlock_bh(&tp->lock);
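/* Typical usage, mirroring tg3_reset_task() below (sketch only):
 *
 *	tg3_full_lock(tp, 1);		<- also quiesces the IRQ handlers
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 *	err = tg3_init_hw(tp, 1);
 *	tg3_full_unlock(tp);
 */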
5639 /* One-shot MSI handler - Chip automatically disables interrupt
5640 * after sending MSI so driver doesn't have to do it.
5641 */
5642 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5644 struct tg3_napi *tnapi = dev_id;
5645 struct tg3 *tp = tnapi->tp;
5647 prefetch(tnapi->hw_status);
5648 if (tnapi->rx_rcb)
5649 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5651 if (likely(!tg3_irq_sync(tp)))
5652 napi_schedule(&tnapi->napi);
5654 return IRQ_HANDLED;
5657 /* MSI ISR - No need to check for interrupt sharing and no need to
5658 * flush status block and interrupt mailbox. PCI ordering rules
5659 * guarantee that MSI will arrive after the status block.
5660 */
5661 static irqreturn_t tg3_msi(int irq, void *dev_id)
5663 struct tg3_napi *tnapi = dev_id;
5664 struct tg3 *tp = tnapi->tp;
5666 prefetch(tnapi->hw_status);
5667 if (tnapi->rx_rcb)
5668 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5669 /*
5670 * Writing any value to intr-mbox-0 clears PCI INTA# and
5671 * chip-internal interrupt pending events.
5672 * Writing non-zero to intr-mbox-0 additionally tells the
5673 * NIC to stop sending us irqs, engaging "in-intr-handler"
5674 * event coalescing.
5675 */
5676 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5677 if (likely(!tg3_irq_sync(tp)))
5678 napi_schedule(&tnapi->napi);
5680 return IRQ_RETVAL(1);
5683 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5685 struct tg3_napi *tnapi = dev_id;
5686 struct tg3 *tp = tnapi->tp;
5687 struct tg3_hw_status *sblk = tnapi->hw_status;
5688 unsigned int handled = 1;
5690 /* In INTx mode, it is possible for the interrupt to arrive at
5691 * the CPU before the status block posted prior to the interrupt.
5692 * Reading the PCI State register will confirm whether the
5693 * interrupt is ours and will flush the status block.
5694 */
5695 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5696 if (tg3_flag(tp, CHIP_RESETTING) ||
5697 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5698 handled = 0;
5699 goto out;
5703 /*
5704 * Writing any value to intr-mbox-0 clears PCI INTA# and
5705 * chip-internal interrupt pending events.
5706 * Writing non-zero to intr-mbox-0 additionally tells the
5707 * NIC to stop sending us irqs, engaging "in-intr-handler"
5708 * event coalescing.
5709 *
5710 * Flush the mailbox to de-assert the IRQ immediately to prevent
5711 * spurious interrupts. The flush impacts performance but
5712 * excessive spurious interrupts can be worse in some cases.
5713 */
5714 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5715 if (tg3_irq_sync(tp))
5716 goto out;
5717 sblk->status &= ~SD_STATUS_UPDATED;
5718 if (likely(tg3_has_work(tnapi))) {
5719 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5720 napi_schedule(&tnapi->napi);
5721 } else {
5722 /* No work, shared interrupt perhaps? re-enable
5723 * interrupts, and flush that PCI write.
5724 */
5725 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5726 0x00000000);
5728 out:
5729 return IRQ_RETVAL(handled);
5732 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5734 struct tg3_napi *tnapi = dev_id;
5735 struct tg3 *tp = tnapi->tp;
5736 struct tg3_hw_status *sblk = tnapi->hw_status;
5737 unsigned int handled = 1;
5739 /* In INTx mode, it is possible for the interrupt to arrive at
5740 * the CPU before the status block posted prior to the interrupt.
5741 * Reading the PCI State register will confirm whether the
5742 * interrupt is ours and will flush the status block.
5743 */
5744 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5745 if (tg3_flag(tp, CHIP_RESETTING) ||
5746 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5747 handled = 0;
5748 goto out;
5752 /*
5753 * Writing any value to intr-mbox-0 clears PCI INTA# and
5754 * chip-internal interrupt pending events.
5755 * Writing non-zero to intr-mbox-0 additionally tells the
5756 * NIC to stop sending us irqs, engaging "in-intr-handler"
5757 * event coalescing.
5758 *
5759 * Flush the mailbox to de-assert the IRQ immediately to prevent
5760 * spurious interrupts. The flush impacts performance but
5761 * excessive spurious interrupts can be worse in some cases.
5762 */
5763 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5765 /*
5766 * In a shared interrupt configuration, sometimes other devices'
5767 * interrupts will scream. We record the current status tag here
5768 * so that the above check can report that the screaming interrupts
5769 * are unhandled. Eventually they will be silenced.
5770 */
5771 tnapi->last_irq_tag = sblk->status_tag;
5773 if (tg3_irq_sync(tp))
5774 goto out;
5776 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5778 napi_schedule(&tnapi->napi);
5780 out:
5781 return IRQ_RETVAL(handled);
5784 /* ISR for interrupt test */
5785 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5787 struct tg3_napi *tnapi = dev_id;
5788 struct tg3 *tp = tnapi->tp;
5789 struct tg3_hw_status *sblk = tnapi->hw_status;
5791 if ((sblk->status & SD_STATUS_UPDATED) ||
5792 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5793 tg3_disable_ints(tp);
5794 return IRQ_RETVAL(1);
5796 return IRQ_RETVAL(0);
5799 static int tg3_init_hw(struct tg3 *, int);
5800 static int tg3_halt(struct tg3 *, int, int);
5802 /* Restart hardware after configuration changes, self-test, etc.
5803 * Invoked with tp->lock held.
5804 */
5805 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5806 __releases(tp->lock)
5807 __acquires(tp->lock)
5809 int err;
5811 err = tg3_init_hw(tp, reset_phy);
5812 if (err) {
5813 netdev_err(tp->dev,
5814 "Failed to re-initialize device, aborting\n");
5815 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5816 tg3_full_unlock(tp);
5817 del_timer_sync(&tp->timer);
5818 tp->irq_sync = 0;
5819 tg3_napi_enable(tp);
5820 dev_close(tp->dev);
5821 tg3_full_lock(tp, 0);
5823 return err;
5826 #ifdef CONFIG_NET_POLL_CONTROLLER
5827 static void tg3_poll_controller(struct net_device *dev)
5829 int i;
5830 struct tg3 *tp = netdev_priv(dev);
5832 for (i = 0; i < tp->irq_cnt; i++)
5833 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5835 #endif
5837 static void tg3_reset_task(struct work_struct *work)
5839 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5840 int err;
5841 unsigned int restart_timer;
5843 tg3_full_lock(tp, 0);
5845 if (!netif_running(tp->dev)) {
5846 tg3_full_unlock(tp);
5847 return;
5850 tg3_full_unlock(tp);
5852 tg3_phy_stop(tp);
5854 tg3_netif_stop(tp);
5856 tg3_full_lock(tp, 1);
5858 restart_timer = tg3_flag(tp, RESTART_TIMER);
5859 tg3_flag_clear(tp, RESTART_TIMER);
5861 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5862 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5863 tp->write32_rx_mbox = tg3_write_flush_reg32;
5864 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5865 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5868 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5869 err = tg3_init_hw(tp, 1);
5870 if (err)
5871 goto out;
5873 tg3_netif_start(tp);
5875 if (restart_timer)
5876 mod_timer(&tp->timer, jiffies + 1);
5878 out:
5879 tg3_full_unlock(tp);
5881 if (!err)
5882 tg3_phy_start(tp);
5885 static void tg3_tx_timeout(struct net_device *dev)
5887 struct tg3 *tp = netdev_priv(dev);
5889 if (netif_msg_tx_err(tp)) {
5890 netdev_err(dev, "transmit timed out, resetting\n");
5891 tg3_dump_state(tp);
5894 schedule_work(&tp->reset_task);
5897 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5898 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5900 u32 base = (u32) mapping & 0xffffffff;
5902 return (base > 0xffffdcc0) && (base + len + 8 < base);
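/* Example (hypothetical values): base == 0xfffff000 and len == 0x2000
 * gives base + len + 8 == 0x1008 after the 32-bit wrap, which is less
 * than base, so the buffer straddles a 4GB boundary.  The 0xffffdcc0
 * comparison is just a cheap pre-filter for the wrap test. */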
5905 /* Test for DMA addresses > 40-bit */
5906 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5907 int len)
5909 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5910 if (tg3_flag(tp, 40BIT_DMA_BUG))
5911 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5912 return 0;
5913 #else
5914 return 0;
5915 #endif
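/* Example (hypothetical values): with the 40-bit DMA bug present, a
 * mapping at 0xfffffff000 with len 0x2000 ends above DMA_BIT_MASK(40)
 * and must be bounced through tigon3_dma_hwbug_workaround() below. */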
5918 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
5919 dma_addr_t mapping, u32 len, u32 flags,
5920 u32 mss, u32 vlan)
5922 txbd->addr_hi = ((u64) mapping >> 32);
5923 txbd->addr_lo = ((u64) mapping & 0xffffffff);
5924 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
5925 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
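/* Example (illustrative): for a 1500-byte, non-VLAN, non-TSO frame the
 * last BD gets len_flags == (1500 << TXD_LEN_SHIFT) | TXD_FLAG_END and
 * vlan_tag == 0, with the 64-bit DMA address split across addr_hi/lo.
 */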
5928 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
5929 dma_addr_t map, u32 len, u32 flags,
5930 u32 mss, u32 vlan)
5932 struct tg3 *tp = tnapi->tp;
5933 bool hwbug = false;
5935 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
5936 hwbug = true;
5938 if (tg3_4g_overflow_test(map, len))
5939 hwbug = true;
5941 if (tg3_40bit_overflow_test(tp, map, len))
5942 hwbug = true;
5944 if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
5945 u32 tmp_flag = flags & ~TXD_FLAG_END;
5946 while (len > TG3_TX_BD_DMA_MAX) {
5947 u32 frag_len = TG3_TX_BD_DMA_MAX;
5948 len -= TG3_TX_BD_DMA_MAX;
5950 if (len) {
5951 tnapi->tx_buffers[*entry].fragmented = true;
5952 /* Avoid the 8-byte DMA problem */
5953 if (len <= 8) {
5954 len += TG3_TX_BD_DMA_MAX / 2;
5955 frag_len = TG3_TX_BD_DMA_MAX / 2;
5957 } else
5958 tmp_flag = flags;
5960 if (*budget) {
5961 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
5962 frag_len, tmp_flag, mss, vlan);
5963 (*budget)--;
5964 *entry = NEXT_TX(*entry);
5965 } else {
5966 hwbug = true;
5967 break;
5970 map += frag_len;
5973 if (len) {
5974 if (*budget) {
5975 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
5976 len, flags, mss, vlan);
5977 (*budget)--;
5978 *entry = NEXT_TX(*entry);
5979 } else {
5980 hwbug = true;
5983 } else {
5984 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
5985 len, flags, mss, vlan);
5986 *entry = NEXT_TX(*entry);
5989 return hwbug;
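/* Illustration of the 4K_FIFO_LIMIT splitting above, assuming
 * TG3_TX_BD_DMA_MAX is 4096 (lengths hypothetical): a 10000-byte
 * mapping becomes BDs of 4096 + 4096 + 1808 bytes.  A 4100-byte
 * mapping would leave a 4-byte tail, so the loop instead emits
 * 2048 + 2052 to keep every BD longer than 8 bytes. */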
5992 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
5994 int i;
5995 struct sk_buff *skb;
5996 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
5998 skb = txb->skb;
5999 txb->skb = NULL;
6001 pci_unmap_single(tnapi->tp->pdev,
6002 dma_unmap_addr(txb, mapping),
6003 skb_headlen(skb),
6004 PCI_DMA_TODEVICE);
6006 while (txb->fragmented) {
6007 txb->fragmented = false;
6008 entry = NEXT_TX(entry);
6009 txb = &tnapi->tx_buffers[entry];
6012 for (i = 0; i < last; i++) {
6013 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6015 entry = NEXT_TX(entry);
6016 txb = &tnapi->tx_buffers[entry];
6018 pci_unmap_page(tnapi->tp->pdev,
6019 dma_unmap_addr(txb, mapping),
6020 frag->size, PCI_DMA_TODEVICE);
6022 while (txb->fragmented) {
6023 txb->fragmented = false;
6024 entry = NEXT_TX(entry);
6025 txb = &tnapi->tx_buffers[entry];
6030 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6031 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6032 struct sk_buff **pskb,
6033 u32 *entry, u32 *budget,
6034 u32 base_flags, u32 mss, u32 vlan)
6036 struct tg3 *tp = tnapi->tp;
6037 struct sk_buff *new_skb, *skb = *pskb;
6038 dma_addr_t new_addr = 0;
6039 int ret = 0;
6041 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6042 new_skb = skb_copy(skb, GFP_ATOMIC);
6043 else {
6044 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6046 new_skb = skb_copy_expand(skb,
6047 skb_headroom(skb) + more_headroom,
6048 skb_tailroom(skb), GFP_ATOMIC);
6051 if (!new_skb) {
6052 ret = -1;
6053 } else {
6054 /* New SKB is guaranteed to be linear. */
6055 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6056 PCI_DMA_TODEVICE);
6057 /* Make sure the mapping succeeded */
6058 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6059 dev_kfree_skb(new_skb);
new_skb = NULL; /* don't hand a stale pointer back via *pskb below */
6060 ret = -1;
6061 } else {
6062 base_flags |= TXD_FLAG_END;
6064 tnapi->tx_buffers[*entry].skb = new_skb;
6065 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6066 mapping, new_addr);
6068 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6069 new_skb->len, base_flags,
6070 mss, vlan)) {
6071 tg3_tx_skb_unmap(tnapi, *entry, 0);
6072 dev_kfree_skb(new_skb);
new_skb = NULL; /* don't hand a stale pointer back via *pskb below */
6073 ret = -1;
6078 dev_kfree_skb(skb);
6079 *pskb = new_skb;
6080 return ret;
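/* Note for callers: on success *pskb points at the linear copy and the
 * original skb has been freed; on failure both skbs have been freed
 * and *pskb is NULL, so the packet is simply dropped (see
 * tg3_start_xmit() below). */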
6083 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6085 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6086 * TSO header is greater than 80 bytes.
6087 */
6088 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6090 struct sk_buff *segs, *nskb;
6091 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6093 /* Estimate the number of fragments in the worst case */
6094 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6095 netif_stop_queue(tp->dev);
6097 /* netif_tx_stop_queue() must be done before checking
6098 * tx index in tg3_tx_avail() below, because in
6099 * tg3_tx(), we update tx index before checking for
6100 * netif_tx_queue_stopped().
6101 */
6102 smp_mb();
6103 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6104 return NETDEV_TX_BUSY;
6106 netif_wake_queue(tp->dev);
6109 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6110 if (IS_ERR(segs))
6111 goto tg3_tso_bug_end;
6113 do {
6114 nskb = segs;
6115 segs = segs->next;
6116 nskb->next = NULL;
6117 tg3_start_xmit(nskb, tp->dev);
6118 } while (segs);
6120 tg3_tso_bug_end:
6121 dev_kfree_skb(skb);
6123 return NETDEV_TX_OK;
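/* The gso_segs * 3 estimate above is a heuristic: roughly one BD for
 * the headers plus a couple for the data of each resulting segment,
 * not a bound derived from hardware documentation. */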
6126 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6127 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6128 */
6129 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6131 struct tg3 *tp = netdev_priv(dev);
6132 u32 len, entry, base_flags, mss, vlan = 0;
6133 u32 budget;
6134 int i = -1, would_hit_hwbug;
6135 dma_addr_t mapping;
6136 struct tg3_napi *tnapi;
6137 struct netdev_queue *txq;
6138 unsigned int last;
6140 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6141 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6142 if (tg3_flag(tp, ENABLE_TSS))
6143 tnapi++;
6145 budget = tg3_tx_avail(tnapi);
6147 /* We are running in BH disabled context with netif_tx_lock
6148 * and TX reclaim runs via tp->napi.poll inside of a software
6149 * interrupt. Furthermore, IRQ processing runs lockless so we have
6150 * no IRQ context deadlocks to worry about either. Rejoice!
6151 */
6152 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6153 if (!netif_tx_queue_stopped(txq)) {
6154 netif_tx_stop_queue(txq);
6156 /* This is a hard error, log it. */
6157 netdev_err(dev,
6158 "BUG! Tx Ring full when queue awake!\n");
6160 return NETDEV_TX_BUSY;
6163 entry = tnapi->tx_prod;
6164 base_flags = 0;
6165 if (skb->ip_summed == CHECKSUM_PARTIAL)
6166 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6168 mss = skb_shinfo(skb)->gso_size;
6169 if (mss) {
6170 struct iphdr *iph;
6171 u32 tcp_opt_len, hdr_len;
6173 if (skb_header_cloned(skb) &&
6174 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6175 dev_kfree_skb(skb);
6176 goto out_unlock;
6179 iph = ip_hdr(skb);
6180 tcp_opt_len = tcp_optlen(skb);
6182 if (skb_is_gso_v6(skb)) {
6183 hdr_len = skb_headlen(skb) - ETH_HLEN;
6184 } else {
6185 u32 ip_tcp_len;
6187 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6188 hdr_len = ip_tcp_len + tcp_opt_len;
6190 iph->check = 0;
6191 iph->tot_len = htons(mss + hdr_len);
6194 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6195 tg3_flag(tp, TSO_BUG))
6196 return tg3_tso_bug(tp, skb);
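/* Worked example of the trigger: a common IPv4 + TCP flow with
 * timestamps has hdr_len = 20 + 20 + 12 = 52 and ETH_HLEN + 52 = 66,
 * which stays on the fast path. Only unusually large option space
 * (e.g. a maximal 60-byte TCP header: hdr_len = 20 + 60 = 80,
 * 14 + 80 = 94 > 80) diverts the packet to tg3_tso_bug().
 */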
6198 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6199 TXD_FLAG_CPU_POST_DMA);
6201 if (tg3_flag(tp, HW_TSO_1) ||
6202 tg3_flag(tp, HW_TSO_2) ||
6203 tg3_flag(tp, HW_TSO_3)) {
6204 tcp_hdr(skb)->check = 0;
6205 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6206 } else
6207 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6208 iph->daddr, 0,
6209 IPPROTO_TCP,
6210 0);
6212 if (tg3_flag(tp, HW_TSO_3)) {
6213 mss |= (hdr_len & 0xc) << 12;
6214 if (hdr_len & 0x10)
6215 base_flags |= 0x00000010;
6216 base_flags |= (hdr_len & 0x3e0) << 5;
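/* For HW_TSO_3 the header length is scattered across the descriptor:
 * bits 2-3 of hdr_len land in mss bits 14-15, bit 4 becomes
 * base_flags bit 4 (0x10), and bits 5-9 are shifted into base_flags
 * bits 10-14. Bits 0-1 are always zero since hdr_len is a multiple
 * of 4 here.
 */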
6217 } else if (tg3_flag(tp, HW_TSO_2))
6218 mss |= hdr_len << 9;
6219 else if (tg3_flag(tp, HW_TSO_1) ||
6220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6221 if (tcp_opt_len || iph->ihl > 5) {
6222 int tsflags;
6224 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6225 mss |= (tsflags << 11);
6227 } else {
6228 if (tcp_opt_len || iph->ihl > 5) {
6229 int tsflags;
6231 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6232 base_flags |= tsflags << 12;
6237 if (vlan_tx_tag_present(skb)) {
6238 base_flags |= TXD_FLAG_VLAN;
6239 vlan = vlan_tx_tag_get(skb);
6242 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6243 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6244 base_flags |= TXD_FLAG_JMB_PKT;
6246 len = skb_headlen(skb);
6248 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6249 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6250 dev_kfree_skb(skb);
6251 goto out_unlock;
6254 tnapi->tx_buffers[entry].skb = skb;
6255 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6257 would_hit_hwbug = 0;
6259 if (tg3_flag(tp, 5701_DMA_BUG))
6260 would_hit_hwbug = 1;
6262 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6263 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6264 mss, vlan))
6265 would_hit_hwbug = 1;
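/* A "hit" means tg3_tx_frag_set() found a mapping that falls foul of
 * one of the DMA errata named in the comment above tg3_start_xmit():
 * a buffer crossing a 4GB boundary, or one beyond the 40-bit address
 * reach of the affected chips. Such packets are rerouted through
 * tigon3_dma_hwbug_workaround() below.
 */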
6267 /* Now loop through additional data fragments, and queue them. */
6268 if (skb_shinfo(skb)->nr_frags > 0) {
6269 u32 tmp_mss = mss;
6271 if (!tg3_flag(tp, HW_TSO_1) &&
6272 !tg3_flag(tp, HW_TSO_2) &&
6273 !tg3_flag(tp, HW_TSO_3))
6274 tmp_mss = 0;
6276 last = skb_shinfo(skb)->nr_frags - 1;
6277 for (i = 0; i <= last; i++) {
6278 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6280 len = frag->size;
6281 mapping = pci_map_page(tp->pdev,
6282 frag->page,
6283 frag->page_offset,
6284 len, PCI_DMA_TODEVICE);
6286 tnapi->tx_buffers[entry].skb = NULL;
6287 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6288 mapping);
6289 if (pci_dma_mapping_error(tp->pdev, mapping))
6290 goto dma_error;
6292 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6293 len, base_flags |
6294 ((i == last) ? TXD_FLAG_END : 0),
6295 tmp_mss, vlan))
6296 would_hit_hwbug = 1;
6300 if (would_hit_hwbug) {
6301 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6303 /* If the workaround fails due to memory/mapping
6304 * failure, silently drop this packet.
6306 entry = tnapi->tx_prod;
6307 budget = tg3_tx_avail(tnapi);
6308 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6309 base_flags, mss, vlan))
6310 goto out_unlock;
6313 skb_tx_timestamp(skb);
6315 /* Packets are ready, update Tx producer idx local and on card. */
6316 tw32_tx_mbox(tnapi->prodmbox, entry);
6318 tnapi->tx_prod = entry;
6319 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6320 netif_tx_stop_queue(txq);
6322 /* netif_tx_stop_queue() must be done before checking
6323 * tx index in tg3_tx_avail() below, because in
6324 * tg3_tx(), we update tx index before checking for
6325 * netif_tx_queue_stopped().
6326 */
6327 smp_mb();
6328 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6329 netif_tx_wake_queue(txq);
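/* Stopping while fewer than MAX_SKB_FRAGS + 1 descriptors remain
 * guarantees the next packet, however fragmented, can always be
 * queued once the queue is awake again; waking only above
 * TG3_TX_WAKEUP_THRESH adds hysteresis so the queue does not bounce
 * between stopped and awake on every completion.
 */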
6332 out_unlock:
6333 mmiowb();
6335 return NETDEV_TX_OK;
6337 dma_error:
6338 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6339 dev_kfree_skb(skb);
6340 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6341 return NETDEV_TX_OK;
6344 static void tg3_set_loopback(struct net_device *dev, u32 features)
6346 struct tg3 *tp = netdev_priv(dev);
6348 if (features & NETIF_F_LOOPBACK) {
6349 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6350 return;
6352 /*
6353 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6354 * loopback mode if Half-Duplex mode was negotiated earlier.
6355 */
6356 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6358 /* Enable internal MAC loopback mode */
6359 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6360 spin_lock_bh(&tp->lock);
6361 tw32(MAC_MODE, tp->mac_mode);
6362 netif_carrier_on(tp->dev);
6363 spin_unlock_bh(&tp->lock);
6364 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6365 } else {
6366 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6367 return;
6369 /* Disable internal MAC loopback mode */
6370 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6371 spin_lock_bh(&tp->lock);
6372 tw32(MAC_MODE, tp->mac_mode);
6373 /* Force link status check */
6374 tg3_setup_phy(tp, 1);
6375 spin_unlock_bh(&tp->lock);
6376 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6380 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6382 struct tg3 *tp = netdev_priv(dev);
6384 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6385 features &= ~NETIF_F_ALL_TSO;
6387 return features;
6390 static int tg3_set_features(struct net_device *dev, u32 features)
6392 u32 changed = dev->features ^ features;
6394 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6395 tg3_set_loopback(dev, features);
6397 return 0;
6400 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6401 int new_mtu)
6403 dev->mtu = new_mtu;
6405 if (new_mtu > ETH_DATA_LEN) {
6406 if (tg3_flag(tp, 5780_CLASS)) {
6407 netdev_update_features(dev);
6408 tg3_flag_clear(tp, TSO_CAPABLE);
6409 } else {
6410 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6412 } else {
6413 if (tg3_flag(tp, 5780_CLASS)) {
6414 tg3_flag_set(tp, TSO_CAPABLE);
6415 netdev_update_features(dev);
6417 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6421 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6423 struct tg3 *tp = netdev_priv(dev);
6424 int err;
6426 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6427 return -EINVAL;
6429 if (!netif_running(dev)) {
6430 /* We'll just catch it later when the
6431 * device is brought up.
6432 */
6433 tg3_set_mtu(dev, tp, new_mtu);
6434 return 0;
6437 tg3_phy_stop(tp);
6439 tg3_netif_stop(tp);
6441 tg3_full_lock(tp, 1);
6443 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6445 tg3_set_mtu(dev, tp, new_mtu);
6447 err = tg3_restart_hw(tp, 0);
6449 if (!err)
6450 tg3_netif_start(tp);
6452 tg3_full_unlock(tp);
6454 if (!err)
6455 tg3_phy_start(tp);
6457 return err;
6460 static void tg3_rx_prodring_free(struct tg3 *tp,
6461 struct tg3_rx_prodring_set *tpr)
6463 int i;
6465 if (tpr != &tp->napi[0].prodring) {
6466 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6467 i = (i + 1) & tp->rx_std_ring_mask)
6468 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6469 tp->rx_pkt_map_sz);
6471 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6472 for (i = tpr->rx_jmb_cons_idx;
6473 i != tpr->rx_jmb_prod_idx;
6474 i = (i + 1) & tp->rx_jmb_ring_mask) {
6475 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6476 TG3_RX_JMB_MAP_SZ);
6480 return;
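/* For the per-vector producer rings handled above, only the window
 * between the consumer and producer indexes can hold live buffers,
 * hence the masked walk; the default ring below is swept in full.
 */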
6483 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6484 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6485 tp->rx_pkt_map_sz);
6487 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6488 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6489 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6490 TG3_RX_JMB_MAP_SZ);
6494 /* Initialize rx rings for packet processing.
6496 * The chip has been shut down and the driver detached from
6497 * the networking, so no interrupts or new tx packets will
6498 * end up in the driver. tp->{tx,}lock are held and thus
6499 * we may not sleep.
6501 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6502 struct tg3_rx_prodring_set *tpr)
6504 u32 i, rx_pkt_dma_sz;
6506 tpr->rx_std_cons_idx = 0;
6507 tpr->rx_std_prod_idx = 0;
6508 tpr->rx_jmb_cons_idx = 0;
6509 tpr->rx_jmb_prod_idx = 0;
6511 if (tpr != &tp->napi[0].prodring) {
6512 memset(&tpr->rx_std_buffers[0], 0,
6513 TG3_RX_STD_BUFF_RING_SIZE(tp));
6514 if (tpr->rx_jmb_buffers)
6515 memset(&tpr->rx_jmb_buffers[0], 0,
6516 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6517 goto done;
6520 /* Zero out all descriptors. */
6521 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6523 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6524 if (tg3_flag(tp, 5780_CLASS) &&
6525 tp->dev->mtu > ETH_DATA_LEN)
6526 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6527 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6529 /* Initialize invariants of the rings, we only set this
6530 * stuff once. This works because the card does not
6531 * write into the rx buffer posting rings.
6533 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6534 struct tg3_rx_buffer_desc *rxd;
6536 rxd = &tpr->rx_std[i];
6537 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6538 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6539 rxd->opaque = (RXD_OPAQUE_RING_STD |
6540 (i << RXD_OPAQUE_INDEX_SHIFT));
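/* The opaque word is echoed back by the hardware in the matching rx
 * return descriptor; encoding the ring id and slot index here lets
 * the receive path locate a completed buffer without searching.
 */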
6543 /* Now allocate fresh SKBs for each rx ring. */
6544 for (i = 0; i < tp->rx_pending; i++) {
6545 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6546 netdev_warn(tp->dev,
6547 "Using a smaller RX standard ring. Only "
6548 "%d out of %d buffers were allocated "
6549 "successfully\n", i, tp->rx_pending);
6550 if (i == 0)
6551 goto initfail;
6552 tp->rx_pending = i;
6553 break;
6557 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6558 goto done;
6560 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6562 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6563 goto done;
6565 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6566 struct tg3_rx_buffer_desc *rxd;
6568 rxd = &tpr->rx_jmb[i].std;
6569 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6570 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6571 RXD_FLAG_JUMBO;
6572 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6573 (i << RXD_OPAQUE_INDEX_SHIFT));
6576 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6577 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6578 netdev_warn(tp->dev,
6579 "Using a smaller RX jumbo ring. Only %d "
6580 "out of %d buffers were allocated "
6581 "successfully\n", i, tp->rx_jumbo_pending);
6582 if (i == 0)
6583 goto initfail;
6584 tp->rx_jumbo_pending = i;
6585 break;
6589 done:
6590 return 0;
6592 initfail:
6593 tg3_rx_prodring_free(tp, tpr);
6594 return -ENOMEM;
6597 static void tg3_rx_prodring_fini(struct tg3 *tp,
6598 struct tg3_rx_prodring_set *tpr)
6600 kfree(tpr->rx_std_buffers);
6601 tpr->rx_std_buffers = NULL;
6602 kfree(tpr->rx_jmb_buffers);
6603 tpr->rx_jmb_buffers = NULL;
6604 if (tpr->rx_std) {
6605 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6606 tpr->rx_std, tpr->rx_std_mapping);
6607 tpr->rx_std = NULL;
6609 if (tpr->rx_jmb) {
6610 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6611 tpr->rx_jmb, tpr->rx_jmb_mapping);
6612 tpr->rx_jmb = NULL;
6616 static int tg3_rx_prodring_init(struct tg3 *tp,
6617 struct tg3_rx_prodring_set *tpr)
6619 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6620 GFP_KERNEL);
6621 if (!tpr->rx_std_buffers)
6622 return -ENOMEM;
6624 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6625 TG3_RX_STD_RING_BYTES(tp),
6626 &tpr->rx_std_mapping,
6627 GFP_KERNEL);
6628 if (!tpr->rx_std)
6629 goto err_out;
6631 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6632 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6633 GFP_KERNEL);
6634 if (!tpr->rx_jmb_buffers)
6635 goto err_out;
6637 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6638 TG3_RX_JMB_RING_BYTES(tp),
6639 &tpr->rx_jmb_mapping,
6640 GFP_KERNEL);
6641 if (!tpr->rx_jmb)
6642 goto err_out;
6645 return 0;
6647 err_out:
6648 tg3_rx_prodring_fini(tp, tpr);
6649 return -ENOMEM;
6652 /* Free up pending packets in all rx/tx rings.
6654 * The chip has been shut down and the driver detached from
6655 * the networking, so no interrupts or new tx packets will
6656 * end up in the driver. tp->{tx,}lock is not held and we are not
6657 * in an interrupt context and thus may sleep.
6659 static void tg3_free_rings(struct tg3 *tp)
6661 int i, j;
6663 for (j = 0; j < tp->irq_cnt; j++) {
6664 struct tg3_napi *tnapi = &tp->napi[j];
6666 tg3_rx_prodring_free(tp, &tnapi->prodring);
6668 if (!tnapi->tx_buffers)
6669 continue;
6671 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
6672 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
6674 if (!skb)
6675 continue;
6677 tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
6679 dev_kfree_skb_any(skb);
6684 /* Initialize tx/rx rings for packet processing.
6686 * The chip has been shut down and the driver detached from
6687 * the networking, so no interrupts or new tx packets will
6688 * end up in the driver. tp->{tx,}lock are held and thus
6689 * we may not sleep.
6691 static int tg3_init_rings(struct tg3 *tp)
6693 int i;
6695 /* Free up all the SKBs. */
6696 tg3_free_rings(tp);
6698 for (i = 0; i < tp->irq_cnt; i++) {
6699 struct tg3_napi *tnapi = &tp->napi[i];
6701 tnapi->last_tag = 0;
6702 tnapi->last_irq_tag = 0;
6703 tnapi->hw_status->status = 0;
6704 tnapi->hw_status->status_tag = 0;
6705 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6707 tnapi->tx_prod = 0;
6708 tnapi->tx_cons = 0;
6709 if (tnapi->tx_ring)
6710 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6712 tnapi->rx_rcb_ptr = 0;
6713 if (tnapi->rx_rcb)
6714 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6716 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6717 tg3_free_rings(tp);
6718 return -ENOMEM;
6722 return 0;
6725 /*
6726 * Must not be invoked with interrupt sources disabled and
6727 * the hardware shut down.
6728 */
6729 static void tg3_free_consistent(struct tg3 *tp)
6731 int i;
6733 for (i = 0; i < tp->irq_cnt; i++) {
6734 struct tg3_napi *tnapi = &tp->napi[i];
6736 if (tnapi->tx_ring) {
6737 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6738 tnapi->tx_ring, tnapi->tx_desc_mapping);
6739 tnapi->tx_ring = NULL;
6742 kfree(tnapi->tx_buffers);
6743 tnapi->tx_buffers = NULL;
6745 if (tnapi->rx_rcb) {
6746 dma_free_coherent(&tp->pdev->dev,
6747 TG3_RX_RCB_RING_BYTES(tp),
6748 tnapi->rx_rcb,
6749 tnapi->rx_rcb_mapping);
6750 tnapi->rx_rcb = NULL;
6753 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6755 if (tnapi->hw_status) {
6756 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6757 tnapi->hw_status,
6758 tnapi->status_mapping);
6759 tnapi->hw_status = NULL;
6763 if (tp->hw_stats) {
6764 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6765 tp->hw_stats, tp->stats_mapping);
6766 tp->hw_stats = NULL;
6770 /*
6771 * Must not be invoked with interrupt sources disabled and
6772 * the hardware shut down. Can sleep.
6773 */
6774 static int tg3_alloc_consistent(struct tg3 *tp)
6776 int i;
6778 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6779 sizeof(struct tg3_hw_stats),
6780 &tp->stats_mapping,
6781 GFP_KERNEL);
6782 if (!tp->hw_stats)
6783 goto err_out;
6785 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6787 for (i = 0; i < tp->irq_cnt; i++) {
6788 struct tg3_napi *tnapi = &tp->napi[i];
6789 struct tg3_hw_status *sblk;
6791 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6792 TG3_HW_STATUS_SIZE,
6793 &tnapi->status_mapping,
6794 GFP_KERNEL);
6795 if (!tnapi->hw_status)
6796 goto err_out;
6798 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6799 sblk = tnapi->hw_status;
6801 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6802 goto err_out;
6804 /* If multivector TSS is enabled, vector 0 does not handle
6805 * tx interrupts. Don't allocate any resources for it.
6807 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6808 (i && tg3_flag(tp, ENABLE_TSS))) {
6809 tnapi->tx_buffers = kzalloc(
6810 sizeof(struct tg3_tx_ring_info) *
6811 TG3_TX_RING_SIZE, GFP_KERNEL);
6812 if (!tnapi->tx_buffers)
6813 goto err_out;
6815 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6816 TG3_TX_RING_BYTES,
6817 &tnapi->tx_desc_mapping,
6818 GFP_KERNEL);
6819 if (!tnapi->tx_ring)
6820 goto err_out;
6823 /*
6824 * When RSS is enabled, the status block format changes
6825 * slightly. The "rx_jumbo_consumer", "reserved",
6826 * and "rx_mini_consumer" members get mapped to the
6827 * other three rx return ring producer indexes.
6828 */
6829 switch (i) {
6830 default:
6831 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6832 break;
6833 case 2:
6834 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6835 break;
6836 case 3:
6837 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6838 break;
6839 case 4:
6840 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6841 break;
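/* Net effect with RSS active: vector 1 reports through the default
 * idx[0].rx_producer, while vectors 2, 3 and 4 reuse the
 * rx_jumbo_consumer, reserved and rx_mini_consumer words
 * respectively.
 */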
6844 /*
6845 * If multivector RSS is enabled, vector 0 does not handle
6846 * rx or tx interrupts. Don't allocate any resources for it.
6847 */
6848 if (!i && tg3_flag(tp, ENABLE_RSS))
6849 continue;
6851 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6852 TG3_RX_RCB_RING_BYTES(tp),
6853 &tnapi->rx_rcb_mapping,
6854 GFP_KERNEL);
6855 if (!tnapi->rx_rcb)
6856 goto err_out;
6858 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6861 return 0;
6863 err_out:
6864 tg3_free_consistent(tp);
6865 return -ENOMEM;
6868 #define MAX_WAIT_CNT 1000
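/* 1000 polls at 100 us apiece bounds each stop/abort wait below at
 * roughly 100 ms per block.
 */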
6870 /* To stop a block, clear the enable bit and poll till it
6871 * clears. tp->lock is held.
6873 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6875 unsigned int i;
6876 u32 val;
6878 if (tg3_flag(tp, 5705_PLUS)) {
6879 switch (ofs) {
6880 case RCVLSC_MODE:
6881 case DMAC_MODE:
6882 case MBFREE_MODE:
6883 case BUFMGR_MODE:
6884 case MEMARB_MODE:
6885 /* We can't enable/disable these bits of the
6886 * 5705/5750, just say success.
6888 return 0;
6890 default:
6891 break;
6895 val = tr32(ofs);
6896 val &= ~enable_bit;
6897 tw32_f(ofs, val);
6899 for (i = 0; i < MAX_WAIT_CNT; i++) {
6900 udelay(100);
6901 val = tr32(ofs);
6902 if ((val & enable_bit) == 0)
6903 break;
6906 if (i == MAX_WAIT_CNT && !silent) {
6907 dev_err(&tp->pdev->dev,
6908 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6909 ofs, enable_bit);
6910 return -ENODEV;
6913 return 0;
6916 /* tp->lock is held. */
6917 static int tg3_abort_hw(struct tg3 *tp, int silent)
6919 int i, err;
6921 tg3_disable_ints(tp);
6923 tp->rx_mode &= ~RX_MODE_ENABLE;
6924 tw32_f(MAC_RX_MODE, tp->rx_mode);
6925 udelay(10);
6927 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6928 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6929 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6930 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6931 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6932 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6934 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6935 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6936 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6937 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6938 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6939 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6940 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6942 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6943 tw32_f(MAC_MODE, tp->mac_mode);
6944 udelay(40);
6946 tp->tx_mode &= ~TX_MODE_ENABLE;
6947 tw32_f(MAC_TX_MODE, tp->tx_mode);
6949 for (i = 0; i < MAX_WAIT_CNT; i++) {
6950 udelay(100);
6951 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6952 break;
6954 if (i >= MAX_WAIT_CNT) {
6955 dev_err(&tp->pdev->dev,
6956 "%s timed out, TX_MODE_ENABLE will not clear "
6957 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6958 err |= -ENODEV;
6961 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6962 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6963 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6965 tw32(FTQ_RESET, 0xffffffff);
6966 tw32(FTQ_RESET, 0x00000000);
6968 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6969 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6971 for (i = 0; i < tp->irq_cnt; i++) {
6972 struct tg3_napi *tnapi = &tp->napi[i];
6973 if (tnapi->hw_status)
6974 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6976 if (tp->hw_stats)
6977 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6979 return err;
6982 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6984 int i;
6985 u32 apedata;
6987 /* NCSI does not support APE events */
6988 if (tg3_flag(tp, APE_HAS_NCSI))
6989 return;
6991 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6992 if (apedata != APE_SEG_SIG_MAGIC)
6993 return;
6995 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6996 if (!(apedata & APE_FW_STATUS_READY))
6997 return;
6999 /* Wait for up to 1 millisecond for APE to service previous event. */
7000 for (i = 0; i < 10; i++) {
7001 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
7002 return;
7004 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
7006 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
7007 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
7008 event | APE_EVENT_STATUS_EVENT_PENDING);
7010 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
7012 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
7013 break;
7015 udelay(100);
7018 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
7019 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
7022 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
7024 u32 event;
7025 u32 apedata;
7027 if (!tg3_flag(tp, ENABLE_APE))
7028 return;
7030 switch (kind) {
7031 case RESET_KIND_INIT:
7032 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
7033 APE_HOST_SEG_SIG_MAGIC);
7034 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
7035 APE_HOST_SEG_LEN_MAGIC);
7036 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
7037 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
7038 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
7039 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
7040 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
7041 APE_HOST_BEHAV_NO_PHYLOCK);
7042 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
7043 TG3_APE_HOST_DRVR_STATE_START);
7045 event = APE_EVENT_STATUS_STATE_START;
7046 break;
7047 case RESET_KIND_SHUTDOWN:
7048 /* With the interface we are currently using,
7049 * APE does not track driver state. Wiping
7050 * out the HOST SEGMENT SIGNATURE forces
7051 * the APE to assume OS absent status.
7053 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
7055 if (device_may_wakeup(&tp->pdev->dev) &&
7056 tg3_flag(tp, WOL_ENABLE)) {
7057 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
7058 TG3_APE_HOST_WOL_SPEED_AUTO);
7059 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
7060 } else
7061 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
7063 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
7065 event = APE_EVENT_STATUS_STATE_UNLOAD;
7066 break;
7067 case RESET_KIND_SUSPEND:
7068 event = APE_EVENT_STATUS_STATE_SUSPEND;
7069 break;
7070 default:
7071 return;
7074 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7076 tg3_ape_send_event(tp, event);
7079 /* tp->lock is held. */
7080 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7082 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7083 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7085 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7086 switch (kind) {
7087 case RESET_KIND_INIT:
7088 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7089 DRV_STATE_START);
7090 break;
7092 case RESET_KIND_SHUTDOWN:
7093 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7094 DRV_STATE_UNLOAD);
7095 break;
7097 case RESET_KIND_SUSPEND:
7098 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7099 DRV_STATE_SUSPEND);
7100 break;
7102 default:
7103 break;
7107 if (kind == RESET_KIND_INIT ||
7108 kind == RESET_KIND_SUSPEND)
7109 tg3_ape_driver_state_change(tp, kind);
7112 /* tp->lock is held. */
7113 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7115 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7116 switch (kind) {
7117 case RESET_KIND_INIT:
7118 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7119 DRV_STATE_START_DONE);
7120 break;
7122 case RESET_KIND_SHUTDOWN:
7123 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7124 DRV_STATE_UNLOAD_DONE);
7125 break;
7127 default:
7128 break;
7132 if (kind == RESET_KIND_SHUTDOWN)
7133 tg3_ape_driver_state_change(tp, kind);
7136 /* tp->lock is held. */
7137 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7139 if (tg3_flag(tp, ENABLE_ASF)) {
7140 switch (kind) {
7141 case RESET_KIND_INIT:
7142 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7143 DRV_STATE_START);
7144 break;
7146 case RESET_KIND_SHUTDOWN:
7147 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7148 DRV_STATE_UNLOAD);
7149 break;
7151 case RESET_KIND_SUSPEND:
7152 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7153 DRV_STATE_SUSPEND);
7154 break;
7156 default:
7157 break;
7162 static int tg3_poll_fw(struct tg3 *tp)
7164 int i;
7165 u32 val;
7167 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7168 /* Wait up to 20ms for init done. */
7169 for (i = 0; i < 200; i++) {
7170 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7171 return 0;
7172 udelay(100);
7174 return -ENODEV;
7177 /* Wait for firmware initialization to complete. */
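/* (Up to 100,000 polls of 10 us each, i.e. about one second.) */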
7178 for (i = 0; i < 100000; i++) {
7179 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7180 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7181 break;
7182 udelay(10);
7185 /* Chip might not be fitted with firmware. Some Sun onboard
7186 * parts are configured like that. So don't signal the timeout
7187 * of the above loop as an error, but do report the lack of
7188 * running firmware once.
7190 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7191 tg3_flag_set(tp, NO_FWARE_REPORTED);
7193 netdev_info(tp->dev, "No firmware running\n");
7196 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7197 /* The 57765 A0 needs a little more
7198 * time to do some important work.
7200 mdelay(10);
7203 return 0;
7206 /* Save PCI command register before chip reset */
7207 static void tg3_save_pci_state(struct tg3 *tp)
7209 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7212 /* Restore PCI state after chip reset */
7213 static void tg3_restore_pci_state(struct tg3 *tp)
7215 u32 val;
7217 /* Re-enable indirect register accesses. */
7218 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7219 tp->misc_host_ctrl);
7221 /* Set MAX PCI retry to zero. */
7222 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7223 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7224 tg3_flag(tp, PCIX_MODE))
7225 val |= PCISTATE_RETRY_SAME_DMA;
7226 /* Allow reads and writes to the APE register and memory space. */
7227 if (tg3_flag(tp, ENABLE_APE))
7228 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7229 PCISTATE_ALLOW_APE_SHMEM_WR |
7230 PCISTATE_ALLOW_APE_PSPACE_WR;
7231 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7233 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7235 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7236 if (tg3_flag(tp, PCI_EXPRESS))
7237 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7238 else {
7239 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7240 tp->pci_cacheline_sz);
7241 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7242 tp->pci_lat_timer);
7246 /* Make sure PCI-X relaxed ordering bit is clear. */
7247 if (tg3_flag(tp, PCIX_MODE)) {
7248 u16 pcix_cmd;
7250 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7251 &pcix_cmd);
7252 pcix_cmd &= ~PCI_X_CMD_ERO;
7253 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7254 pcix_cmd);
7257 if (tg3_flag(tp, 5780_CLASS)) {
7259 /* Chip reset on 5780 will reset MSI enable bit,
7260 * so we need to restore it.
7261 */
7262 if (tg3_flag(tp, USING_MSI)) {
7263 u16 ctrl;
7265 pci_read_config_word(tp->pdev,
7266 tp->msi_cap + PCI_MSI_FLAGS,
7267 &ctrl);
7268 pci_write_config_word(tp->pdev,
7269 tp->msi_cap + PCI_MSI_FLAGS,
7270 ctrl | PCI_MSI_FLAGS_ENABLE);
7271 val = tr32(MSGINT_MODE);
7272 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7277 static void tg3_stop_fw(struct tg3 *);
7279 /* tp->lock is held. */
7280 static int tg3_chip_reset(struct tg3 *tp)
7282 u32 val;
7283 void (*write_op)(struct tg3 *, u32, u32);
7284 int i, err;
7286 tg3_nvram_lock(tp);
7288 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7290 /* No matching tg3_nvram_unlock() after this because
7291 * chip reset below will undo the nvram lock.
7293 tp->nvram_lock_cnt = 0;
7295 /* GRC_MISC_CFG core clock reset will clear the memory
7296 * enable bit in PCI register 4 and the MSI enable bit
7297 * on some chips, so we save relevant registers here.
7299 tg3_save_pci_state(tp);
7301 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7302 tg3_flag(tp, 5755_PLUS))
7303 tw32(GRC_FASTBOOT_PC, 0);
7305 /*
7306 * We must avoid the readl() that normally takes place.
7307 * It locks machines, causes machine checks, and other
7308 * fun things. So, temporarily disable the 5701
7309 * hardware workaround, while we do the reset.
7311 write_op = tp->write32;
7312 if (write_op == tg3_write_flush_reg32)
7313 tp->write32 = tg3_write32;
7315 /* Prevent the irq handler from reading or writing PCI registers
7316 * during chip reset when the memory enable bit in the PCI command
7317 * register may be cleared. The chip does not generate interrupt
7318 * at this time, but the irq handler may still be called due to irq
7319 * sharing or irqpoll.
7321 tg3_flag_set(tp, CHIP_RESETTING);
7322 for (i = 0; i < tp->irq_cnt; i++) {
7323 struct tg3_napi *tnapi = &tp->napi[i];
7324 if (tnapi->hw_status) {
7325 tnapi->hw_status->status = 0;
7326 tnapi->hw_status->status_tag = 0;
7328 tnapi->last_tag = 0;
7329 tnapi->last_irq_tag = 0;
7331 smp_mb();
7333 for (i = 0; i < tp->irq_cnt; i++)
7334 synchronize_irq(tp->napi[i].irq_vec);
7336 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7337 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7338 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7341 /* do the reset */
7342 val = GRC_MISC_CFG_CORECLK_RESET;
7344 if (tg3_flag(tp, PCI_EXPRESS)) {
7345 /* Force PCIe 1.0a mode */
7346 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7347 !tg3_flag(tp, 57765_PLUS) &&
7348 tr32(TG3_PCIE_PHY_TSTCTL) ==
7349 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7350 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7352 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7353 tw32(GRC_MISC_CFG, (1 << 29));
7354 val |= (1 << 29);
7358 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7359 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7360 tw32(GRC_VCPU_EXT_CTRL,
7361 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7364 /* Manage gphy power for all CPMU absent PCIe devices. */
7365 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7366 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7368 tw32(GRC_MISC_CFG, val);
7370 /* restore 5701 hardware bug workaround write method */
7371 tp->write32 = write_op;
7373 /* Unfortunately, we have to delay before the PCI read back.
7374 * Some 575X chips will not even respond to a PCI cfg access
7375 * when the reset command is given to the chip.
7377 * How do these hardware designers expect things to work
7378 * properly if the PCI write is posted for a long period
7379 * of time? It is always necessary to have some method
7380 * by which a register read-back can occur to push out
7381 * the posted write that performs the reset.
7383 * For most tg3 variants the trick below was working.
7384 * Ho hum...
7386 udelay(120);
7388 /* Flush PCI posted writes. The normal MMIO registers
7389 * are inaccessible at this time so this is the only
7390 * way to do this reliably (actually, this is no longer
7391 * the case, see above). I tried to use indirect
7392 * register read/write but this upset some 5701 variants.
7393 */
7394 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7396 udelay(120);
7398 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7399 u16 val16;
7401 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7402 int i;
7403 u32 cfg_val;
7405 /* Wait for link training to complete. */
7406 for (i = 0; i < 5000; i++)
7407 udelay(100);
7409 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7410 pci_write_config_dword(tp->pdev, 0xc4,
7411 cfg_val | (1 << 15));
7414 /* Clear the "no snoop" and "relaxed ordering" bits. */
7415 pci_read_config_word(tp->pdev,
7416 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7417 &val16);
7418 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7419 PCI_EXP_DEVCTL_NOSNOOP_EN);
7420 /*
7421 * Older PCIe devices only support the 128 byte
7422 * MPS setting. Enforce the restriction.
7423 */
7424 if (!tg3_flag(tp, CPMU_PRESENT))
7425 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7426 pci_write_config_word(tp->pdev,
7427 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7428 val16);
7430 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7432 /* Clear error status */
7433 pci_write_config_word(tp->pdev,
7434 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7435 PCI_EXP_DEVSTA_CED |
7436 PCI_EXP_DEVSTA_NFED |
7437 PCI_EXP_DEVSTA_FED |
7438 PCI_EXP_DEVSTA_URD);
7441 tg3_restore_pci_state(tp);
7443 tg3_flag_clear(tp, CHIP_RESETTING);
7444 tg3_flag_clear(tp, ERROR_PROCESSED);
7446 val = 0;
7447 if (tg3_flag(tp, 5780_CLASS))
7448 val = tr32(MEMARB_MODE);
7449 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7451 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7452 tg3_stop_fw(tp);
7453 tw32(0x5000, 0x400);
7456 tw32(GRC_MODE, tp->grc_mode);
7458 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7459 val = tr32(0xc4);
7461 tw32(0xc4, val | (1 << 15));
7464 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7465 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7466 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7467 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7468 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7469 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7472 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7473 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7474 val = tp->mac_mode;
7475 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7476 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7477 val = tp->mac_mode;
7478 } else
7479 val = 0;
7481 tw32_f(MAC_MODE, val);
7482 udelay(40);
7484 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7486 err = tg3_poll_fw(tp);
7487 if (err)
7488 return err;
7490 tg3_mdio_start(tp);
7492 if (tg3_flag(tp, PCI_EXPRESS) &&
7493 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7494 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7495 !tg3_flag(tp, 57765_PLUS)) {
7496 val = tr32(0x7c00);
7498 tw32(0x7c00, val | (1 << 25));
7501 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7502 val = tr32(TG3_CPMU_CLCK_ORIDE);
7503 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7506 /* Reprobe ASF enable state. */
7507 tg3_flag_clear(tp, ENABLE_ASF);
7508 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7509 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7510 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7511 u32 nic_cfg;
7513 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7514 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7515 tg3_flag_set(tp, ENABLE_ASF);
7516 tp->last_event_jiffies = jiffies;
7517 if (tg3_flag(tp, 5750_PLUS))
7518 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7522 return 0;
7525 /* tp->lock is held. */
7526 static void tg3_stop_fw(struct tg3 *tp)
7528 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7529 /* Wait for RX cpu to ACK the previous event. */
7530 tg3_wait_for_event_ack(tp);
7532 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7534 tg3_generate_fw_event(tp);
7536 /* Wait for RX cpu to ACK this event. */
7537 tg3_wait_for_event_ack(tp);
7541 /* tp->lock is held. */
7542 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7544 int err;
7546 tg3_stop_fw(tp);
7548 tg3_write_sig_pre_reset(tp, kind);
7550 tg3_abort_hw(tp, silent);
7551 err = tg3_chip_reset(tp);
7553 __tg3_set_mac_addr(tp, 0);
7555 tg3_write_sig_legacy(tp, kind);
7556 tg3_write_sig_post_reset(tp, kind);
7558 if (err)
7559 return err;
7561 return 0;
7564 #define RX_CPU_SCRATCH_BASE 0x30000
7565 #define RX_CPU_SCRATCH_SIZE 0x04000
7566 #define TX_CPU_SCRATCH_BASE 0x34000
7567 #define TX_CPU_SCRATCH_SIZE 0x04000
7569 /* tp->lock is held. */
7570 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7572 int i;
7574 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7576 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7577 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7579 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7580 return 0;
7582 if (offset == RX_CPU_BASE) {
7583 for (i = 0; i < 10000; i++) {
7584 tw32(offset + CPU_STATE, 0xffffffff);
7585 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7586 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7587 break;
7590 tw32(offset + CPU_STATE, 0xffffffff);
7591 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7592 udelay(10);
7593 } else {
7594 for (i = 0; i < 10000; i++) {
7595 tw32(offset + CPU_STATE, 0xffffffff);
7596 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7597 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7598 break;
7602 if (i >= 10000) {
7603 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7604 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7605 return -ENODEV;
7608 /* Clear firmware's nvram arbitration. */
7609 if (tg3_flag(tp, NVRAM))
7610 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7611 return 0;
7614 struct fw_info {
7615 unsigned int fw_base;
7616 unsigned int fw_len;
7617 const __be32 *fw_data;
7620 /* tp->lock is held. */
7621 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7622 int cpu_scratch_size, struct fw_info *info)
7624 int err, lock_err, i;
7625 void (*write_op)(struct tg3 *, u32, u32);
7627 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7628 netdev_err(tp->dev,
7629 "%s: Trying to load TX cpu firmware which is 5705\n",
7630 __func__);
7631 return -EINVAL;
7634 if (tg3_flag(tp, 5705_PLUS))
7635 write_op = tg3_write_mem;
7636 else
7637 write_op = tg3_write_indirect_reg32;
7639 /* It is possible that bootcode is still loading at this point.
7640 * Get the nvram lock first before halting the cpu.
7642 lock_err = tg3_nvram_lock(tp);
7643 err = tg3_halt_cpu(tp, cpu_base);
7644 if (!lock_err)
7645 tg3_nvram_unlock(tp);
7646 if (err)
7647 goto out;
7649 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7650 write_op(tp, cpu_scratch_base + i, 0);
7651 tw32(cpu_base + CPU_STATE, 0xffffffff);
7652 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7653 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7654 write_op(tp, (cpu_scratch_base +
7655 (info->fw_base & 0xffff) +
7656 (i * sizeof(u32))),
7657 be32_to_cpu(info->fw_data[i]));
7659 err = 0;
7661 out:
7662 return err;
7665 /* tp->lock is held. */
7666 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7668 struct fw_info info;
7669 const __be32 *fw_data;
7670 int err, i;
7672 fw_data = (void *)tp->fw->data;
7674 /* Firmware blob starts with version numbers, followed by
7675 start address and length. We set the complete length here:
7676 length = end_address_of_bss - start_address_of_text.
7677 The remainder is the blob to be loaded contiguously
7678 from the start address. */
7680 info.fw_base = be32_to_cpu(fw_data[1]);
7681 info.fw_len = tp->fw->size - 12;
7682 info.fw_data = &fw_data[3];
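/* Layout implied for the blob (and the reason for the "- 12"):
 *
 *   fw_data[0]   version
 *   fw_data[1]   fw_base (load/start address)
 *   fw_data[2]   stated length
 *   fw_data[3+]  text/data, loaded contiguously at fw_base
 *
 * Three 32-bit header words account for the 12 bytes subtracted
 * from tp->fw->size.
 */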
7684 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7685 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7686 &info);
7687 if (err)
7688 return err;
7690 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7691 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7692 &info);
7693 if (err)
7694 return err;
7696 /* Now start up only the RX CPU. */
7697 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7698 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7700 for (i = 0; i < 5; i++) {
7701 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7702 break;
7703 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7704 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7705 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7706 udelay(1000);
7708 if (i >= 5) {
7709 netdev_err(tp->dev, "%s fails to set RX CPU PC: is %08x, "
7710 "should be %08x\n", __func__,
7711 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7712 return -ENODEV;
7714 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7715 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7717 return 0;
7720 /* tp->lock is held. */
7721 static int tg3_load_tso_firmware(struct tg3 *tp)
7723 struct fw_info info;
7724 const __be32 *fw_data;
7725 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7726 int err, i;
7728 if (tg3_flag(tp, HW_TSO_1) ||
7729 tg3_flag(tp, HW_TSO_2) ||
7730 tg3_flag(tp, HW_TSO_3))
7731 return 0;
7733 fw_data = (void *)tp->fw->data;
7735 /* Firmware blob starts with version numbers, followed by
7736 start address and length. We set the complete length here:
7737 length = end_address_of_bss - start_address_of_text.
7738 The remainder is the blob to be loaded contiguously
7739 from the start address. */
7741 info.fw_base = be32_to_cpu(fw_data[1]);
7742 cpu_scratch_size = tp->fw_len;
7743 info.fw_len = tp->fw->size - 12;
7744 info.fw_data = &fw_data[3];
7746 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7747 cpu_base = RX_CPU_BASE;
7748 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7749 } else {
7750 cpu_base = TX_CPU_BASE;
7751 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7752 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7755 err = tg3_load_firmware_cpu(tp, cpu_base,
7756 cpu_scratch_base, cpu_scratch_size,
7757 &info);
7758 if (err)
7759 return err;
7761 /* Now start up the CPU. */
7762 tw32(cpu_base + CPU_STATE, 0xffffffff);
7763 tw32_f(cpu_base + CPU_PC, info.fw_base);
7765 for (i = 0; i < 5; i++) {
7766 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7767 break;
7768 tw32(cpu_base + CPU_STATE, 0xffffffff);
7769 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7770 tw32_f(cpu_base + CPU_PC, info.fw_base);
7771 udelay(1000);
7773 if (i >= 5) {
7774 netdev_err(tp->dev,
7775 "%s fails to set CPU PC, is %08x should be %08x\n",
7776 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7777 return -ENODEV;
7779 tw32(cpu_base + CPU_STATE, 0xffffffff);
7780 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7781 return 0;
7785 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7787 struct tg3 *tp = netdev_priv(dev);
7788 struct sockaddr *addr = p;
7789 int err = 0, skip_mac_1 = 0;
7791 if (!is_valid_ether_addr(addr->sa_data))
7792 return -EINVAL;
7794 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7796 if (!netif_running(dev))
7797 return 0;
7799 if (tg3_flag(tp, ENABLE_ASF)) {
7800 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7802 addr0_high = tr32(MAC_ADDR_0_HIGH);
7803 addr0_low = tr32(MAC_ADDR_0_LOW);
7804 addr1_high = tr32(MAC_ADDR_1_HIGH);
7805 addr1_low = tr32(MAC_ADDR_1_LOW);
7807 /* Skip MAC addr 1 if ASF is using it. */
7808 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7809 !(addr1_high == 0 && addr1_low == 0))
7810 skip_mac_1 = 1;
7812 spin_lock_bh(&tp->lock);
7813 __tg3_set_mac_addr(tp, skip_mac_1);
7814 spin_unlock_bh(&tp->lock);
7816 return err;
7819 /* tp->lock is held. */
7820 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7821 dma_addr_t mapping, u32 maxlen_flags,
7822 u32 nic_addr)
7824 tg3_write_mem(tp,
7825 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7826 ((u64) mapping >> 32));
7827 tg3_write_mem(tp,
7828 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7829 ((u64) mapping & 0xffffffff));
7830 tg3_write_mem(tp,
7831 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7832 maxlen_flags);
7834 if (!tg3_flag(tp, 5705_PLUS))
7835 tg3_write_mem(tp,
7836 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7837 nic_addr);
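/* A BDINFO slot in NIC SRAM is therefore four 32-bit words: host ring
 * DMA address (high, low), a maxlen/flags word, and the NIC-side ring
 * address. tg3_rings_reset() below disables a ring by writing
 * BDINFO_FLAGS_DISABLED into the maxlen/flags word.
 */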
7840 static void __tg3_set_rx_mode(struct net_device *);
7841 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7843 int i;
7845 if (!tg3_flag(tp, ENABLE_TSS)) {
7846 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7847 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7848 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7849 } else {
7850 tw32(HOSTCC_TXCOL_TICKS, 0);
7851 tw32(HOSTCC_TXMAX_FRAMES, 0);
7852 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7855 if (!tg3_flag(tp, ENABLE_RSS)) {
7856 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7857 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7858 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7859 } else {
7860 tw32(HOSTCC_RXCOL_TICKS, 0);
7861 tw32(HOSTCC_RXMAX_FRAMES, 0);
7862 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7865 if (!tg3_flag(tp, 5705_PLUS)) {
7866 u32 val = ec->stats_block_coalesce_usecs;
7868 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7869 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7871 if (!netif_carrier_ok(tp->dev))
7872 val = 0;
7874 tw32(HOSTCC_STAT_COAL_TICKS, val);
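/* Each additional MSI-X vector owns a block of coalescing registers
 * 0x18 bytes wide starting at the *_VEC1 addresses, which is why the
 * loops below step the register offset by i * 0x18.
 */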
7877 for (i = 0; i < tp->irq_cnt - 1; i++) {
7878 u32 reg;
7880 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7881 tw32(reg, ec->rx_coalesce_usecs);
7882 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7883 tw32(reg, ec->rx_max_coalesced_frames);
7884 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7885 tw32(reg, ec->rx_max_coalesced_frames_irq);
7887 if (tg3_flag(tp, ENABLE_TSS)) {
7888 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7889 tw32(reg, ec->tx_coalesce_usecs);
7890 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7891 tw32(reg, ec->tx_max_coalesced_frames);
7892 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7893 tw32(reg, ec->tx_max_coalesced_frames_irq);
7897 for (; i < tp->irq_max - 1; i++) {
7898 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7899 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7900 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7902 if (tg3_flag(tp, ENABLE_TSS)) {
7903 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7904 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7905 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7910 /* tp->lock is held. */
7911 static void tg3_rings_reset(struct tg3 *tp)
7913 int i;
7914 u32 stblk, txrcb, rxrcb, limit;
7915 struct tg3_napi *tnapi = &tp->napi[0];
7917 /* Disable all transmit rings but the first. */
7918 if (!tg3_flag(tp, 5705_PLUS))
7919 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7920 else if (tg3_flag(tp, 5717_PLUS))
7921 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7922 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7923 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7924 else
7925 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7927 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7928 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7929 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7930 BDINFO_FLAGS_DISABLED);
7933 /* Disable all receive return rings but the first. */
7934 if (tg3_flag(tp, 5717_PLUS))
7935 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7936 else if (!tg3_flag(tp, 5705_PLUS))
7937 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7938 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7939 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7940 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7941 else
7942 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7944 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7945 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7946 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7947 BDINFO_FLAGS_DISABLED);
7949 /* Disable interrupts */
7950 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7951 tp->napi[0].chk_msi_cnt = 0;
7952 tp->napi[0].last_rx_cons = 0;
7953 tp->napi[0].last_tx_cons = 0;
7955 /* Zero mailbox registers. */
7956 if (tg3_flag(tp, SUPPORT_MSIX)) {
7957 for (i = 1; i < tp->irq_max; i++) {
7958 tp->napi[i].tx_prod = 0;
7959 tp->napi[i].tx_cons = 0;
7960 if (tg3_flag(tp, ENABLE_TSS))
7961 tw32_mailbox(tp->napi[i].prodmbox, 0);
7962 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7963 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7964 tp->napi[i].chk_msi_cnt = 0;
7965 tp->napi[i].last_rx_cons = 0;
7966 tp->napi[i].last_tx_cons = 0;
7968 if (!tg3_flag(tp, ENABLE_TSS))
7969 tw32_mailbox(tp->napi[0].prodmbox, 0);
7970 } else {
7971 tp->napi[0].tx_prod = 0;
7972 tp->napi[0].tx_cons = 0;
7973 tw32_mailbox(tp->napi[0].prodmbox, 0);
7974 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7977 /* Make sure the NIC-based send BD rings are disabled. */
7978 if (!tg3_flag(tp, 5705_PLUS)) {
7979 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7980 for (i = 0; i < 16; i++)
7981 tw32_tx_mbox(mbox + i * 8, 0);
7984 txrcb = NIC_SRAM_SEND_RCB;
7985 rxrcb = NIC_SRAM_RCV_RET_RCB;
7987 /* Clear status block in ram. */
7988 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7990 /* Set status block DMA address */
7991 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7992 ((u64) tnapi->status_mapping >> 32));
7993 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7994 ((u64) tnapi->status_mapping & 0xffffffff));
7996 if (tnapi->tx_ring) {
7997 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7998 (TG3_TX_RING_SIZE <<
7999 BDINFO_FLAGS_MAXLEN_SHIFT),
8000 NIC_SRAM_TX_BUFFER_DESC);
8001 txrcb += TG3_BDINFO_SIZE;
8004 if (tnapi->rx_rcb) {
8005 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8006 (tp->rx_ret_ring_mask + 1) <<
8007 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8008 rxrcb += TG3_BDINFO_SIZE;
8011 stblk = HOSTCC_STATBLCK_RING1;
8013 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8014 u64 mapping = (u64)tnapi->status_mapping;
8015 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8016 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8018 /* Clear status block in ram. */
8019 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8021 if (tnapi->tx_ring) {
8022 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8023 (TG3_TX_RING_SIZE <<
8024 BDINFO_FLAGS_MAXLEN_SHIFT),
8025 NIC_SRAM_TX_BUFFER_DESC);
8026 txrcb += TG3_BDINFO_SIZE;
8029 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8030 ((tp->rx_ret_ring_mask + 1) <<
8031 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8033 stblk += 8;
8034 rxrcb += TG3_BDINFO_SIZE;
8038 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8040 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8042 if (!tg3_flag(tp, 5750_PLUS) ||
8043 tg3_flag(tp, 5780_CLASS) ||
8044 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8045 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8046 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8047 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8048 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8049 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8050 else
8051 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8053 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8054 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8056 val = min(nic_rep_thresh, host_rep_thresh);
8057 tw32(RCVBDI_STD_THRESH, val);
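/* Worked example: with rx_pending = 200, host_rep_thresh =
 * max(200 / 8, 1) = 25, so the value programmed is
 * min(nic_rep_thresh, 25).
 */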
8059 if (tg3_flag(tp, 57765_PLUS))
8060 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8062 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8063 return;
8065 if (!tg3_flag(tp, 5705_PLUS))
8066 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8067 else
8068 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8070 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8072 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8073 tw32(RCVBDI_JUMBO_THRESH, val);
8075 if (tg3_flag(tp, 57765_PLUS))
8076 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8079 /* tp->lock is held. */
8080 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8082 u32 val, rdmac_mode;
8083 int i, err, limit;
8084 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8086 tg3_disable_ints(tp);
8088 tg3_stop_fw(tp);
8090 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8092 if (tg3_flag(tp, INIT_COMPLETE))
8093 tg3_abort_hw(tp, 1);
8095 /* Enable MAC control of LPI */
8096 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8097 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8098 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8099 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8101 tw32_f(TG3_CPMU_EEE_CTRL,
8102 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8104 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8105 TG3_CPMU_EEEMD_LPI_IN_TX |
8106 TG3_CPMU_EEEMD_LPI_IN_RX |
8107 TG3_CPMU_EEEMD_EEE_ENABLE;
8109 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8110 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8112 if (tg3_flag(tp, ENABLE_APE))
8113 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8115 tw32_f(TG3_CPMU_EEE_MODE, val);
8117 tw32_f(TG3_CPMU_EEE_DBTMR1,
8118 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8119 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8121 tw32_f(TG3_CPMU_EEE_DBTMR2,
8122 TG3_CPMU_DBTMR2_APE_TX_2047US |
8123 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8126 if (reset_phy)
8127 tg3_phy_reset(tp);
8129 err = tg3_chip_reset(tp);
8130 if (err)
8131 return err;
8133 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8135 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8136 val = tr32(TG3_CPMU_CTRL);
8137 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8138 tw32(TG3_CPMU_CTRL, val);
8140 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8141 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8142 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8143 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8145 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8146 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8147 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8148 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8150 val = tr32(TG3_CPMU_HST_ACC);
8151 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8152 val |= CPMU_HST_ACC_MACCLK_6_25;
8153 tw32(TG3_CPMU_HST_ACC, val);
8156 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8157 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8158 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8159 PCIE_PWR_MGMT_L1_THRESH_4MS;
8160 tw32(PCIE_PWR_MGMT_THRESH, val);
8162 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8163 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8165 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8167 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8168 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8171 if (tg3_flag(tp, L1PLLPD_EN)) {
8172 u32 grc_mode = tr32(GRC_MODE);
8174 /* Access the lower 1K of PL PCIE block registers. */
8175 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8176 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8178 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8179 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8180 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8182 tw32(GRC_MODE, grc_mode);
8185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8186 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8187 u32 grc_mode = tr32(GRC_MODE);
8189 /* Access the lower 1K of PL PCIE block registers. */
8190 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8191 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8193 val = tr32(TG3_PCIE_TLDLPL_PORT +
8194 TG3_PCIE_PL_LO_PHYCTL5);
8195 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8196 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8198 tw32(GRC_MODE, grc_mode);
8201 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8202 u32 grc_mode = tr32(GRC_MODE);
8204 /* Access the lower 1K of DL PCIE block registers. */
8205 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8206 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8208 val = tr32(TG3_PCIE_TLDLPL_PORT +
8209 TG3_PCIE_DL_LO_FTSMAX);
8210 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8211 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8212 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8214 tw32(GRC_MODE, grc_mode);
8217 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8218 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8219 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8220 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8223 /* This works around an issue with Athlon chipsets on
8224 * B3 tigon3 silicon. This bit has no effect on any
8225 * other revision. But do not set this on PCI Express
8226 * chips and don't even touch the clocks if the CPMU is present.
8228 if (!tg3_flag(tp, CPMU_PRESENT)) {
8229 if (!tg3_flag(tp, PCI_EXPRESS))
8230 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8231 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8234 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8235 tg3_flag(tp, PCIX_MODE)) {
8236 val = tr32(TG3PCI_PCISTATE);
8237 val |= PCISTATE_RETRY_SAME_DMA;
8238 tw32(TG3PCI_PCISTATE, val);
8241 if (tg3_flag(tp, ENABLE_APE)) {
8242 /* Allow reads and writes to the
8243 * APE register and memory space.
8245 val = tr32(TG3PCI_PCISTATE);
8246 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8247 PCISTATE_ALLOW_APE_SHMEM_WR |
8248 PCISTATE_ALLOW_APE_PSPACE_WR;
8249 tw32(TG3PCI_PCISTATE, val);
8252 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8253 /* Enable some hw fixes. */
8254 val = tr32(TG3PCI_MSI_DATA);
8255 val |= (1 << 26) | (1 << 28) | (1 << 29);
8256 tw32(TG3PCI_MSI_DATA, val);
8259 /* Descriptor ring init may make accesses to the
8260 * NIC SRAM area to setup the TX descriptors, so we
8261 * can only do this after the hardware has been
8262 * successfully reset.
8264 err = tg3_init_rings(tp);
8265 if (err)
8266 return err;
8268 if (tg3_flag(tp, 57765_PLUS)) {
8269 val = tr32(TG3PCI_DMA_RW_CTRL) &
8270 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8271 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8272 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8273 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8274 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8275 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8276 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8277 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8278 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8279 /* This value is determined during the probe time DMA
8280 * engine test, tg3_test_dma.
8282 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8285 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8286 GRC_MODE_4X_NIC_SEND_RINGS |
8287 GRC_MODE_NO_TX_PHDR_CSUM |
8288 GRC_MODE_NO_RX_PHDR_CSUM);
8289 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8291 /* Pseudo-header checksum is done by hardware logic and not
8292 * the offload processors, so make the chip do the pseudo-
8293 * header checksums on receive. For transmit it is more
8294 * convenient to do the pseudo-header checksum in software
8295 * as Linux does that on transmit for us in all cases.
8297 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8299 tw32(GRC_MODE,
8300 tp->grc_mode |
8301 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
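/* Illustrative sketch (not driver code) of the IPv4 pseudo-header sum
 * discussed above: the partial checksum over source/destination address,
 * L4 protocol and L4 length that TCP and UDP fold into their checksums.
 * The hardware computes this on receive; Linux supplies it in software
 * on transmit.  The function name and host-order layout are ours.
 */
static inline u16 example_ipv4_pseudo_sum(u32 saddr, u32 daddr,
					  u8 proto, u16 l4len)
{
	u32 sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;		/* the high byte of the proto word is zero */
	sum += l4len;

	/* Fold the carries back into the low 16 bits. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	return (u16)sum;	/* partial sum; the payload is still to be added */
}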
8303 /* Set up the timer prescaler register. The clock is always 66 MHz. */
8304 val = tr32(GRC_MISC_CFG);
8305 val &= ~0xff;
8306 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8307 tw32(GRC_MISC_CFG, val);
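/* Worked example: with the fixed 66 MHz clock and the prescaler value of
 * 65 programmed above, the timer ticks at 66 MHz / (65 + 1) = 1 MHz,
 * i.e. one count per microsecond (assuming an N + 1 divider, which is
 * what the chosen value implies).
 */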
8309 /* Initialize MBUF/DESC pool. */
8310 if (tg3_flag(tp, 5750_PLUS)) {
8311 /* Do nothing. */
8312 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8313 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8314 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8315 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8316 else
8317 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8318 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8319 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8320 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8321 int fw_len;
8323 fw_len = tp->fw_len;
8324 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8325 tw32(BUFMGR_MB_POOL_ADDR,
8326 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8327 tw32(BUFMGR_MB_POOL_SIZE,
8328 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
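/* The fw_len rounding above is the usual align-up-to-power-of-two idiom,
 * (x + (a - 1)) & ~(a - 1) with a = 0x80.  Worked example: fw_len 0x1234
 * becomes (0x1234 + 0x7f) & ~0x7f = 0x12b3 & ~0x7f = 0x1280.
 */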
8331 if (tp->dev->mtu <= ETH_DATA_LEN) {
8332 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8333 tp->bufmgr_config.mbuf_read_dma_low_water);
8334 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8335 tp->bufmgr_config.mbuf_mac_rx_low_water);
8336 tw32(BUFMGR_MB_HIGH_WATER,
8337 tp->bufmgr_config.mbuf_high_water);
8338 } else {
8339 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8340 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8341 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8342 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8343 tw32(BUFMGR_MB_HIGH_WATER,
8344 tp->bufmgr_config.mbuf_high_water_jumbo);
8346 tw32(BUFMGR_DMA_LOW_WATER,
8347 tp->bufmgr_config.dma_low_water);
8348 tw32(BUFMGR_DMA_HIGH_WATER,
8349 tp->bufmgr_config.dma_high_water);
8351 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8352 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8353 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8354 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8355 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8356 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8357 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8358 tw32(BUFMGR_MODE, val);
8359 for (i = 0; i < 2000; i++) {
8360 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8361 break;
8362 udelay(10);
8364 if (i >= 2000) {
8365 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8366 return -ENODEV;
8369 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8370 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8372 tg3_setup_rxbd_thresholds(tp);
8374 /* Initialize TG3_BDINFO's at:
8375 * RCVDBDI_STD_BD: standard eth size rx ring
8376 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8377 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8379 * like so:
8380 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8381 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8382 * ring attribute flags
8383 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8385 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8386 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8388 * The size of each ring is fixed in the firmware, but the location is
8389 * configurable.
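/* A sketch of one TG3_BDINFO control block as just described (the driver
 * writes these fields through register offsets rather than through a
 * struct; the struct and its field names are illustrative only):
 */
struct tg3_bdinfo_example {
	u32 host_addr_hi;	/* TG3_BDINFO_HOST_ADDR, high 32 bits */
	u32 host_addr_lo;	/* TG3_BDINFO_HOST_ADDR, low 32 bits */
	u32 maxlen_flags;	/* (rx max buffer size << 16) | flags */
	u32 nic_addr;		/* descriptor location in NIC SRAM */
};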
8391 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8392 ((u64) tpr->rx_std_mapping >> 32));
8393 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8394 ((u64) tpr->rx_std_mapping & 0xffffffff));
8395 if (!tg3_flag(tp, 5717_PLUS))
8396 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8397 NIC_SRAM_RX_BUFFER_DESC);
8399 /* Disable the mini ring */
8400 if (!tg3_flag(tp, 5705_PLUS))
8401 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8402 BDINFO_FLAGS_DISABLED);
8404 /* Program the jumbo buffer descriptor ring control
8405 * blocks on those devices that have them.
8407 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8408 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8410 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8411 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8412 ((u64) tpr->rx_jmb_mapping >> 32));
8413 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8414 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8415 val = TG3_RX_JMB_RING_SIZE(tp) <<
8416 BDINFO_FLAGS_MAXLEN_SHIFT;
8417 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8418 val | BDINFO_FLAGS_USE_EXT_RECV);
8419 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8420 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8421 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8422 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8423 } else {
8424 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8425 BDINFO_FLAGS_DISABLED);
8428 if (tg3_flag(tp, 57765_PLUS)) {
8429 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8430 val = TG3_RX_STD_MAX_SIZE_5700;
8431 else
8432 val = TG3_RX_STD_MAX_SIZE_5717;
8433 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8434 val |= (TG3_RX_STD_DMA_SZ << 2);
8435 } else
8436 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8437 } else
8438 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8440 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8442 tpr->rx_std_prod_idx = tp->rx_pending;
8443 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8445 tpr->rx_jmb_prod_idx =
8446 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8447 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8449 tg3_rings_reset(tp);
8451 /* Initialize MAC address and backoff seed. */
8452 __tg3_set_mac_addr(tp, 0);
8454 /* MTU + ethernet header + FCS + optional VLAN tag */
8455 tw32(MAC_RX_MTU_SIZE,
8456 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8458 /* The slot time is changed by tg3_setup_phy if we
8459 * run at gigabit with half duplex.
8461 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8462 (6 << TX_LENGTHS_IPG_SHIFT) |
8463 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8466 val |= tr32(MAC_TX_LENGTHS) &
8467 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8468 TX_LENGTHS_CNT_DWN_VAL_MSK);
8470 tw32(MAC_TX_LENGTHS, val);
8472 /* Receive rules. */
8473 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8474 tw32(RCVLPC_CONFIG, 0x0181);
8476 /* Calculate RDMAC_MODE setting early, we need it to determine
8477 * the RCVLPC_STATE_ENABLE mask.
8479 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8480 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8481 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8482 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8483 RDMAC_MODE_LNGREAD_ENAB);
8485 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8486 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8488 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8489 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8490 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8491 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8492 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8493 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8496 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8497 if (tg3_flag(tp, TSO_CAPABLE) &&
8498 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8499 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8500 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8501 !tg3_flag(tp, IS_5788)) {
8502 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8506 if (tg3_flag(tp, PCI_EXPRESS))
8507 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8509 if (tg3_flag(tp, HW_TSO_1) ||
8510 tg3_flag(tp, HW_TSO_2) ||
8511 tg3_flag(tp, HW_TSO_3))
8512 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8514 if (tg3_flag(tp, 57765_PLUS) ||
8515 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8516 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8517 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8519 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8520 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8522 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8523 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8524 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8525 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8526 tg3_flag(tp, 57765_PLUS)) {
8527 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8528 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8529 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8530 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8531 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8532 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8533 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8534 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8535 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8537 tw32(TG3_RDMA_RSRVCTRL_REG,
8538 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8541 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8542 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8543 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8544 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8545 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8546 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8549 /* Receive/send statistics. */
8550 if (tg3_flag(tp, 5750_PLUS)) {
8551 val = tr32(RCVLPC_STATS_ENABLE);
8552 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8553 tw32(RCVLPC_STATS_ENABLE, val);
8554 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8555 tg3_flag(tp, TSO_CAPABLE)) {
8556 val = tr32(RCVLPC_STATS_ENABLE);
8557 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8558 tw32(RCVLPC_STATS_ENABLE, val);
8559 } else {
8560 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8562 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8563 tw32(SNDDATAI_STATSENAB, 0xffffff);
8564 tw32(SNDDATAI_STATSCTRL,
8565 (SNDDATAI_SCTRL_ENABLE |
8566 SNDDATAI_SCTRL_FASTUPD));
8568 /* Set up the host coalescing engine. */
8569 tw32(HOSTCC_MODE, 0);
8570 for (i = 0; i < 2000; i++) {
8571 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8572 break;
8573 udelay(10);
8576 __tg3_set_coalesce(tp, &tp->coal);
8578 if (!tg3_flag(tp, 5705_PLUS)) {
8579 /* Status/statistics block address. See tg3_timer,
8580 * the tg3_periodic_fetch_stats call there, and
8581 * tg3_get_stats to see how this works for 5705/5750 chips.
8583 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8584 ((u64) tp->stats_mapping >> 32));
8585 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8586 ((u64) tp->stats_mapping & 0xffffffff));
8587 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8589 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8591 /* Clear statistics and status block memory areas */
8592 for (i = NIC_SRAM_STATS_BLK;
8593 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8594 i += sizeof(u32)) {
8595 tg3_write_mem(tp, i, 0);
8596 udelay(40);
8600 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8602 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8603 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8604 if (!tg3_flag(tp, 5705_PLUS))
8605 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8607 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8608 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8609 /* reset to prevent losing 1st rx packet intermittently */
8610 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8611 udelay(10);
8614 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8615 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8616 MAC_MODE_FHDE_ENABLE;
8617 if (tg3_flag(tp, ENABLE_APE))
8618 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8619 if (!tg3_flag(tp, 5705_PLUS) &&
8620 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8621 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8622 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8623 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8624 udelay(40);
8626 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8627 * If TG3_FLAG_IS_NIC is zero, we should read the
8628 * register to preserve the GPIO settings for LOMs. The GPIOs,
8629 * whether used as inputs or outputs, are set by boot code after
8630 * reset.
8632 if (!tg3_flag(tp, IS_NIC)) {
8633 u32 gpio_mask;
8635 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8636 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8637 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8640 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8641 GRC_LCLCTRL_GPIO_OUTPUT3;
8643 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8644 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8646 tp->grc_local_ctrl &= ~gpio_mask;
8647 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8649 /* GPIO1 must be driven high for eeprom write protect */
8650 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8651 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8652 GRC_LCLCTRL_GPIO_OUTPUT1);
8654 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8655 udelay(100);
8657 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8658 val = tr32(MSGINT_MODE);
8659 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8660 tw32(MSGINT_MODE, val);
8663 if (!tg3_flag(tp, 5705_PLUS)) {
8664 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8665 udelay(40);
8668 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8669 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8670 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8671 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8672 WDMAC_MODE_LNGREAD_ENAB);
8674 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8675 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8676 if (tg3_flag(tp, TSO_CAPABLE) &&
8677 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8678 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8679 /* nothing */
8680 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8681 !tg3_flag(tp, IS_5788)) {
8682 val |= WDMAC_MODE_RX_ACCEL;
8686 /* Enable host coalescing bug fix */
8687 if (tg3_flag(tp, 5755_PLUS))
8688 val |= WDMAC_MODE_STATUS_TAG_FIX;
8690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8691 val |= WDMAC_MODE_BURST_ALL_DATA;
8693 tw32_f(WDMAC_MODE, val);
8694 udelay(40);
8696 if (tg3_flag(tp, PCIX_MODE)) {
8697 u16 pcix_cmd;
8699 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8700 &pcix_cmd);
8701 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8702 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8703 pcix_cmd |= PCI_X_CMD_READ_2K;
8704 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8705 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8706 pcix_cmd |= PCI_X_CMD_READ_2K;
8708 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8709 pcix_cmd);
8712 tw32_f(RDMAC_MODE, rdmac_mode);
8713 udelay(40);
8715 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8716 if (!tg3_flag(tp, 5705_PLUS))
8717 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8719 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8720 tw32(SNDDATAC_MODE,
8721 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8722 else
8723 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8725 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8726 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8727 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8728 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8729 val |= RCVDBDI_MODE_LRG_RING_SZ;
8730 tw32(RCVDBDI_MODE, val);
8731 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8732 if (tg3_flag(tp, HW_TSO_1) ||
8733 tg3_flag(tp, HW_TSO_2) ||
8734 tg3_flag(tp, HW_TSO_3))
8735 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8736 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8737 if (tg3_flag(tp, ENABLE_TSS))
8738 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8739 tw32(SNDBDI_MODE, val);
8740 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8742 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8743 err = tg3_load_5701_a0_firmware_fix(tp);
8744 if (err)
8745 return err;
8748 if (tg3_flag(tp, TSO_CAPABLE)) {
8749 err = tg3_load_tso_firmware(tp);
8750 if (err)
8751 return err;
8754 tp->tx_mode = TX_MODE_ENABLE;
8756 if (tg3_flag(tp, 5755_PLUS) ||
8757 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8758 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8760 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8761 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8762 tp->tx_mode &= ~val;
8763 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8766 tw32_f(MAC_TX_MODE, tp->tx_mode);
8767 udelay(100);
8769 if (tg3_flag(tp, ENABLE_RSS)) {
8770 int i = 0;
8771 u32 reg = MAC_RSS_INDIR_TBL_0;
8773 if (tp->irq_cnt == 2) {
8774 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8775 tw32(reg, 0x0);
8776 reg += 4;
8778 } else {
8779 u32 val;
8781 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8782 val = i % (tp->irq_cnt - 1);
8783 i++;
8784 for (; i % 8; i++) {
8785 val <<= 4;
8786 val |= (i % (tp->irq_cnt - 1));
8788 tw32(reg, val);
8789 reg += 4;
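/* Worked example: with tp->irq_cnt == 5 (four rx rings), the loop above
 * packs eight 4-bit ring indices per 32-bit register, most significant
 * nibble first, so every register gets 0,1,2,3,0,1,2,3 == 0x01230123.
 */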
8793 /* Set up the "secret" hash key. */
8794 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8795 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8796 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8797 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8798 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8799 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8800 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8801 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8802 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8803 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8806 tp->rx_mode = RX_MODE_ENABLE;
8807 if (tg3_flag(tp, 5755_PLUS))
8808 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8810 if (tg3_flag(tp, ENABLE_RSS))
8811 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8812 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8813 RX_MODE_RSS_IPV6_HASH_EN |
8814 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8815 RX_MODE_RSS_IPV4_HASH_EN |
8816 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8818 tw32_f(MAC_RX_MODE, tp->rx_mode);
8819 udelay(10);
8821 tw32(MAC_LED_CTRL, tp->led_ctrl);
8823 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8824 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8825 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8826 udelay(10);
8828 tw32_f(MAC_RX_MODE, tp->rx_mode);
8829 udelay(10);
8831 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8832 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8833 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8834 /* Set drive transmission level to 1.2V */
8835 /* only if the signal pre-emphasis bit is not set */
8836 val = tr32(MAC_SERDES_CFG);
8837 val &= 0xfffff000;
8838 val |= 0x880;
8839 tw32(MAC_SERDES_CFG, val);
8841 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8842 tw32(MAC_SERDES_CFG, 0x616000);
8845 /* Prevent chip from dropping frames when flow control
8846 * is enabled.
8848 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8849 val = 1;
8850 else
8851 val = 2;
8852 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8854 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8855 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8856 /* Use hardware link auto-negotiation */
8857 tg3_flag_set(tp, HW_AUTONEG);
8860 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8861 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8862 u32 tmp;
8864 tmp = tr32(SERDES_RX_CTRL);
8865 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8866 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8867 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8868 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8871 if (!tg3_flag(tp, USE_PHYLIB)) {
8872 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8873 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8874 tp->link_config.speed = tp->link_config.orig_speed;
8875 tp->link_config.duplex = tp->link_config.orig_duplex;
8876 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8879 err = tg3_setup_phy(tp, 0);
8880 if (err)
8881 return err;
8883 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8884 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8885 u32 tmp;
8887 /* Clear CRC stats. */
8888 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8889 tg3_writephy(tp, MII_TG3_TEST1,
8890 tmp | MII_TG3_TEST1_CRC_EN);
8891 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8896 __tg3_set_rx_mode(tp->dev);
8898 /* Initialize receive rules. */
8899 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8900 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8901 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8902 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8904 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8905 limit = 8;
8906 else
8907 limit = 16;
8908 if (tg3_flag(tp, ENABLE_ASF))
8909 limit -= 4;
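/* Every case in the switch below deliberately falls through to the one
 * beneath it, so starting at 'limit' zeroes all remaining rule/value
 * pairs down to rule 4 (rules 0 and 1 were programmed above; the writes
 * for rules 2 and 3 are commented out).  When ASF is enabled, limit is
 * reduced by four so the top four rules are left untouched, presumably
 * for the ASF firmware's own use.
 */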
8910 switch (limit) {
8911 case 16:
8912 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8913 case 15:
8914 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8915 case 14:
8916 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8917 case 13:
8918 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8919 case 12:
8920 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8921 case 11:
8922 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8923 case 10:
8924 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8925 case 9:
8926 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8927 case 8:
8928 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8929 case 7:
8930 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8931 case 6:
8932 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8933 case 5:
8934 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8935 case 4:
8936 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8937 case 3:
8938 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8939 case 2:
8940 case 1:
8942 default:
8943 break;
8946 if (tg3_flag(tp, ENABLE_APE))
8947 /* Write our heartbeat update interval to APE. */
8948 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8949 APE_HOST_HEARTBEAT_INT_DISABLE);
8951 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8953 return 0;
8956 /* Called at device open time to get the chip ready for
8957 * packet processing. Invoked with tp->lock held.
8959 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8961 tg3_switch_clocks(tp);
8963 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8965 return tg3_reset_hw(tp, reset_phy);
8968 #define TG3_STAT_ADD32(PSTAT, REG) \
8969 do { u32 __val = tr32(REG); \
8970 (PSTAT)->low += __val; \
8971 if ((PSTAT)->low < __val) \
8972 (PSTAT)->high += 1; \
8973 } while (0)
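/* TG3_STAT_ADD32 accumulates a 32-bit hardware counter into a 64-bit
 * {high, low} pair, detecting the carry via unsigned wraparound: if
 * low + __val wrapped, the new low ends up smaller than __val.  Worked
 * example: low = 0xffffff00, __val = 0x200 gives a new low of 0x100,
 * and 0x100 < 0x200, so high is incremented.
 */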
8975 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8977 struct tg3_hw_stats *sp = tp->hw_stats;
8979 if (!netif_carrier_ok(tp->dev))
8980 return;
8982 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8983 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8984 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8985 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8986 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8987 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8988 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8989 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8990 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8991 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8992 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8993 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8994 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8996 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8997 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8998 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8999 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9000 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9001 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9002 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9003 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9004 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9005 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9006 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9007 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9008 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9009 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9011 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9012 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9013 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9014 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9015 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9016 } else {
9017 u32 val = tr32(HOSTCC_FLOW_ATTN);
9018 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9019 if (val) {
9020 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9021 sp->rx_discards.low += val;
9022 if (sp->rx_discards.low < val)
9023 sp->rx_discards.high += 1;
9025 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9027 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9030 static void tg3_chk_missed_msi(struct tg3 *tp)
9032 u32 i;
9034 for (i = 0; i < tp->irq_cnt; i++) {
9035 struct tg3_napi *tnapi = &tp->napi[i];
9037 if (tg3_has_work(tnapi)) {
9038 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9039 tnapi->last_tx_cons == tnapi->tx_cons) {
9040 if (tnapi->chk_msi_cnt < 1) {
9041 tnapi->chk_msi_cnt++;
9042 return;
9044 tw32_mailbox(tnapi->int_mbox,
9045 tnapi->last_tag << 24);
9048 tnapi->chk_msi_cnt = 0;
9049 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9050 tnapi->last_tx_cons = tnapi->tx_cons;
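/* The heuristic above: if a vector has work pending but neither its rx
 * nor its tx consumer index has advanced since the previous tick, the
 * MSI that should have serviced it was probably lost, so the interrupt
 * is re-triggered by rewriting int_mbox with the last status tag.  One
 * idle tick is tolerated (chk_msi_cnt) before kicking the mailbox.
 */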
9054 static void tg3_timer(unsigned long __opaque)
9056 struct tg3 *tp = (struct tg3 *) __opaque;
9058 if (tp->irq_sync)
9059 goto restart_timer;
9061 spin_lock(&tp->lock);
9063 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9064 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9065 tg3_chk_missed_msi(tp);
9067 if (!tg3_flag(tp, TAGGED_STATUS)) {
9068 /* All of this garbage is because when using non-tagged
9069 * IRQ status, the mailbox/status_block protocol the chip
9070 * uses with the CPU is race prone.
9072 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9073 tw32(GRC_LOCAL_CTRL,
9074 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9075 } else {
9076 tw32(HOSTCC_MODE, tp->coalesce_mode |
9077 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9080 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9081 tg3_flag_set(tp, RESTART_TIMER);
9082 spin_unlock(&tp->lock);
9083 schedule_work(&tp->reset_task);
9084 return;
9088 /* This part only runs once per second. */
9089 if (!--tp->timer_counter) {
9090 if (tg3_flag(tp, 5705_PLUS))
9091 tg3_periodic_fetch_stats(tp);
9093 if (tp->setlpicnt && !--tp->setlpicnt)
9094 tg3_phy_eee_enable(tp);
9096 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9097 u32 mac_stat;
9098 int phy_event;
9100 mac_stat = tr32(MAC_STATUS);
9102 phy_event = 0;
9103 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9104 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9105 phy_event = 1;
9106 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9107 phy_event = 1;
9109 if (phy_event)
9110 tg3_setup_phy(tp, 0);
9111 } else if (tg3_flag(tp, POLL_SERDES)) {
9112 u32 mac_stat = tr32(MAC_STATUS);
9113 int need_setup = 0;
9115 if (netif_carrier_ok(tp->dev) &&
9116 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9117 need_setup = 1;
9119 if (!netif_carrier_ok(tp->dev) &&
9120 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9121 MAC_STATUS_SIGNAL_DET))) {
9122 need_setup = 1;
9124 if (need_setup) {
9125 if (!tp->serdes_counter) {
9126 tw32_f(MAC_MODE,
9127 (tp->mac_mode &
9128 ~MAC_MODE_PORT_MODE_MASK));
9129 udelay(40);
9130 tw32_f(MAC_MODE, tp->mac_mode);
9131 udelay(40);
9133 tg3_setup_phy(tp, 0);
9135 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9136 tg3_flag(tp, 5780_CLASS)) {
9137 tg3_serdes_parallel_detect(tp);
9140 tp->timer_counter = tp->timer_multiplier;
9143 /* Heartbeat is only sent once every 2 seconds.
9145 * The heartbeat is to tell the ASF firmware that the host
9146 * driver is still alive. In the event that the OS crashes,
9147 * ASF needs to reset the hardware to free up the FIFO space
9148 * that may be filled with rx packets destined for the host.
9149 * If the FIFO is full, ASF will no longer function properly.
9151 * Unintended resets have been reported on real-time kernels
9152 * where the timer doesn't run on time. Netpoll will also have
9153 * the same problem.
9155 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9156 * to check the ring condition when the heartbeat is expiring
9157 * before doing the reset. This will prevent most unintended
9158 * resets.
9160 if (!--tp->asf_counter) {
9161 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9162 tg3_wait_for_event_ack(tp);
9164 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9165 FWCMD_NICDRV_ALIVE3);
9166 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9167 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9168 TG3_FW_UPDATE_TIMEOUT_SEC);
9170 tg3_generate_fw_event(tp);
9172 tp->asf_counter = tp->asf_multiplier;
9175 spin_unlock(&tp->lock);
9177 restart_timer:
9178 tp->timer.expires = jiffies + tp->timer_offset;
9179 add_timer(&tp->timer);
9182 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9184 irq_handler_t fn;
9185 unsigned long flags;
9186 char *name;
9187 struct tg3_napi *tnapi = &tp->napi[irq_num];
9189 if (tp->irq_cnt == 1)
9190 name = tp->dev->name;
9191 else {
9192 name = &tnapi->irq_lbl[0];
9193 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9194 name[IFNAMSIZ-1] = 0;
9197 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9198 fn = tg3_msi;
9199 if (tg3_flag(tp, 1SHOT_MSI))
9200 fn = tg3_msi_1shot;
9201 flags = 0;
9202 } else {
9203 fn = tg3_interrupt;
9204 if (tg3_flag(tp, TAGGED_STATUS))
9205 fn = tg3_interrupt_tagged;
9206 flags = IRQF_SHARED;
9209 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9212 static int tg3_test_interrupt(struct tg3 *tp)
9214 struct tg3_napi *tnapi = &tp->napi[0];
9215 struct net_device *dev = tp->dev;
9216 int err, i, intr_ok = 0;
9217 u32 val;
9219 if (!netif_running(dev))
9220 return -ENODEV;
9222 tg3_disable_ints(tp);
9224 free_irq(tnapi->irq_vec, tnapi);
9227 * Turn off MSI one shot mode. Otherwise this test has no
9228 * observable way to know whether the interrupt was delivered.
9230 if (tg3_flag(tp, 57765_PLUS)) {
9231 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9232 tw32(MSGINT_MODE, val);
9235 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9236 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9237 if (err)
9238 return err;
9240 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9241 tg3_enable_ints(tp);
9243 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9244 tnapi->coal_now);
9246 for (i = 0; i < 5; i++) {
9247 u32 int_mbox, misc_host_ctrl;
9249 int_mbox = tr32_mailbox(tnapi->int_mbox);
9250 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9252 if ((int_mbox != 0) ||
9253 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9254 intr_ok = 1;
9255 break;
9258 if (tg3_flag(tp, 57765_PLUS) &&
9259 tnapi->hw_status->status_tag != tnapi->last_tag)
9260 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9262 msleep(10);
9265 tg3_disable_ints(tp);
9267 free_irq(tnapi->irq_vec, tnapi);
9269 err = tg3_request_irq(tp, 0);
9271 if (err)
9272 return err;
9274 if (intr_ok) {
9275 /* Reenable MSI one shot mode. */
9276 if (tg3_flag(tp, 57765_PLUS)) {
9277 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9278 tw32(MSGINT_MODE, val);
9280 return 0;
9283 return -EIO;
9286 /* Returns 0 if the MSI test succeeds, or if the MSI test fails and
9287 * INTx mode is successfully restored.
9289 static int tg3_test_msi(struct tg3 *tp)
9291 int err;
9292 u16 pci_cmd;
9294 if (!tg3_flag(tp, USING_MSI))
9295 return 0;
9297 /* Turn off SERR reporting in case MSI terminates with Master
9298 * Abort.
9300 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9301 pci_write_config_word(tp->pdev, PCI_COMMAND,
9302 pci_cmd & ~PCI_COMMAND_SERR);
9304 err = tg3_test_interrupt(tp);
9306 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9308 if (!err)
9309 return 0;
9311 /* other failures */
9312 if (err != -EIO)
9313 return err;
9315 /* MSI test failed, go back to INTx mode */
9316 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9317 "to INTx mode. Please report this failure to the PCI "
9318 "maintainer and include system chipset information\n");
9320 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9322 pci_disable_msi(tp->pdev);
9324 tg3_flag_clear(tp, USING_MSI);
9325 tp->napi[0].irq_vec = tp->pdev->irq;
9327 err = tg3_request_irq(tp, 0);
9328 if (err)
9329 return err;
9331 /* Need to reset the chip because the MSI cycle may have terminated
9332 * with Master Abort.
9334 tg3_full_lock(tp, 1);
9336 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9337 err = tg3_init_hw(tp, 1);
9339 tg3_full_unlock(tp);
9341 if (err)
9342 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9344 return err;
9347 static int tg3_request_firmware(struct tg3 *tp)
9349 const __be32 *fw_data;
9351 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9352 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9353 tp->fw_needed);
9354 return -ENOENT;
9357 fw_data = (void *)tp->fw->data;
9359 /* Firmware blob starts with version numbers, followed by
9360 * start address and _full_ length including BSS sections
9361 * (which must be longer than the actual data, of course). */
9364 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9365 if (tp->fw_len < (tp->fw->size - 12)) {
9366 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9367 tp->fw_len, tp->fw_needed);
9368 release_firmware(tp->fw);
9369 tp->fw = NULL;
9370 return -EINVAL;
9373 /* We no longer need firmware; we have it. */
9374 tp->fw_needed = NULL;
9375 return 0;
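/* A sketch of the 12-byte header the length check above assumes (field
 * names are illustrative; the driver reads the words by index, with
 * fw_data[2] being the full length):
 */
struct tg3_fw_hdr_example {
	__be32 version;		/* firmware version numbers */
	__be32 base_addr;	/* load/start address in NIC memory */
	__be32 len;		/* full image length, including BSS */
};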
9378 static bool tg3_enable_msix(struct tg3 *tp)
9380 int i, rc, cpus = num_online_cpus();
9381 struct msix_entry msix_ent[tp->irq_max];
9383 if (cpus == 1)
9384 /* Just fall back to the simpler MSI mode. */
9385 return false;
9388 * We want as many rx rings enabled as there are cpus.
9389 * The first MSIX vector only deals with link interrupts, etc,
9390 * so we add one to the number of vectors we are requesting.
9392 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9394 for (i = 0; i < tp->irq_max; i++) {
9395 msix_ent[i].entry = i;
9396 msix_ent[i].vector = 0;
9399 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9400 if (rc < 0) {
9401 return false;
9402 } else if (rc != 0) {
9403 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9404 return false;
9405 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9406 tp->irq_cnt, rc);
9407 tp->irq_cnt = rc;
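/* pci_enable_msix() here has the old tri-state contract: 0 means every
 * requested vector was granted, a negative value is a hard failure, and
 * a positive value is the number of vectors actually available -- hence
 * the second call above retrying with exactly 'rc' entries.
 */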
9410 for (i = 0; i < tp->irq_max; i++)
9411 tp->napi[i].irq_vec = msix_ent[i].vector;
9413 netif_set_real_num_tx_queues(tp->dev, 1);
9414 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9415 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9416 pci_disable_msix(tp->pdev);
9417 return false;
9420 if (tp->irq_cnt > 1) {
9421 tg3_flag_set(tp, ENABLE_RSS);
9423 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9424 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9425 tg3_flag_set(tp, ENABLE_TSS);
9426 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9430 return true;
9433 static void tg3_ints_init(struct tg3 *tp)
9435 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9436 !tg3_flag(tp, TAGGED_STATUS)) {
9437 /* All MSI supporting chips should support tagged
9438 * status. Assert that this is the case.
9440 netdev_warn(tp->dev,
9441 "MSI without TAGGED_STATUS? Not using MSI\n");
9442 goto defcfg;
9445 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9446 tg3_flag_set(tp, USING_MSIX);
9447 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9448 tg3_flag_set(tp, USING_MSI);
9450 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9451 u32 msi_mode = tr32(MSGINT_MODE);
9452 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9453 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9454 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9456 defcfg:
9457 if (!tg3_flag(tp, USING_MSIX)) {
9458 tp->irq_cnt = 1;
9459 tp->napi[0].irq_vec = tp->pdev->irq;
9460 netif_set_real_num_tx_queues(tp->dev, 1);
9461 netif_set_real_num_rx_queues(tp->dev, 1);
9465 static void tg3_ints_fini(struct tg3 *tp)
9467 if (tg3_flag(tp, USING_MSIX))
9468 pci_disable_msix(tp->pdev);
9469 else if (tg3_flag(tp, USING_MSI))
9470 pci_disable_msi(tp->pdev);
9471 tg3_flag_clear(tp, USING_MSI);
9472 tg3_flag_clear(tp, USING_MSIX);
9473 tg3_flag_clear(tp, ENABLE_RSS);
9474 tg3_flag_clear(tp, ENABLE_TSS);
9477 static int tg3_open(struct net_device *dev)
9479 struct tg3 *tp = netdev_priv(dev);
9480 int i, err;
9482 if (tp->fw_needed) {
9483 err = tg3_request_firmware(tp);
9484 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9485 if (err)
9486 return err;
9487 } else if (err) {
9488 netdev_warn(tp->dev, "TSO capability disabled\n");
9489 tg3_flag_clear(tp, TSO_CAPABLE);
9490 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9491 netdev_notice(tp->dev, "TSO capability restored\n");
9492 tg3_flag_set(tp, TSO_CAPABLE);
9496 netif_carrier_off(tp->dev);
9498 err = tg3_power_up(tp);
9499 if (err)
9500 return err;
9502 tg3_full_lock(tp, 0);
9504 tg3_disable_ints(tp);
9505 tg3_flag_clear(tp, INIT_COMPLETE);
9507 tg3_full_unlock(tp);
9510 * Set up interrupts first so we know how
9511 * many NAPI resources to allocate
9513 tg3_ints_init(tp);
9515 /* The placement of this call is tied
9516 * to the setup and use of Host TX descriptors.
9518 err = tg3_alloc_consistent(tp);
9519 if (err)
9520 goto err_out1;
9522 tg3_napi_init(tp);
9524 tg3_napi_enable(tp);
9526 for (i = 0; i < tp->irq_cnt; i++) {
9527 struct tg3_napi *tnapi = &tp->napi[i];
9528 err = tg3_request_irq(tp, i);
9529 if (err) {
9530 for (i--; i >= 0; i--)
9531 free_irq(tnapi->irq_vec, tnapi);
9532 break;
9536 if (err)
9537 goto err_out2;
9539 tg3_full_lock(tp, 0);
9541 err = tg3_init_hw(tp, 1);
9542 if (err) {
9543 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9544 tg3_free_rings(tp);
9545 } else {
9546 if (tg3_flag(tp, TAGGED_STATUS) &&
9547 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9548 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9549 tp->timer_offset = HZ;
9550 else
9551 tp->timer_offset = HZ / 10;
9553 BUG_ON(tp->timer_offset > HZ);
9554 tp->timer_counter = tp->timer_multiplier =
9555 (HZ / tp->timer_offset);
9556 tp->asf_counter = tp->asf_multiplier =
9557 ((HZ / tp->timer_offset) * 2);
9559 init_timer(&tp->timer);
9560 tp->timer.expires = jiffies + tp->timer_offset;
9561 tp->timer.data = (unsigned long) tp;
9562 tp->timer.function = tg3_timer;
9565 tg3_full_unlock(tp);
9567 if (err)
9568 goto err_out3;
9570 if (tg3_flag(tp, USING_MSI)) {
9571 err = tg3_test_msi(tp);
9573 if (err) {
9574 tg3_full_lock(tp, 0);
9575 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9576 tg3_free_rings(tp);
9577 tg3_full_unlock(tp);
9579 goto err_out2;
9582 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9583 u32 val = tr32(PCIE_TRANSACTION_CFG);
9585 tw32(PCIE_TRANSACTION_CFG,
9586 val | PCIE_TRANS_CFG_1SHOT_MSI);
9590 tg3_phy_start(tp);
9592 tg3_full_lock(tp, 0);
9594 add_timer(&tp->timer);
9595 tg3_flag_set(tp, INIT_COMPLETE);
9596 tg3_enable_ints(tp);
9598 tg3_full_unlock(tp);
9600 netif_tx_start_all_queues(dev);
9603 * Reset the loopback feature if it was turned on while the device was down;
9604 * make sure that it's installed properly now.
9606 if (dev->features & NETIF_F_LOOPBACK)
9607 tg3_set_loopback(dev, dev->features);
9609 return 0;
9611 err_out3:
9612 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9613 struct tg3_napi *tnapi = &tp->napi[i];
9614 free_irq(tnapi->irq_vec, tnapi);
9617 err_out2:
9618 tg3_napi_disable(tp);
9619 tg3_napi_fini(tp);
9620 tg3_free_consistent(tp);
9622 err_out1:
9623 tg3_ints_fini(tp);
9624 tg3_frob_aux_power(tp, false);
9625 pci_set_power_state(tp->pdev, PCI_D3hot);
9626 return err;
9629 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9630 struct rtnl_link_stats64 *);
9631 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9633 static int tg3_close(struct net_device *dev)
9635 int i;
9636 struct tg3 *tp = netdev_priv(dev);
9638 tg3_napi_disable(tp);
9639 cancel_work_sync(&tp->reset_task);
9641 netif_tx_stop_all_queues(dev);
9643 del_timer_sync(&tp->timer);
9645 tg3_phy_stop(tp);
9647 tg3_full_lock(tp, 1);
9649 tg3_disable_ints(tp);
9651 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9652 tg3_free_rings(tp);
9653 tg3_flag_clear(tp, INIT_COMPLETE);
9655 tg3_full_unlock(tp);
9657 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9658 struct tg3_napi *tnapi = &tp->napi[i];
9659 free_irq(tnapi->irq_vec, tnapi);
9662 tg3_ints_fini(tp);
9664 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9666 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9667 sizeof(tp->estats_prev));
9669 tg3_napi_fini(tp);
9671 tg3_free_consistent(tp);
9673 tg3_power_down(tp);
9675 netif_carrier_off(tp->dev);
9677 return 0;
9680 static inline u64 get_stat64(tg3_stat64_t *val)
9682 return ((u64)val->high << 32) | ((u64)val->low);
9685 static u64 calc_crc_errors(struct tg3 *tp)
9687 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9689 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9690 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9691 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9692 u32 val;
9694 spin_lock_bh(&tp->lock);
9695 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9696 tg3_writephy(tp, MII_TG3_TEST1,
9697 val | MII_TG3_TEST1_CRC_EN);
9698 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9699 } else
9700 val = 0;
9701 spin_unlock_bh(&tp->lock);
9703 tp->phy_crc_errors += val;
9705 return tp->phy_crc_errors;
9708 return get_stat64(&hw_stats->rx_fcs_errors);
9711 #define ESTAT_ADD(member) \
9712 estats->member = old_estats->member + \
9713 get_stat64(&hw_stats->member)
9715 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9717 struct tg3_ethtool_stats *estats = &tp->estats;
9718 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9719 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9721 if (!hw_stats)
9722 return old_estats;
9724 ESTAT_ADD(rx_octets);
9725 ESTAT_ADD(rx_fragments);
9726 ESTAT_ADD(rx_ucast_packets);
9727 ESTAT_ADD(rx_mcast_packets);
9728 ESTAT_ADD(rx_bcast_packets);
9729 ESTAT_ADD(rx_fcs_errors);
9730 ESTAT_ADD(rx_align_errors);
9731 ESTAT_ADD(rx_xon_pause_rcvd);
9732 ESTAT_ADD(rx_xoff_pause_rcvd);
9733 ESTAT_ADD(rx_mac_ctrl_rcvd);
9734 ESTAT_ADD(rx_xoff_entered);
9735 ESTAT_ADD(rx_frame_too_long_errors);
9736 ESTAT_ADD(rx_jabbers);
9737 ESTAT_ADD(rx_undersize_packets);
9738 ESTAT_ADD(rx_in_length_errors);
9739 ESTAT_ADD(rx_out_length_errors);
9740 ESTAT_ADD(rx_64_or_less_octet_packets);
9741 ESTAT_ADD(rx_65_to_127_octet_packets);
9742 ESTAT_ADD(rx_128_to_255_octet_packets);
9743 ESTAT_ADD(rx_256_to_511_octet_packets);
9744 ESTAT_ADD(rx_512_to_1023_octet_packets);
9745 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9746 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9747 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9748 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9749 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9751 ESTAT_ADD(tx_octets);
9752 ESTAT_ADD(tx_collisions);
9753 ESTAT_ADD(tx_xon_sent);
9754 ESTAT_ADD(tx_xoff_sent);
9755 ESTAT_ADD(tx_flow_control);
9756 ESTAT_ADD(tx_mac_errors);
9757 ESTAT_ADD(tx_single_collisions);
9758 ESTAT_ADD(tx_mult_collisions);
9759 ESTAT_ADD(tx_deferred);
9760 ESTAT_ADD(tx_excessive_collisions);
9761 ESTAT_ADD(tx_late_collisions);
9762 ESTAT_ADD(tx_collide_2times);
9763 ESTAT_ADD(tx_collide_3times);
9764 ESTAT_ADD(tx_collide_4times);
9765 ESTAT_ADD(tx_collide_5times);
9766 ESTAT_ADD(tx_collide_6times);
9767 ESTAT_ADD(tx_collide_7times);
9768 ESTAT_ADD(tx_collide_8times);
9769 ESTAT_ADD(tx_collide_9times);
9770 ESTAT_ADD(tx_collide_10times);
9771 ESTAT_ADD(tx_collide_11times);
9772 ESTAT_ADD(tx_collide_12times);
9773 ESTAT_ADD(tx_collide_13times);
9774 ESTAT_ADD(tx_collide_14times);
9775 ESTAT_ADD(tx_collide_15times);
9776 ESTAT_ADD(tx_ucast_packets);
9777 ESTAT_ADD(tx_mcast_packets);
9778 ESTAT_ADD(tx_bcast_packets);
9779 ESTAT_ADD(tx_carrier_sense_errors);
9780 ESTAT_ADD(tx_discards);
9781 ESTAT_ADD(tx_errors);
9783 ESTAT_ADD(dma_writeq_full);
9784 ESTAT_ADD(dma_write_prioq_full);
9785 ESTAT_ADD(rxbds_empty);
9786 ESTAT_ADD(rx_discards);
9787 ESTAT_ADD(rx_errors);
9788 ESTAT_ADD(rx_threshold_hit);
9790 ESTAT_ADD(dma_readq_full);
9791 ESTAT_ADD(dma_read_prioq_full);
9792 ESTAT_ADD(tx_comp_queue_full);
9794 ESTAT_ADD(ring_set_send_prod_index);
9795 ESTAT_ADD(ring_status_update);
9796 ESTAT_ADD(nic_irqs);
9797 ESTAT_ADD(nic_avoided_irqs);
9798 ESTAT_ADD(nic_tx_threshold_hit);
9800 ESTAT_ADD(mbuf_lwm_thresh_hit);
9802 return estats;
9805 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9806 struct rtnl_link_stats64 *stats)
9808 struct tg3 *tp = netdev_priv(dev);
9809 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9810 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9812 if (!hw_stats)
9813 return old_stats;
9815 stats->rx_packets = old_stats->rx_packets +
9816 get_stat64(&hw_stats->rx_ucast_packets) +
9817 get_stat64(&hw_stats->rx_mcast_packets) +
9818 get_stat64(&hw_stats->rx_bcast_packets);
9820 stats->tx_packets = old_stats->tx_packets +
9821 get_stat64(&hw_stats->tx_ucast_packets) +
9822 get_stat64(&hw_stats->tx_mcast_packets) +
9823 get_stat64(&hw_stats->tx_bcast_packets);
9825 stats->rx_bytes = old_stats->rx_bytes +
9826 get_stat64(&hw_stats->rx_octets);
9827 stats->tx_bytes = old_stats->tx_bytes +
9828 get_stat64(&hw_stats->tx_octets);
9830 stats->rx_errors = old_stats->rx_errors +
9831 get_stat64(&hw_stats->rx_errors);
9832 stats->tx_errors = old_stats->tx_errors +
9833 get_stat64(&hw_stats->tx_errors) +
9834 get_stat64(&hw_stats->tx_mac_errors) +
9835 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9836 get_stat64(&hw_stats->tx_discards);
9838 stats->multicast = old_stats->multicast +
9839 get_stat64(&hw_stats->rx_mcast_packets);
9840 stats->collisions = old_stats->collisions +
9841 get_stat64(&hw_stats->tx_collisions);
9843 stats->rx_length_errors = old_stats->rx_length_errors +
9844 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9845 get_stat64(&hw_stats->rx_undersize_packets);
9847 stats->rx_over_errors = old_stats->rx_over_errors +
9848 get_stat64(&hw_stats->rxbds_empty);
9849 stats->rx_frame_errors = old_stats->rx_frame_errors +
9850 get_stat64(&hw_stats->rx_align_errors);
9851 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9852 get_stat64(&hw_stats->tx_discards);
9853 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9854 get_stat64(&hw_stats->tx_carrier_sense_errors);
9856 stats->rx_crc_errors = old_stats->rx_crc_errors +
9857 calc_crc_errors(tp);
9859 stats->rx_missed_errors = old_stats->rx_missed_errors +
9860 get_stat64(&hw_stats->rx_discards);
9862 stats->rx_dropped = tp->rx_dropped;
9864 return stats;
9867 static inline u32 calc_crc(unsigned char *buf, int len)
9869 u32 reg;
9870 u32 tmp;
9871 int j, k;
9873 reg = 0xffffffff;
9875 for (j = 0; j < len; j++) {
9876 reg ^= buf[j];
9878 for (k = 0; k < 8; k++) {
9879 tmp = reg & 0x01;
9881 reg >>= 1;
9883 if (tmp)
9884 reg ^= 0xedb88320;
9888 return ~reg;
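/* calc_crc() above is the standard bit-reflected CRC-32 (polynomial
 * 0xedb88320, initial value 0xffffffff, final inversion) computed one
 * bit at a time -- the same CRC Ethernet uses for its frame check
 * sequence.  Note that the final inversion is undone by the ~crc in
 * __tg3_set_rx_mode() below, so the hash bits effectively come from the
 * uninverted little-endian CRC, which appears to match what
 * ether_crc_le() in <linux/crc32.h> computes.
 */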
9891 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9893 /* accept or reject all multicast frames */
9894 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9895 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9896 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9897 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9900 static void __tg3_set_rx_mode(struct net_device *dev)
9902 struct tg3 *tp = netdev_priv(dev);
9903 u32 rx_mode;
9905 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9906 RX_MODE_KEEP_VLAN_TAG);
9908 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9909 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9910 * flag clear.
9912 if (!tg3_flag(tp, ENABLE_ASF))
9913 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9914 #endif
9916 if (dev->flags & IFF_PROMISC) {
9917 /* Promiscuous mode. */
9918 rx_mode |= RX_MODE_PROMISC;
9919 } else if (dev->flags & IFF_ALLMULTI) {
9920 /* Accept all multicast. */
9921 tg3_set_multi(tp, 1);
9922 } else if (netdev_mc_empty(dev)) {
9923 /* Reject all multicast. */
9924 tg3_set_multi(tp, 0);
9925 } else {
9926 /* Accept one or more multicast(s). */
9927 struct netdev_hw_addr *ha;
9928 u32 mc_filter[4] = { 0, };
9929 u32 regidx;
9930 u32 bit;
9931 u32 crc;
9933 netdev_for_each_mc_addr(ha, dev) {
9934 crc = calc_crc(ha->addr, ETH_ALEN);
9935 bit = ~crc & 0x7f;
9936 regidx = (bit & 0x60) >> 5;
9937 bit &= 0x1f;
9938 mc_filter[regidx] |= (1 << bit);
9941 tw32(MAC_HASH_REG_0, mc_filter[0]);
9942 tw32(MAC_HASH_REG_1, mc_filter[1]);
9943 tw32(MAC_HASH_REG_2, mc_filter[2]);
9944 tw32(MAC_HASH_REG_3, mc_filter[3]);
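/* Worked example with a hypothetical CRC: if calc_crc() returned
 * 0x12345678, then ~crc = 0xedcba987, bit = 0xedcba987 & 0x7f = 0x07,
 * regidx = (0x07 & 0x60) >> 5 = 0, bit & 0x1f = 7, so bit 7 of
 * MAC_HASH_REG_0 gets set for that address.
 */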
9947 if (rx_mode != tp->rx_mode) {
9948 tp->rx_mode = rx_mode;
9949 tw32_f(MAC_RX_MODE, rx_mode);
9950 udelay(10);
9954 static void tg3_set_rx_mode(struct net_device *dev)
9956 struct tg3 *tp = netdev_priv(dev);
9958 if (!netif_running(dev))
9959 return;
9961 tg3_full_lock(tp, 0);
9962 __tg3_set_rx_mode(dev);
9963 tg3_full_unlock(tp);
9966 static int tg3_get_regs_len(struct net_device *dev)
9968 return TG3_REG_BLK_SIZE;
9971 static void tg3_get_regs(struct net_device *dev,
9972 struct ethtool_regs *regs, void *_p)
9974 struct tg3 *tp = netdev_priv(dev);
9976 regs->version = 0;
9978 memset(_p, 0, TG3_REG_BLK_SIZE);
9980 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9981 return;
9983 tg3_full_lock(tp, 0);
9985 tg3_dump_legacy_regs(tp, (u32 *)_p);
9987 tg3_full_unlock(tp);
9990 static int tg3_get_eeprom_len(struct net_device *dev)
9992 struct tg3 *tp = netdev_priv(dev);
9994 return tp->nvram_size;
9997 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9999 struct tg3 *tp = netdev_priv(dev);
10000 int ret;
10001 u8 *pd;
10002 u32 i, offset, len, b_offset, b_count;
10003 __be32 val;
10005 if (tg3_flag(tp, NO_NVRAM))
10006 return -EINVAL;
10008 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10009 return -EAGAIN;
10011 offset = eeprom->offset;
10012 len = eeprom->len;
10013 eeprom->len = 0;
10015 eeprom->magic = TG3_EEPROM_MAGIC;
10017 if (offset & 3) {
10018 /* adjustments to start on required 4 byte boundary */
10019 b_offset = offset & 3;
10020 b_count = 4 - b_offset;
10021 if (b_count > len) {
10022 /* i.e. offset=1 len=2 */
10023 b_count = len;
10025 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10026 if (ret)
10027 return ret;
10028 memcpy(data, ((char *)&val) + b_offset, b_count);
10029 len -= b_count;
10030 offset += b_count;
10031 eeprom->len += b_count;
10034 /* read bytes up to the last 4 byte boundary */
10035 pd = &data[eeprom->len];
10036 for (i = 0; i < (len - (len & 3)); i += 4) {
10037 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10038 if (ret) {
10039 eeprom->len += i;
10040 return ret;
10042 memcpy(pd + i, &val, 4);
10044 eeprom->len += i;
10046 if (len & 3) {
10047 /* read last bytes not ending on 4 byte boundary */
10048 pd = &data[eeprom->len];
10049 b_count = len & 3;
10050 b_offset = offset + len - b_count;
10051 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10052 if (ret)
10053 return ret;
10054 memcpy(pd, &val, b_count);
10055 eeprom->len += b_count;
10057 return 0;
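/* Worked example of the head fixup above: offset = 1, len = 2 reads the
 * aligned word at NVRAM offset 0 (b_offset = 1, b_count = 2 after being
 * clamped to len) and copies bytes 1-2 out of it; len drops to 0, so
 * the aligned loop and the tail fixup both do nothing.
 */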
10060 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10062 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10064 struct tg3 *tp = netdev_priv(dev);
10065 int ret;
10066 u32 offset, len, b_offset, odd_len;
10067 u8 *buf;
10068 __be32 start, end;
10070 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10071 return -EAGAIN;
10073 if (tg3_flag(tp, NO_NVRAM) ||
10074 eeprom->magic != TG3_EEPROM_MAGIC)
10075 return -EINVAL;
10077 offset = eeprom->offset;
10078 len = eeprom->len;
10080 if ((b_offset = (offset & 3))) {
10081 /* adjustments to start on required 4 byte boundary */
10082 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10083 if (ret)
10084 return ret;
10085 len += b_offset;
10086 offset &= ~3;
10087 if (len < 4)
10088 len = 4;
10091 odd_len = 0;
10092 if (len & 3) {
10093 /* adjustments to end on required 4 byte boundary */
10094 odd_len = 1;
10095 len = (len + 3) & ~3;
10096 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10097 if (ret)
10098 return ret;
10101 buf = data;
10102 if (b_offset || odd_len) {
10103 buf = kmalloc(len, GFP_KERNEL);
10104 if (!buf)
10105 return -ENOMEM;
10106 if (b_offset)
10107 memcpy(buf, &start, 4);
10108 if (odd_len)
10109 memcpy(buf+len-4, &end, 4);
10110 memcpy(buf + b_offset, data, eeprom->len);
10113 ret = tg3_nvram_write_block(tp, offset, len, buf);
10115 if (buf != data)
10116 kfree(buf);
10118 return ret;
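/* Worked example of the read-modify-write above: offset = 2, len = 3
 * expands to an aligned write with offset = 0, len = 8.  The first word
 * is seeded from NVRAM ('start'), the last word from NVRAM ('end'), and
 * the caller's three bytes land at buf + 2 before the whole buffer is
 * written back.
 */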
10121 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10123 struct tg3 *tp = netdev_priv(dev);
10125 if (tg3_flag(tp, USE_PHYLIB)) {
10126 struct phy_device *phydev;
10127 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10128 return -EAGAIN;
10129 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10130 return phy_ethtool_gset(phydev, cmd);
10133 cmd->supported = (SUPPORTED_Autoneg);
10135 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10136 cmd->supported |= (SUPPORTED_1000baseT_Half |
10137 SUPPORTED_1000baseT_Full);
10139 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10140 cmd->supported |= (SUPPORTED_100baseT_Half |
10141 SUPPORTED_100baseT_Full |
10142 SUPPORTED_10baseT_Half |
10143 SUPPORTED_10baseT_Full |
10144 SUPPORTED_TP);
10145 cmd->port = PORT_TP;
10146 } else {
10147 cmd->supported |= SUPPORTED_FIBRE;
10148 cmd->port = PORT_FIBRE;
10151 cmd->advertising = tp->link_config.advertising;
10152 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10153 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10154 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10155 cmd->advertising |= ADVERTISED_Pause;
10156 } else {
10157 cmd->advertising |= ADVERTISED_Pause |
10158 ADVERTISED_Asym_Pause;
10160 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10161 cmd->advertising |= ADVERTISED_Asym_Pause;
10164 if (netif_running(dev)) {
10165 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10166 cmd->duplex = tp->link_config.active_duplex;
10167 } else {
10168 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10169 cmd->duplex = DUPLEX_INVALID;
10171 cmd->phy_address = tp->phy_addr;
10172 cmd->transceiver = XCVR_INTERNAL;
10173 cmd->autoneg = tp->link_config.autoneg;
10174 cmd->maxtxpkt = 0;
10175 cmd->maxrxpkt = 0;
10176 return 0;
10179 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10181 struct tg3 *tp = netdev_priv(dev);
10182 u32 speed = ethtool_cmd_speed(cmd);
10184 if (tg3_flag(tp, USE_PHYLIB)) {
10185 struct phy_device *phydev;
10186 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10187 return -EAGAIN;
10188 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10189 return phy_ethtool_sset(phydev, cmd);
10192 if (cmd->autoneg != AUTONEG_ENABLE &&
10193 cmd->autoneg != AUTONEG_DISABLE)
10194 return -EINVAL;
10196 if (cmd->autoneg == AUTONEG_DISABLE &&
10197 cmd->duplex != DUPLEX_FULL &&
10198 cmd->duplex != DUPLEX_HALF)
10199 return -EINVAL;
10201 if (cmd->autoneg == AUTONEG_ENABLE) {
10202 u32 mask = ADVERTISED_Autoneg |
10203 ADVERTISED_Pause |
10204 ADVERTISED_Asym_Pause;
10206 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10207 mask |= ADVERTISED_1000baseT_Half |
10208 ADVERTISED_1000baseT_Full;
10210 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10211 mask |= ADVERTISED_100baseT_Half |
10212 ADVERTISED_100baseT_Full |
10213 ADVERTISED_10baseT_Half |
10214 ADVERTISED_10baseT_Full |
10215 ADVERTISED_TP;
10216 else
10217 mask |= ADVERTISED_FIBRE;
10219 if (cmd->advertising & ~mask)
10220 return -EINVAL;
10222 mask &= (ADVERTISED_1000baseT_Half |
10223 ADVERTISED_1000baseT_Full |
10224 ADVERTISED_100baseT_Half |
10225 ADVERTISED_100baseT_Full |
10226 ADVERTISED_10baseT_Half |
10227 ADVERTISED_10baseT_Full);
10229 cmd->advertising &= mask;
10230 } else {
10231 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10232 if (speed != SPEED_1000)
10233 return -EINVAL;
10235 if (cmd->duplex != DUPLEX_FULL)
10236 return -EINVAL;
10237 } else {
10238 if (speed != SPEED_100 &&
10239 speed != SPEED_10)
10240 return -EINVAL;
10244 tg3_full_lock(tp, 0);
10246 tp->link_config.autoneg = cmd->autoneg;
10247 if (cmd->autoneg == AUTONEG_ENABLE) {
10248 tp->link_config.advertising = (cmd->advertising |
10249 ADVERTISED_Autoneg);
10250 tp->link_config.speed = SPEED_INVALID;
10251 tp->link_config.duplex = DUPLEX_INVALID;
10252 } else {
10253 tp->link_config.advertising = 0;
10254 tp->link_config.speed = speed;
10255 tp->link_config.duplex = cmd->duplex;
10258 tp->link_config.orig_speed = tp->link_config.speed;
10259 tp->link_config.orig_duplex = tp->link_config.duplex;
10260 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10262 if (netif_running(dev))
10263 tg3_setup_phy(tp, 1);
10265 tg3_full_unlock(tp);
10267 return 0;
10270 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10272 struct tg3 *tp = netdev_priv(dev);
10274 strcpy(info->driver, DRV_MODULE_NAME);
10275 strcpy(info->version, DRV_MODULE_VERSION);
10276 strcpy(info->fw_version, tp->fw_ver);
10277 strcpy(info->bus_info, pci_name(tp->pdev));
10280 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10282 struct tg3 *tp = netdev_priv(dev);
10284 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10285 wol->supported = WAKE_MAGIC;
10286 else
10287 wol->supported = 0;
10288 wol->wolopts = 0;
10289 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10290 wol->wolopts = WAKE_MAGIC;
10291 memset(&wol->sopass, 0, sizeof(wol->sopass));
10294 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10296 struct tg3 *tp = netdev_priv(dev);
10297 struct device *dp = &tp->pdev->dev;
10299 if (wol->wolopts & ~WAKE_MAGIC)
10300 return -EINVAL;
10301 if ((wol->wolopts & WAKE_MAGIC) &&
10302 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10303 return -EINVAL;
10305 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10307 spin_lock_bh(&tp->lock);
10308 if (device_may_wakeup(dp))
10309 tg3_flag_set(tp, WOL_ENABLE);
10310 else
10311 tg3_flag_clear(tp, WOL_ENABLE);
10312 spin_unlock_bh(&tp->lock);
10314 return 0;
10317 static u32 tg3_get_msglevel(struct net_device *dev)
10319 struct tg3 *tp = netdev_priv(dev);
10320 return tp->msg_enable;
10323 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10325 struct tg3 *tp = netdev_priv(dev);
10326 tp->msg_enable = value;
10329 static int tg3_nway_reset(struct net_device *dev)
10331 struct tg3 *tp = netdev_priv(dev);
10332 int r;
10334 if (!netif_running(dev))
10335 return -EAGAIN;
10337 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10338 return -EINVAL;
10340 if (tg3_flag(tp, USE_PHYLIB)) {
10341 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10342 return -EAGAIN;
10343 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10344 } else {
10345 u32 bmcr;
10347 spin_lock_bh(&tp->lock);
10348 r = -EINVAL;
10349 tg3_readphy(tp, MII_BMCR, &bmcr);
10350 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10351 ((bmcr & BMCR_ANENABLE) ||
10352 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10353 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10354 BMCR_ANENABLE);
10355 r = 0;
10357 spin_unlock_bh(&tp->lock);
10360 return r;
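10361 }
10362 /* Report the hardware ring limits and the currently configured ring sizes. */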
10363 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10365 struct tg3 *tp = netdev_priv(dev);
10367 ering->rx_max_pending = tp->rx_std_ring_mask;
10368 ering->rx_mini_max_pending = 0;
10369 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10370 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10371 else
10372 ering->rx_jumbo_max_pending = 0;
10374 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10376 ering->rx_pending = tp->rx_pending;
10377 ering->rx_mini_pending = 0;
10378 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10379 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10380 else
10381 ering->rx_jumbo_pending = 0;
10383 ering->tx_pending = tp->napi[0].tx_pending;
10386 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10388 struct tg3 *tp = netdev_priv(dev);
10389 int i, irq_sync = 0, err = 0;
10391 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10392 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10393 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10394 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10395 (tg3_flag(tp, TSO_BUG) &&
10396 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10397 return -EINVAL;
10399 if (netif_running(dev)) {
10400 tg3_phy_stop(tp);
10401 tg3_netif_stop(tp);
10402 irq_sync = 1;
10405 tg3_full_lock(tp, irq_sync);
10407 tp->rx_pending = ering->rx_pending;
10409 if (tg3_flag(tp, MAX_RXPEND_64) &&
10410 tp->rx_pending > 63)
10411 tp->rx_pending = 63;
10412 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10414 for (i = 0; i < tp->irq_max; i++)
10415 tp->napi[i].tx_pending = ering->tx_pending;
10417 if (netif_running(dev)) {
10418 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10419 err = tg3_restart_hw(tp, 1);
10420 if (!err)
10421 tg3_netif_start(tp);
10424 tg3_full_unlock(tp);
10426 if (irq_sync && !err)
10427 tg3_phy_start(tp);
10429 return err;
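10430 }
10431 /* Report pause autonegotiation and the active rx/tx flow control state. */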
10432 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10434 struct tg3 *tp = netdev_priv(dev);
10436 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10438 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10439 epause->rx_pause = 1;
10440 else
10441 epause->rx_pause = 0;
10443 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10444 epause->tx_pause = 1;
10445 else
10446 epause->tx_pause = 0;
10449 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10451 struct tg3 *tp = netdev_priv(dev);
10452 int err = 0;
10454 if (tg3_flag(tp, USE_PHYLIB)) {
10455 u32 newadv;
10456 struct phy_device *phydev;
10458 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10460 if (!(phydev->supported & SUPPORTED_Pause) ||
10461 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10462 (epause->rx_pause != epause->tx_pause)))
10463 return -EINVAL;
10465 tp->link_config.flowctrl = 0;
10466 if (epause->rx_pause) {
10467 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10469 if (epause->tx_pause) {
10470 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10471 newadv = ADVERTISED_Pause;
10472 } else
10473 newadv = ADVERTISED_Pause |
10474 ADVERTISED_Asym_Pause;
10475 } else if (epause->tx_pause) {
10476 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10477 newadv = ADVERTISED_Asym_Pause;
10478 } else
10479 newadv = 0;
10481 if (epause->autoneg)
10482 tg3_flag_set(tp, PAUSE_AUTONEG);
10483 else
10484 tg3_flag_clear(tp, PAUSE_AUTONEG);
10486 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10487 u32 oldadv = phydev->advertising &
10488 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10489 if (oldadv != newadv) {
10490 phydev->advertising &=
10491 ~(ADVERTISED_Pause |
10492 ADVERTISED_Asym_Pause);
10493 phydev->advertising |= newadv;
10494 if (phydev->autoneg) {
10495 /*
10496 * Always renegotiate the link to
10497 * inform our link partner of our
10498 * flow control settings, even if the
10499 * flow control is forced. Let
10500 * tg3_adjust_link() do the final
10501 * flow control setup.
10502 */
10503 return phy_start_aneg(phydev);
10507 if (!epause->autoneg)
10508 tg3_setup_flow_control(tp, 0, 0);
10509 } else {
10510 tp->link_config.orig_advertising &=
10511 ~(ADVERTISED_Pause |
10512 ADVERTISED_Asym_Pause);
10513 tp->link_config.orig_advertising |= newadv;
10515 } else {
10516 int irq_sync = 0;
10518 if (netif_running(dev)) {
10519 tg3_netif_stop(tp);
10520 irq_sync = 1;
10523 tg3_full_lock(tp, irq_sync);
10525 if (epause->autoneg)
10526 tg3_flag_set(tp, PAUSE_AUTONEG);
10527 else
10528 tg3_flag_clear(tp, PAUSE_AUTONEG);
10529 if (epause->rx_pause)
10530 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10531 else
10532 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10533 if (epause->tx_pause)
10534 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10535 else
10536 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10538 if (netif_running(dev)) {
10539 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10540 err = tg3_restart_hw(tp, 1);
10541 if (!err)
10542 tg3_netif_start(tp);
10545 tg3_full_unlock(tp);
10548 return err;
10551 static int tg3_get_sset_count(struct net_device *dev, int sset)
10553 switch (sset) {
10554 case ETH_SS_TEST:
10555 return TG3_NUM_TEST;
10556 case ETH_SS_STATS:
10557 return TG3_NUM_STATS;
10558 default:
10559 return -EOPNOTSUPP;
10563 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10565 switch (stringset) {
10566 case ETH_SS_STATS:
10567 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10568 break;
10569 case ETH_SS_TEST:
10570 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10571 break;
10572 default:
10573 WARN_ON(1); /* unknown stringset - should never happen */
10574 break;
10578 static int tg3_set_phys_id(struct net_device *dev,
10579 enum ethtool_phys_id_state state)
10581 struct tg3 *tp = netdev_priv(dev);
10583 if (!netif_running(tp->dev))
10584 return -EAGAIN;
10586 switch (state) {
10587 case ETHTOOL_ID_ACTIVE:
10588 return 1; /* cycle on/off once per second */
10590 case ETHTOOL_ID_ON:
10591 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10592 LED_CTRL_1000MBPS_ON |
10593 LED_CTRL_100MBPS_ON |
10594 LED_CTRL_10MBPS_ON |
10595 LED_CTRL_TRAFFIC_OVERRIDE |
10596 LED_CTRL_TRAFFIC_BLINK |
10597 LED_CTRL_TRAFFIC_LED);
10598 break;
10600 case ETHTOOL_ID_OFF:
10601 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10602 LED_CTRL_TRAFFIC_OVERRIDE);
10603 break;
10605 case ETHTOOL_ID_INACTIVE:
10606 tw32(MAC_LED_CTRL, tp->led_ctrl);
10607 break;
10610 return 0;
10613 static void tg3_get_ethtool_stats(struct net_device *dev,
10614 struct ethtool_stats *estats, u64 *tmp_stats)
10616 struct tg3 *tp = netdev_priv(dev);
10617 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
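10618 }
10619 /* Read the VPD block, either from the NVRAM EXTVPD directory entry or through PCI config space. */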
10620 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10622 int i;
10623 __be32 *buf;
10624 u32 offset = 0, len = 0;
10625 u32 magic, val;
10627 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10628 return NULL;
10630 if (magic == TG3_EEPROM_MAGIC) {
10631 for (offset = TG3_NVM_DIR_START;
10632 offset < TG3_NVM_DIR_END;
10633 offset += TG3_NVM_DIRENT_SIZE) {
10634 if (tg3_nvram_read(tp, offset, &val))
10635 return NULL;
10637 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10638 TG3_NVM_DIRTYPE_EXTVPD)
10639 break;
10642 if (offset != TG3_NVM_DIR_END) {
10643 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10644 if (tg3_nvram_read(tp, offset + 4, &offset))
10645 return NULL;
10647 offset = tg3_nvram_logical_addr(tp, offset);
10651 if (!offset || !len) {
10652 offset = TG3_NVM_VPD_OFF;
10653 len = TG3_NVM_VPD_LEN;
10656 buf = kmalloc(len, GFP_KERNEL);
10657 if (buf == NULL)
10658 return NULL;
10660 if (magic == TG3_EEPROM_MAGIC) {
10661 for (i = 0; i < len; i += 4) {
10662 /* The data is in little-endian format in NVRAM.
10663 * Use the big-endian read routines to preserve
10664 * the byte order as it exists in NVRAM.
10665 */
10666 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10667 goto error;
10669 } else {
10670 u8 *ptr;
10671 ssize_t cnt;
10672 unsigned int pos = 0;
10674 ptr = (u8 *)&buf[0];
10675 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10676 cnt = pci_read_vpd(tp->pdev, pos,
10677 len - pos, ptr);
10678 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10679 cnt = 0;
10680 else if (cnt < 0)
10681 goto error;
10683 if (pos != len)
10684 goto error;
10687 *vpdlen = len;
10689 return buf;
10691 error:
10692 kfree(buf);
10693 return NULL;
10696 #define NVRAM_TEST_SIZE 0x100
10697 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10698 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10699 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10700 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
10701 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
10702 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
10703 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10704 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
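10705 /* Validate NVRAM contents: image checksums for the legacy, selfboot and HW selfboot formats, plus the VPD checksum. */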
10706 static int tg3_test_nvram(struct tg3 *tp)
10708 u32 csum, magic, len;
10709 __be32 *buf;
10710 int i, j, k, err = 0, size;
10712 if (tg3_flag(tp, NO_NVRAM))
10713 return 0;
10715 if (tg3_nvram_read(tp, 0, &magic) != 0)
10716 return -EIO;
10718 if (magic == TG3_EEPROM_MAGIC)
10719 size = NVRAM_TEST_SIZE;
10720 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10721 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10722 TG3_EEPROM_SB_FORMAT_1) {
10723 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10724 case TG3_EEPROM_SB_REVISION_0:
10725 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10726 break;
10727 case TG3_EEPROM_SB_REVISION_2:
10728 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10729 break;
10730 case TG3_EEPROM_SB_REVISION_3:
10731 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10732 break;
10733 case TG3_EEPROM_SB_REVISION_4:
10734 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10735 break;
10736 case TG3_EEPROM_SB_REVISION_5:
10737 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10738 break;
10739 case TG3_EEPROM_SB_REVISION_6:
10740 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10741 break;
10742 default:
10743 return -EIO;
10745 } else
10746 return 0;
10747 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10748 size = NVRAM_SELFBOOT_HW_SIZE;
10749 else
10750 return -EIO;
10752 buf = kmalloc(size, GFP_KERNEL);
10753 if (buf == NULL)
10754 return -ENOMEM;
10756 err = -EIO;
10757 for (i = 0, j = 0; i < size; i += 4, j++) {
10758 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10759 if (err)
10760 break;
10762 if (i < size)
10763 goto out;
10765 /* Selfboot format */
10766 magic = be32_to_cpu(buf[0]);
10767 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10768 TG3_EEPROM_MAGIC_FW) {
10769 u8 *buf8 = (u8 *) buf, csum8 = 0;
10771 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10772 TG3_EEPROM_SB_REVISION_2) {
10773 /* For rev 2, the csum doesn't include the MBA. */
10774 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10775 csum8 += buf8[i];
10776 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10777 csum8 += buf8[i];
10778 } else {
10779 for (i = 0; i < size; i++)
10780 csum8 += buf8[i];
10783 if (csum8 == 0) {
10784 err = 0;
10785 goto out;
10788 err = -EIO;
10789 goto out;
10792 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10793 TG3_EEPROM_MAGIC_HW) {
10794 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10795 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10796 u8 *buf8 = (u8 *) buf;
10798 /* Separate the parity bits and the data bytes. */
10799 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10800 if ((i == 0) || (i == 8)) {
10801 int l;
10802 u8 msk;
10804 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10805 parity[k++] = buf8[i] & msk;
10806 i++;
10807 } else if (i == 16) {
10808 int l;
10809 u8 msk;
10811 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10812 parity[k++] = buf8[i] & msk;
10813 i++;
10815 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10816 parity[k++] = buf8[i] & msk;
10817 i++;
10819 data[j++] = buf8[i];
10822 err = -EIO;
10823 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10824 u8 hw8 = hweight8(data[i]);
10826 if ((hw8 & 0x1) && parity[i])
10827 goto out;
10828 else if (!(hw8 & 0x1) && !parity[i])
10829 goto out;
10831 err = 0;
10832 goto out;
10835 err = -EIO;
10837 /* Bootstrap checksum at offset 0x10 */
10838 csum = calc_crc((unsigned char *) buf, 0x10);
10839 if (csum != le32_to_cpu(buf[0x10/4]))
10840 goto out;
10842 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10843 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10844 if (csum != le32_to_cpu(buf[0xfc/4]))
10845 goto out;
10847 kfree(buf);
10849 buf = tg3_vpd_readblock(tp, &len);
10850 if (!buf)
10851 return -ENOMEM;
10853 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
10854 if (i > 0) {
10855 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10856 if (j < 0)
10857 goto out;
10859 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
10860 goto out;
10862 i += PCI_VPD_LRDT_TAG_SIZE;
10863 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10864 PCI_VPD_RO_KEYWORD_CHKSUM);
10865 if (j > 0) {
10866 u8 csum8 = 0;
10868 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10870 for (i = 0; i <= j; i++)
10871 csum8 += ((u8 *)buf)[i];
10873 if (csum8)
10874 goto out;
10878 err = 0;
10880 out:
10881 kfree(buf);
10882 return err;
10885 #define TG3_SERDES_TIMEOUT_SEC 2
10886 #define TG3_COPPER_TIMEOUT_SEC 6
10888 static int tg3_test_link(struct tg3 *tp)
10890 int i, max;
10892 if (!netif_running(tp->dev))
10893 return -ENODEV;
10895 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10896 max = TG3_SERDES_TIMEOUT_SEC;
10897 else
10898 max = TG3_COPPER_TIMEOUT_SEC;
10900 for (i = 0; i < max; i++) {
10901 if (netif_carrier_ok(tp->dev))
10902 return 0;
10904 if (msleep_interruptible(1000))
10905 break;
10908 return -EIO;
10911 /* Only test the commonly used registers */
10912 static int tg3_test_registers(struct tg3 *tp)
10914 int i, is_5705, is_5750;
10915 u32 offset, read_mask, write_mask, val, save_val, read_val;
10916 static struct {
10917 u16 offset;
10918 u16 flags;
10919 #define TG3_FL_5705 0x1
10920 #define TG3_FL_NOT_5705 0x2
10921 #define TG3_FL_NOT_5788 0x4
10922 #define TG3_FL_NOT_5750 0x8
10923 u32 read_mask;
10924 u32 write_mask;
10925 } reg_tbl[] = {
10926 /* MAC Control Registers */
10927 { MAC_MODE, TG3_FL_NOT_5705,
10928 0x00000000, 0x00ef6f8c },
10929 { MAC_MODE, TG3_FL_5705,
10930 0x00000000, 0x01ef6b8c },
10931 { MAC_STATUS, TG3_FL_NOT_5705,
10932 0x03800107, 0x00000000 },
10933 { MAC_STATUS, TG3_FL_5705,
10934 0x03800100, 0x00000000 },
10935 { MAC_ADDR_0_HIGH, 0x0000,
10936 0x00000000, 0x0000ffff },
10937 { MAC_ADDR_0_LOW, 0x0000,
10938 0x00000000, 0xffffffff },
10939 { MAC_RX_MTU_SIZE, 0x0000,
10940 0x00000000, 0x0000ffff },
10941 { MAC_TX_MODE, 0x0000,
10942 0x00000000, 0x00000070 },
10943 { MAC_TX_LENGTHS, 0x0000,
10944 0x00000000, 0x00003fff },
10945 { MAC_RX_MODE, TG3_FL_NOT_5705,
10946 0x00000000, 0x000007fc },
10947 { MAC_RX_MODE, TG3_FL_5705,
10948 0x00000000, 0x000007dc },
10949 { MAC_HASH_REG_0, 0x0000,
10950 0x00000000, 0xffffffff },
10951 { MAC_HASH_REG_1, 0x0000,
10952 0x00000000, 0xffffffff },
10953 { MAC_HASH_REG_2, 0x0000,
10954 0x00000000, 0xffffffff },
10955 { MAC_HASH_REG_3, 0x0000,
10956 0x00000000, 0xffffffff },
10958 /* Receive Data and Receive BD Initiator Control Registers. */
10959 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10960 0x00000000, 0xffffffff },
10961 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10962 0x00000000, 0xffffffff },
10963 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10964 0x00000000, 0x00000003 },
10965 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10966 0x00000000, 0xffffffff },
10967 { RCVDBDI_STD_BD+0, 0x0000,
10968 0x00000000, 0xffffffff },
10969 { RCVDBDI_STD_BD+4, 0x0000,
10970 0x00000000, 0xffffffff },
10971 { RCVDBDI_STD_BD+8, 0x0000,
10972 0x00000000, 0xffff0002 },
10973 { RCVDBDI_STD_BD+0xc, 0x0000,
10974 0x00000000, 0xffffffff },
10976 /* Receive BD Initiator Control Registers. */
10977 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10978 0x00000000, 0xffffffff },
10979 { RCVBDI_STD_THRESH, TG3_FL_5705,
10980 0x00000000, 0x000003ff },
10981 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10982 0x00000000, 0xffffffff },
10984 /* Host Coalescing Control Registers. */
10985 { HOSTCC_MODE, TG3_FL_NOT_5705,
10986 0x00000000, 0x00000004 },
10987 { HOSTCC_MODE, TG3_FL_5705,
10988 0x00000000, 0x000000f6 },
10989 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10990 0x00000000, 0xffffffff },
10991 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10992 0x00000000, 0x000003ff },
10993 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10994 0x00000000, 0xffffffff },
10995 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10996 0x00000000, 0x000003ff },
10997 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10998 0x00000000, 0xffffffff },
10999 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11000 0x00000000, 0x000000ff },
11001 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11002 0x00000000, 0xffffffff },
11003 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11004 0x00000000, 0x000000ff },
11005 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11006 0x00000000, 0xffffffff },
11007 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11008 0x00000000, 0xffffffff },
11009 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11010 0x00000000, 0xffffffff },
11011 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11012 0x00000000, 0x000000ff },
11013 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11014 0x00000000, 0xffffffff },
11015 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11016 0x00000000, 0x000000ff },
11017 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11018 0x00000000, 0xffffffff },
11019 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11020 0x00000000, 0xffffffff },
11021 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11022 0x00000000, 0xffffffff },
11023 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11024 0x00000000, 0xffffffff },
11025 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11026 0x00000000, 0xffffffff },
11027 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11028 0xffffffff, 0x00000000 },
11029 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11030 0xffffffff, 0x00000000 },
11032 /* Buffer Manager Control Registers. */
11033 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11034 0x00000000, 0x007fff80 },
11035 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11036 0x00000000, 0x007fffff },
11037 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11038 0x00000000, 0x0000003f },
11039 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11040 0x00000000, 0x000001ff },
11041 { BUFMGR_MB_HIGH_WATER, 0x0000,
11042 0x00000000, 0x000001ff },
11043 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11044 0xffffffff, 0x00000000 },
11045 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11046 0xffffffff, 0x00000000 },
11048 /* Mailbox Registers */
11049 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11050 0x00000000, 0x000001ff },
11051 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11052 0x00000000, 0x000001ff },
11053 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11054 0x00000000, 0x000007ff },
11055 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11056 0x00000000, 0x000001ff },
11058 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11061 is_5705 = is_5750 = 0;
11062 if (tg3_flag(tp, 5705_PLUS)) {
11063 is_5705 = 1;
11064 if (tg3_flag(tp, 5750_PLUS))
11065 is_5750 = 1;
11068 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11069 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11070 continue;
11072 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11073 continue;
11075 if (tg3_flag(tp, IS_5788) &&
11076 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11077 continue;
11079 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11080 continue;
11082 offset = (u32) reg_tbl[i].offset;
11083 read_mask = reg_tbl[i].read_mask;
11084 write_mask = reg_tbl[i].write_mask;
11086 /* Save the original register content */
11087 save_val = tr32(offset);
11089 /* Determine the read-only value. */
11090 read_val = save_val & read_mask;
11092 /* Write zero to the register, then make sure the read-only bits
11093 * are not changed and the read/write bits are all zeros.
11094 */
11095 tw32(offset, 0);
11097 val = tr32(offset);
11099 /* Test the read-only and read/write bits. */
11100 if (((val & read_mask) != read_val) || (val & write_mask))
11101 goto out;
11103 /* Write ones to all the bits defined by RdMask and WrMask, then
11104 * make sure the read-only bits are not changed and the
11105 * read/write bits are all ones.
11106 */
11107 tw32(offset, read_mask | write_mask);
11109 val = tr32(offset);
11111 /* Test the read-only bits. */
11112 if ((val & read_mask) != read_val)
11113 goto out;
11115 /* Test the read/write bits. */
11116 if ((val & write_mask) != write_mask)
11117 goto out;
11119 tw32(offset, save_val);
11122 return 0;
11124 out:
11125 if (netif_msg_hw(tp))
11126 netdev_err(tp->dev,
11127 "Register test failed at offset %x\n", offset);
11128 tw32(offset, save_val);
11129 return -EIO;
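11130 }
11131 /* Write each test pattern to on-chip memory at [offset, offset + len) and read it back. */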
11132 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11134 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11135 int i;
11136 u32 j;
11138 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11139 for (j = 0; j < len; j += 4) {
11140 u32 val;
11142 tg3_write_mem(tp, offset + j, test_pattern[i]);
11143 tg3_read_mem(tp, offset + j, &val);
11144 if (val != test_pattern[i])
11145 return -EIO;
11148 return 0;
11151 static int tg3_test_memory(struct tg3 *tp)
11153 static struct mem_entry {
11154 u32 offset;
11155 u32 len;
11156 } mem_tbl_570x[] = {
11157 { 0x00000000, 0x00b50},
11158 { 0x00002000, 0x1c000},
11159 { 0xffffffff, 0x00000}
11160 }, mem_tbl_5705[] = {
11161 { 0x00000100, 0x0000c},
11162 { 0x00000200, 0x00008},
11163 { 0x00004000, 0x00800},
11164 { 0x00006000, 0x01000},
11165 { 0x00008000, 0x02000},
11166 { 0x00010000, 0x0e000},
11167 { 0xffffffff, 0x00000}
11168 }, mem_tbl_5755[] = {
11169 { 0x00000200, 0x00008},
11170 { 0x00004000, 0x00800},
11171 { 0x00006000, 0x00800},
11172 { 0x00008000, 0x02000},
11173 { 0x00010000, 0x0c000},
11174 { 0xffffffff, 0x00000}
11175 }, mem_tbl_5906[] = {
11176 { 0x00000200, 0x00008},
11177 { 0x00004000, 0x00400},
11178 { 0x00006000, 0x00400},
11179 { 0x00008000, 0x01000},
11180 { 0x00010000, 0x01000},
11181 { 0xffffffff, 0x00000}
11182 }, mem_tbl_5717[] = {
11183 { 0x00000200, 0x00008},
11184 { 0x00010000, 0x0a000},
11185 { 0x00020000, 0x13c00},
11186 { 0xffffffff, 0x00000}
11187 }, mem_tbl_57765[] = {
11188 { 0x00000200, 0x00008},
11189 { 0x00004000, 0x00800},
11190 { 0x00006000, 0x09800},
11191 { 0x00010000, 0x0a000},
11192 { 0xffffffff, 0x00000}
11194 struct mem_entry *mem_tbl;
11195 int err = 0;
11196 int i;
11198 if (tg3_flag(tp, 5717_PLUS))
11199 mem_tbl = mem_tbl_5717;
11200 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11201 mem_tbl = mem_tbl_57765;
11202 else if (tg3_flag(tp, 5755_PLUS))
11203 mem_tbl = mem_tbl_5755;
11204 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11205 mem_tbl = mem_tbl_5906;
11206 else if (tg3_flag(tp, 5705_PLUS))
11207 mem_tbl = mem_tbl_5705;
11208 else
11209 mem_tbl = mem_tbl_570x;
11211 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11212 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11213 if (err)
11214 break;
11217 return err;
11220 #define TG3_MAC_LOOPBACK 0
11221 #define TG3_PHY_LOOPBACK 1
11222 #define TG3_TSO_LOOPBACK 2
11224 #define TG3_TSO_MSS 500
11226 #define TG3_TSO_IP_HDR_LEN 20
11227 #define TG3_TSO_TCP_HDR_LEN 20
11228 #define TG3_TSO_TCP_OPT_LEN 12
11230 static const u8 tg3_tso_header[] = {
11231 0x08, 0x00,
11232 0x45, 0x00, 0x00, 0x00,
11233 0x00, 0x00, 0x40, 0x00,
11234 0x40, 0x06, 0x00, 0x00,
11235 0x0a, 0x00, 0x00, 0x01,
11236 0x0a, 0x00, 0x00, 0x02,
11237 0x0d, 0x00, 0xe0, 0x00,
11238 0x00, 0x00, 0x01, 0x00,
11239 0x00, 0x00, 0x02, 0x00,
11240 0x80, 0x10, 0x10, 0x00,
11241 0x14, 0x09, 0x00, 0x00,
11242 0x01, 0x01, 0x08, 0x0a,
11243 0x11, 0x11, 0x11, 0x11,
11244 0x11, 0x11, 0x11, 0x11,
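11245 };
11246 /* Build and transmit one test frame in MAC, PHY or TSO loopback mode and verify it comes back intact. */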
11247 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11249 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11250 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11251 u32 budget;
11252 struct sk_buff *skb, *rx_skb;
11253 u8 *tx_data;
11254 dma_addr_t map;
11255 int num_pkts, tx_len, rx_len, i, err;
11256 struct tg3_rx_buffer_desc *desc;
11257 struct tg3_napi *tnapi, *rnapi;
11258 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11260 tnapi = &tp->napi[0];
11261 rnapi = &tp->napi[0];
11262 if (tp->irq_cnt > 1) {
11263 if (tg3_flag(tp, ENABLE_RSS))
11264 rnapi = &tp->napi[1];
11265 if (tg3_flag(tp, ENABLE_TSS))
11266 tnapi = &tp->napi[1];
11268 coal_now = tnapi->coal_now | rnapi->coal_now;
11270 if (loopback_mode == TG3_MAC_LOOPBACK) {
11271 /* HW errata - MAC loopback fails in some cases on 5780.
11272 * Normal traffic and PHY loopback are not affected by
11273 * the errata. Also, the MAC loopback test is deprecated
11274 * for all newer ASIC revisions.
11275 */
11276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11277 tg3_flag(tp, CPMU_PRESENT))
11278 return 0;
11280 mac_mode = tp->mac_mode &
11281 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11282 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11283 if (!tg3_flag(tp, 5705_PLUS))
11284 mac_mode |= MAC_MODE_LINK_POLARITY;
11285 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11286 mac_mode |= MAC_MODE_PORT_MODE_MII;
11287 else
11288 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11289 tw32(MAC_MODE, mac_mode);
11290 } else {
11291 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11292 tg3_phy_fet_toggle_apd(tp, false);
11293 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11294 } else
11295 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11297 tg3_phy_toggle_automdix(tp, 0);
11299 tg3_writephy(tp, MII_BMCR, val);
11300 udelay(40);
11302 mac_mode = tp->mac_mode &
11303 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11304 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11305 tg3_writephy(tp, MII_TG3_FET_PTEST,
11306 MII_TG3_FET_PTEST_FRC_TX_LINK |
11307 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11308 /* The write needs to be flushed for the AC131 */
11309 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11310 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11311 mac_mode |= MAC_MODE_PORT_MODE_MII;
11312 } else
11313 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11315 /* reset to prevent losing 1st rx packet intermittently */
11316 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11317 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11318 udelay(10);
11319 tw32_f(MAC_RX_MODE, tp->rx_mode);
11321 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11322 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11323 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11324 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11325 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11326 mac_mode |= MAC_MODE_LINK_POLARITY;
11327 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11328 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11330 tw32(MAC_MODE, mac_mode);
11332 /* Wait up to 100 ms for the forced link to come up. */
11333 for (i = 0; i < 100; i++) {
11334 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11335 break;
11336 mdelay(1);
11340 err = -EIO;
11342 tx_len = pktsz;
11343 skb = netdev_alloc_skb(tp->dev, tx_len);
11344 if (!skb)
11345 return -ENOMEM;
11347 tx_data = skb_put(skb, tx_len);
11348 memcpy(tx_data, tp->dev->dev_addr, 6);
11349 memset(tx_data + 6, 0x0, 8);
11351 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11353 if (loopback_mode == TG3_TSO_LOOPBACK) {
11354 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11356 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11357 TG3_TSO_TCP_OPT_LEN;
11359 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11360 sizeof(tg3_tso_header));
11361 mss = TG3_TSO_MSS;
11363 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11364 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11366 /* Set the total length field in the IP header */
11367 iph->tot_len = htons((u16)(mss + hdr_len));
11369 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11370 TXD_FLAG_CPU_POST_DMA);
11372 if (tg3_flag(tp, HW_TSO_1) ||
11373 tg3_flag(tp, HW_TSO_2) ||
11374 tg3_flag(tp, HW_TSO_3)) {
11375 struct tcphdr *th;
11376 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11377 th = (struct tcphdr *)&tx_data[val];
11378 th->check = 0;
11379 } else
11380 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11382 if (tg3_flag(tp, HW_TSO_3)) {
11383 mss |= (hdr_len & 0xc) << 12;
11384 if (hdr_len & 0x10)
11385 base_flags |= 0x00000010;
11386 base_flags |= (hdr_len & 0x3e0) << 5;
11387 } else if (tg3_flag(tp, HW_TSO_2))
11388 mss |= hdr_len << 9;
11389 else if (tg3_flag(tp, HW_TSO_1) ||
11390 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11391 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11392 } else {
11393 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11396 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11397 } else {
11398 num_pkts = 1;
11399 data_off = ETH_HLEN;
11402 for (i = data_off; i < tx_len; i++)
11403 tx_data[i] = (u8) (i & 0xff);
11405 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11406 if (pci_dma_mapping_error(tp->pdev, map)) {
11407 dev_kfree_skb(skb);
11408 return -EIO;
11411 val = tnapi->tx_prod;
11412 tnapi->tx_buffers[val].skb = skb;
11413 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11415 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11416 rnapi->coal_now);
11418 udelay(10);
11420 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11422 budget = tg3_tx_avail(tnapi);
11423 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11424 base_flags | TXD_FLAG_END, mss, 0)) {
11425 tnapi->tx_buffers[val].skb = NULL;
11426 dev_kfree_skb(skb);
11427 return -EIO;
11430 tnapi->tx_prod++;
11432 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11433 tr32_mailbox(tnapi->prodmbox);
11435 udelay(10);
11437 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11438 for (i = 0; i < 35; i++) {
11439 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11440 coal_now);
11442 udelay(10);
11444 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11445 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11446 if ((tx_idx == tnapi->tx_prod) &&
11447 (rx_idx == (rx_start_idx + num_pkts)))
11448 break;
11451 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
11452 dev_kfree_skb(skb);
11454 if (tx_idx != tnapi->tx_prod)
11455 goto out;
11457 if (rx_idx != rx_start_idx + num_pkts)
11458 goto out;
11460 val = data_off;
11461 while (rx_idx != rx_start_idx) {
11462 desc = &rnapi->rx_rcb[rx_start_idx++];
11463 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11464 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11466 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11467 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11468 goto out;
11470 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11471 - ETH_FCS_LEN;
11473 if (loopback_mode != TG3_TSO_LOOPBACK) {
11474 if (rx_len != tx_len)
11475 goto out;
11477 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11478 if (opaque_key != RXD_OPAQUE_RING_STD)
11479 goto out;
11480 } else {
11481 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11482 goto out;
11484 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11485 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11486 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11487 goto out;
11490 if (opaque_key == RXD_OPAQUE_RING_STD) {
11491 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11492 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11493 mapping);
11494 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11495 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11496 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11497 mapping);
11498 } else
11499 goto out;
11501 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11502 PCI_DMA_FROMDEVICE);
11504 for (i = data_off; i < rx_len; i++, val++) {
11505 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11506 goto out;
11510 err = 0;
11512 /* tg3_free_rings will unmap and free the rx_skb */
11513 out:
11514 return err;
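11515 }
11516 /* Loopback results are bit-encoded: MAC-mode failures occupy bits 0-3, PHY-mode failures bits 4-7. */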
11517 #define TG3_STD_LOOPBACK_FAILED 1
11518 #define TG3_JMB_LOOPBACK_FAILED 2
11519 #define TG3_TSO_LOOPBACK_FAILED 4
11521 #define TG3_MAC_LOOPBACK_SHIFT 0
11522 #define TG3_PHY_LOOPBACK_SHIFT 4
11523 #define TG3_LOOPBACK_FAILED 0x00000077
11525 static int tg3_test_loopback(struct tg3 *tp)
11527 int err = 0;
11528 u32 eee_cap, cpmuctrl = 0;
11530 if (!netif_running(tp->dev))
11531 return TG3_LOOPBACK_FAILED;
11533 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11534 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11536 err = tg3_reset_hw(tp, 1);
11537 if (err) {
11538 err = TG3_LOOPBACK_FAILED;
11539 goto done;
11542 if (tg3_flag(tp, ENABLE_RSS)) {
11543 int i;
11545 /* Reroute all rx packets to the 1st queue */
11546 for (i = MAC_RSS_INDIR_TBL_0;
11547 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11548 tw32(i, 0x0);
11551 /* Turn off gphy autopowerdown. */
11552 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11553 tg3_phy_toggle_apd(tp, false);
11555 if (tg3_flag(tp, CPMU_PRESENT)) {
11556 int i;
11557 u32 status;
11559 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11561 /* Wait for up to 40 microseconds to acquire lock. */
11562 for (i = 0; i < 4; i++) {
11563 status = tr32(TG3_CPMU_MUTEX_GNT);
11564 if (status == CPMU_MUTEX_GNT_DRIVER)
11565 break;
11566 udelay(10);
11569 if (status != CPMU_MUTEX_GNT_DRIVER) {
11570 err = TG3_LOOPBACK_FAILED;
11571 goto done;
11574 /* Turn off link-based power management. */
11575 cpmuctrl = tr32(TG3_CPMU_CTRL);
11576 tw32(TG3_CPMU_CTRL,
11577 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11578 CPMU_CTRL_LINK_AWARE_MODE));
11581 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11582 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11584 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11585 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11586 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11588 if (tg3_flag(tp, CPMU_PRESENT)) {
11589 tw32(TG3_CPMU_CTRL, cpmuctrl);
11591 /* Release the mutex */
11592 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11595 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11596 !tg3_flag(tp, USE_PHYLIB)) {
11597 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11598 err |= TG3_STD_LOOPBACK_FAILED <<
11599 TG3_PHY_LOOPBACK_SHIFT;
11600 if (tg3_flag(tp, TSO_CAPABLE) &&
11601 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11602 err |= TG3_TSO_LOOPBACK_FAILED <<
11603 TG3_PHY_LOOPBACK_SHIFT;
11604 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11605 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11606 err |= TG3_JMB_LOOPBACK_FAILED <<
11607 TG3_PHY_LOOPBACK_SHIFT;
11610 /* Re-enable gphy autopowerdown. */
11611 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11612 tg3_phy_toggle_apd(tp, true);
11614 done:
11615 tp->phy_flags |= eee_cap;
11617 return err;
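11618 }
11619 /* ethtool self-test entry point (ETHTOOL_TEST), e.g. "ethtool -t ethX offline" from userspace. */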
11620 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11621 u64 *data)
11623 struct tg3 *tp = netdev_priv(dev);
11625 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11626 tg3_power_up(tp)) {
11627 etest->flags |= ETH_TEST_FL_FAILED;
11628 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11629 return;
11632 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11634 if (tg3_test_nvram(tp) != 0) {
11635 etest->flags |= ETH_TEST_FL_FAILED;
11636 data[0] = 1;
11638 if (tg3_test_link(tp) != 0) {
11639 etest->flags |= ETH_TEST_FL_FAILED;
11640 data[1] = 1;
11642 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11643 int err, err2 = 0, irq_sync = 0;
11645 if (netif_running(dev)) {
11646 tg3_phy_stop(tp);
11647 tg3_netif_stop(tp);
11648 irq_sync = 1;
11651 tg3_full_lock(tp, irq_sync);
11653 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11654 err = tg3_nvram_lock(tp);
11655 tg3_halt_cpu(tp, RX_CPU_BASE);
11656 if (!tg3_flag(tp, 5705_PLUS))
11657 tg3_halt_cpu(tp, TX_CPU_BASE);
11658 if (!err)
11659 tg3_nvram_unlock(tp);
11661 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11662 tg3_phy_reset(tp);
11664 if (tg3_test_registers(tp) != 0) {
11665 etest->flags |= ETH_TEST_FL_FAILED;
11666 data[2] = 1;
11668 if (tg3_test_memory(tp) != 0) {
11669 etest->flags |= ETH_TEST_FL_FAILED;
11670 data[3] = 1;
11672 if ((data[4] = tg3_test_loopback(tp)) != 0)
11673 etest->flags |= ETH_TEST_FL_FAILED;
11675 tg3_full_unlock(tp);
11677 if (tg3_test_interrupt(tp) != 0) {
11678 etest->flags |= ETH_TEST_FL_FAILED;
11679 data[5] = 1;
11682 tg3_full_lock(tp, 0);
11684 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11685 if (netif_running(dev)) {
11686 tg3_flag_set(tp, INIT_COMPLETE);
11687 err2 = tg3_restart_hw(tp, 1);
11688 if (!err2)
11689 tg3_netif_start(tp);
11692 tg3_full_unlock(tp);
11694 if (irq_sync && !err2)
11695 tg3_phy_start(tp);
11697 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11698 tg3_power_down(tp);
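11699 }
11700 /* MII ioctl handler for SIOCGMIIPHY, SIOCGMIIREG and SIOCSMIIREG. */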
11702 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11704 struct mii_ioctl_data *data = if_mii(ifr);
11705 struct tg3 *tp = netdev_priv(dev);
11706 int err;
11708 if (tg3_flag(tp, USE_PHYLIB)) {
11709 struct phy_device *phydev;
11710 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11711 return -EAGAIN;
11712 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11713 return phy_mii_ioctl(phydev, ifr, cmd);
11716 switch (cmd) {
11717 case SIOCGMIIPHY:
11718 data->phy_id = tp->phy_addr;
11720 /* fallthru */
11721 case SIOCGMIIREG: {
11722 u32 mii_regval;
11724 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11725 break; /* We have no PHY */
11727 if (!netif_running(dev))
11728 return -EAGAIN;
11730 spin_lock_bh(&tp->lock);
11731 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11732 spin_unlock_bh(&tp->lock);
11734 data->val_out = mii_regval;
11736 return err;
11739 case SIOCSMIIREG:
11740 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11741 break; /* We have no PHY */
11743 if (!netif_running(dev))
11744 return -EAGAIN;
11746 spin_lock_bh(&tp->lock);
11747 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11748 spin_unlock_bh(&tp->lock);
11750 return err;
11752 default:
11753 /* do nothing */
11754 break;
11756 return -EOPNOTSUPP;
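11757 }
11758 /* Interrupt coalescing get/set; tg3_set_coalesce range-checks against chip limits before applying. */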
11759 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11761 struct tg3 *tp = netdev_priv(dev);
11763 memcpy(ec, &tp->coal, sizeof(*ec));
11764 return 0;
11767 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11769 struct tg3 *tp = netdev_priv(dev);
11770 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11771 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11773 if (!tg3_flag(tp, 5705_PLUS)) {
11774 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11775 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11776 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11777 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11780 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11781 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11782 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11783 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11784 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11785 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11786 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11787 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11788 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11789 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11790 return -EINVAL;
11792 /* No rx interrupts will be generated if both are zero */
11793 if ((ec->rx_coalesce_usecs == 0) &&
11794 (ec->rx_max_coalesced_frames == 0))
11795 return -EINVAL;
11797 /* No tx interrupts will be generated if both are zero */
11798 if ((ec->tx_coalesce_usecs == 0) &&
11799 (ec->tx_max_coalesced_frames == 0))
11800 return -EINVAL;
11802 /* Only copy relevant parameters, ignore all others. */
11803 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11804 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11805 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11806 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11807 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11808 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11809 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11810 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11811 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11813 if (netif_running(dev)) {
11814 tg3_full_lock(tp, 0);
11815 __tg3_set_coalesce(tp, &tp->coal);
11816 tg3_full_unlock(tp);
11818 return 0;
11821 static const struct ethtool_ops tg3_ethtool_ops = {
11822 .get_settings = tg3_get_settings,
11823 .set_settings = tg3_set_settings,
11824 .get_drvinfo = tg3_get_drvinfo,
11825 .get_regs_len = tg3_get_regs_len,
11826 .get_regs = tg3_get_regs,
11827 .get_wol = tg3_get_wol,
11828 .set_wol = tg3_set_wol,
11829 .get_msglevel = tg3_get_msglevel,
11830 .set_msglevel = tg3_set_msglevel,
11831 .nway_reset = tg3_nway_reset,
11832 .get_link = ethtool_op_get_link,
11833 .get_eeprom_len = tg3_get_eeprom_len,
11834 .get_eeprom = tg3_get_eeprom,
11835 .set_eeprom = tg3_set_eeprom,
11836 .get_ringparam = tg3_get_ringparam,
11837 .set_ringparam = tg3_set_ringparam,
11838 .get_pauseparam = tg3_get_pauseparam,
11839 .set_pauseparam = tg3_set_pauseparam,
11840 .self_test = tg3_self_test,
11841 .get_strings = tg3_get_strings,
11842 .set_phys_id = tg3_set_phys_id,
11843 .get_ethtool_stats = tg3_get_ethtool_stats,
11844 .get_coalesce = tg3_get_coalesce,
11845 .set_coalesce = tg3_set_coalesce,
11846 .get_sset_count = tg3_get_sset_count,
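11847 };
11848 /* NVRAM/EEPROM detection: the helpers below size the part and decode NVRAM_CFG1. */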
11849 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11851 u32 cursize, val, magic;
11853 tp->nvram_size = EEPROM_CHIP_SIZE;
11855 if (tg3_nvram_read(tp, 0, &magic) != 0)
11856 return;
11858 if ((magic != TG3_EEPROM_MAGIC) &&
11859 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11860 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11861 return;
11863 /*
11864 * Size the chip by reading offsets at increasing powers of two.
11865 * When we encounter our validation signature, we know the addressing
11866 * has wrapped around, and thus have our chip size.
11867 */
11868 cursize = 0x10;
11870 while (cursize < tp->nvram_size) {
11871 if (tg3_nvram_read(tp, cursize, &val) != 0)
11872 return;
11874 if (val == magic)
11875 break;
11877 cursize <<= 1;
11880 tp->nvram_size = cursize;
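11881 }
11882 /* Determine the total NVRAM size; selfboot parts store a size word at offset 0xf0. */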
11883 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11885 u32 val;
11887 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11888 return;
11890 /* Selfboot format */
11891 if (val != TG3_EEPROM_MAGIC) {
11892 tg3_get_eeprom_size(tp);
11893 return;
11896 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11897 if (val != 0) {
11898 /* This is confusing. We want to operate on the
11899 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11900 * call will read from NVRAM and byteswap the data
11901 * according to the byteswapping settings for all
11902 * other register accesses. This ensures the data we
11903 * want will always reside in the lower 16-bits.
11904 * However, the data in NVRAM is in LE format, which
11905 * means the data from the NVRAM read will always be
11906 * opposite the endianness of the CPU. The 16-bit
11907 * byteswap then brings the data to CPU endianness.
11908 */
11909 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11910 return;
11913 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11916 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11918 u32 nvcfg1;
11920 nvcfg1 = tr32(NVRAM_CFG1);
11921 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11922 tg3_flag_set(tp, FLASH);
11923 } else {
11924 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11925 tw32(NVRAM_CFG1, nvcfg1);
11928 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11929 tg3_flag(tp, 5780_CLASS)) {
11930 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11931 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11932 tp->nvram_jedecnum = JEDEC_ATMEL;
11933 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11934 tg3_flag_set(tp, NVRAM_BUFFERED);
11935 break;
11936 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11937 tp->nvram_jedecnum = JEDEC_ATMEL;
11938 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11939 break;
11940 case FLASH_VENDOR_ATMEL_EEPROM:
11941 tp->nvram_jedecnum = JEDEC_ATMEL;
11942 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11943 tg3_flag_set(tp, NVRAM_BUFFERED);
11944 break;
11945 case FLASH_VENDOR_ST:
11946 tp->nvram_jedecnum = JEDEC_ST;
11947 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11948 tg3_flag_set(tp, NVRAM_BUFFERED);
11949 break;
11950 case FLASH_VENDOR_SAIFUN:
11951 tp->nvram_jedecnum = JEDEC_SAIFUN;
11952 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11953 break;
11954 case FLASH_VENDOR_SST_SMALL:
11955 case FLASH_VENDOR_SST_LARGE:
11956 tp->nvram_jedecnum = JEDEC_SST;
11957 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11958 break;
11960 } else {
11961 tp->nvram_jedecnum = JEDEC_ATMEL;
11962 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11963 tg3_flag_set(tp, NVRAM_BUFFERED);
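11964 }
11965 }
11966 /* Translate the NVRAM_CFG1 page-size field into a byte count (5752 and newer parts). */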
11967 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11969 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11970 case FLASH_5752PAGE_SIZE_256:
11971 tp->nvram_pagesize = 256;
11972 break;
11973 case FLASH_5752PAGE_SIZE_512:
11974 tp->nvram_pagesize = 512;
11975 break;
11976 case FLASH_5752PAGE_SIZE_1K:
11977 tp->nvram_pagesize = 1024;
11978 break;
11979 case FLASH_5752PAGE_SIZE_2K:
11980 tp->nvram_pagesize = 2048;
11981 break;
11982 case FLASH_5752PAGE_SIZE_4K:
11983 tp->nvram_pagesize = 4096;
11984 break;
11985 case FLASH_5752PAGE_SIZE_264:
11986 tp->nvram_pagesize = 264;
11987 break;
11988 case FLASH_5752PAGE_SIZE_528:
11989 tp->nvram_pagesize = 528;
11990 break;
11994 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11996 u32 nvcfg1;
11998 nvcfg1 = tr32(NVRAM_CFG1);
12000 /* NVRAM protection for TPM */
12001 if (nvcfg1 & (1 << 27))
12002 tg3_flag_set(tp, PROTECTED_NVRAM);
12004 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12005 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12006 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12007 tp->nvram_jedecnum = JEDEC_ATMEL;
12008 tg3_flag_set(tp, NVRAM_BUFFERED);
12009 break;
12010 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12011 tp->nvram_jedecnum = JEDEC_ATMEL;
12012 tg3_flag_set(tp, NVRAM_BUFFERED);
12013 tg3_flag_set(tp, FLASH);
12014 break;
12015 case FLASH_5752VENDOR_ST_M45PE10:
12016 case FLASH_5752VENDOR_ST_M45PE20:
12017 case FLASH_5752VENDOR_ST_M45PE40:
12018 tp->nvram_jedecnum = JEDEC_ST;
12019 tg3_flag_set(tp, NVRAM_BUFFERED);
12020 tg3_flag_set(tp, FLASH);
12021 break;
12024 if (tg3_flag(tp, FLASH)) {
12025 tg3_nvram_get_pagesize(tp, nvcfg1);
12026 } else {
12027 /* For eeprom, set pagesize to maximum eeprom size */
12028 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12030 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12031 tw32(NVRAM_CFG1, nvcfg1);
12035 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12037 u32 nvcfg1, protect = 0;
12039 nvcfg1 = tr32(NVRAM_CFG1);
12041 /* NVRAM protection for TPM */
12042 if (nvcfg1 & (1 << 27)) {
12043 tg3_flag_set(tp, PROTECTED_NVRAM);
12044 protect = 1;
12047 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12048 switch (nvcfg1) {
12049 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12050 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12051 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12052 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12053 tp->nvram_jedecnum = JEDEC_ATMEL;
12054 tg3_flag_set(tp, NVRAM_BUFFERED);
12055 tg3_flag_set(tp, FLASH);
12056 tp->nvram_pagesize = 264;
12057 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12058 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12059 tp->nvram_size = (protect ? 0x3e200 :
12060 TG3_NVRAM_SIZE_512KB);
12061 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12062 tp->nvram_size = (protect ? 0x1f200 :
12063 TG3_NVRAM_SIZE_256KB);
12064 else
12065 tp->nvram_size = (protect ? 0x1f200 :
12066 TG3_NVRAM_SIZE_128KB);
12067 break;
12068 case FLASH_5752VENDOR_ST_M45PE10:
12069 case FLASH_5752VENDOR_ST_M45PE20:
12070 case FLASH_5752VENDOR_ST_M45PE40:
12071 tp->nvram_jedecnum = JEDEC_ST;
12072 tg3_flag_set(tp, NVRAM_BUFFERED);
12073 tg3_flag_set(tp, FLASH);
12074 tp->nvram_pagesize = 256;
12075 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12076 tp->nvram_size = (protect ?
12077 TG3_NVRAM_SIZE_64KB :
12078 TG3_NVRAM_SIZE_128KB);
12079 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12080 tp->nvram_size = (protect ?
12081 TG3_NVRAM_SIZE_64KB :
12082 TG3_NVRAM_SIZE_256KB);
12083 else
12084 tp->nvram_size = (protect ?
12085 TG3_NVRAM_SIZE_128KB :
12086 TG3_NVRAM_SIZE_512KB);
12087 break;
12091 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12093 u32 nvcfg1;
12095 nvcfg1 = tr32(NVRAM_CFG1);
12097 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12098 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12099 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12100 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12101 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12102 tp->nvram_jedecnum = JEDEC_ATMEL;
12103 tg3_flag_set(tp, NVRAM_BUFFERED);
12104 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12106 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12107 tw32(NVRAM_CFG1, nvcfg1);
12108 break;
12109 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12110 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12111 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12112 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12113 tp->nvram_jedecnum = JEDEC_ATMEL;
12114 tg3_flag_set(tp, NVRAM_BUFFERED);
12115 tg3_flag_set(tp, FLASH);
12116 tp->nvram_pagesize = 264;
12117 break;
12118 case FLASH_5752VENDOR_ST_M45PE10:
12119 case FLASH_5752VENDOR_ST_M45PE20:
12120 case FLASH_5752VENDOR_ST_M45PE40:
12121 tp->nvram_jedecnum = JEDEC_ST;
12122 tg3_flag_set(tp, NVRAM_BUFFERED);
12123 tg3_flag_set(tp, FLASH);
12124 tp->nvram_pagesize = 256;
12125 break;
12129 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12131 u32 nvcfg1, protect = 0;
12133 nvcfg1 = tr32(NVRAM_CFG1);
12135 /* NVRAM protection for TPM */
12136 if (nvcfg1 & (1 << 27)) {
12137 tg3_flag_set(tp, PROTECTED_NVRAM);
12138 protect = 1;
12141 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12142 switch (nvcfg1) {
12143 case FLASH_5761VENDOR_ATMEL_ADB021D:
12144 case FLASH_5761VENDOR_ATMEL_ADB041D:
12145 case FLASH_5761VENDOR_ATMEL_ADB081D:
12146 case FLASH_5761VENDOR_ATMEL_ADB161D:
12147 case FLASH_5761VENDOR_ATMEL_MDB021D:
12148 case FLASH_5761VENDOR_ATMEL_MDB041D:
12149 case FLASH_5761VENDOR_ATMEL_MDB081D:
12150 case FLASH_5761VENDOR_ATMEL_MDB161D:
12151 tp->nvram_jedecnum = JEDEC_ATMEL;
12152 tg3_flag_set(tp, NVRAM_BUFFERED);
12153 tg3_flag_set(tp, FLASH);
12154 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12155 tp->nvram_pagesize = 256;
12156 break;
12157 case FLASH_5761VENDOR_ST_A_M45PE20:
12158 case FLASH_5761VENDOR_ST_A_M45PE40:
12159 case FLASH_5761VENDOR_ST_A_M45PE80:
12160 case FLASH_5761VENDOR_ST_A_M45PE16:
12161 case FLASH_5761VENDOR_ST_M_M45PE20:
12162 case FLASH_5761VENDOR_ST_M_M45PE40:
12163 case FLASH_5761VENDOR_ST_M_M45PE80:
12164 case FLASH_5761VENDOR_ST_M_M45PE16:
12165 tp->nvram_jedecnum = JEDEC_ST;
12166 tg3_flag_set(tp, NVRAM_BUFFERED);
12167 tg3_flag_set(tp, FLASH);
12168 tp->nvram_pagesize = 256;
12169 break;
12172 if (protect) {
12173 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12174 } else {
12175 switch (nvcfg1) {
12176 case FLASH_5761VENDOR_ATMEL_ADB161D:
12177 case FLASH_5761VENDOR_ATMEL_MDB161D:
12178 case FLASH_5761VENDOR_ST_A_M45PE16:
12179 case FLASH_5761VENDOR_ST_M_M45PE16:
12180 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12181 break;
12182 case FLASH_5761VENDOR_ATMEL_ADB081D:
12183 case FLASH_5761VENDOR_ATMEL_MDB081D:
12184 case FLASH_5761VENDOR_ST_A_M45PE80:
12185 case FLASH_5761VENDOR_ST_M_M45PE80:
12186 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12187 break;
12188 case FLASH_5761VENDOR_ATMEL_ADB041D:
12189 case FLASH_5761VENDOR_ATMEL_MDB041D:
12190 case FLASH_5761VENDOR_ST_A_M45PE40:
12191 case FLASH_5761VENDOR_ST_M_M45PE40:
12192 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12193 break;
12194 case FLASH_5761VENDOR_ATMEL_ADB021D:
12195 case FLASH_5761VENDOR_ATMEL_MDB021D:
12196 case FLASH_5761VENDOR_ST_A_M45PE20:
12197 case FLASH_5761VENDOR_ST_M_M45PE20:
12198 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12199 break;
12204 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12206 tp->nvram_jedecnum = JEDEC_ATMEL;
12207 tg3_flag_set(tp, NVRAM_BUFFERED);
12208 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12211 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12213 u32 nvcfg1;
12215 nvcfg1 = tr32(NVRAM_CFG1);
12217 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12218 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12219 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12220 tp->nvram_jedecnum = JEDEC_ATMEL;
12221 tg3_flag_set(tp, NVRAM_BUFFERED);
12222 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12224 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12225 tw32(NVRAM_CFG1, nvcfg1);
12226 return;
12227 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12228 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12229 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12230 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12231 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12232 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12233 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12234 tp->nvram_jedecnum = JEDEC_ATMEL;
12235 tg3_flag_set(tp, NVRAM_BUFFERED);
12236 tg3_flag_set(tp, FLASH);
12238 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12239 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12240 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12241 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12242 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12243 break;
12244 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12245 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12246 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12247 break;
12248 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12249 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12250 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12251 break;
12253 break;
12254 case FLASH_5752VENDOR_ST_M45PE10:
12255 case FLASH_5752VENDOR_ST_M45PE20:
12256 case FLASH_5752VENDOR_ST_M45PE40:
12257 tp->nvram_jedecnum = JEDEC_ST;
12258 tg3_flag_set(tp, NVRAM_BUFFERED);
12259 tg3_flag_set(tp, FLASH);
12261 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12262 case FLASH_5752VENDOR_ST_M45PE10:
12263 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12264 break;
12265 case FLASH_5752VENDOR_ST_M45PE20:
12266 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12267 break;
12268 case FLASH_5752VENDOR_ST_M45PE40:
12269 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12270 break;
12272 break;
12273 default:
12274 tg3_flag_set(tp, NO_NVRAM);
12275 return;
12278 tg3_nvram_get_pagesize(tp, nvcfg1);
12279 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12280 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
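/* Note: the 264/528-byte page-size test above (repeated for the 5717
 * and 5720 variants below) matches Atmel AT45-style DataFlash, whose
 * pages are not a power of two; only those parts keep the
 * linear-to-paged address translation enabled. With 264-byte pages a
 * linear offset A maps to roughly ((A / 264) << 9) | (A % 264).
 */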
12284 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12286 u32 nvcfg1;
12288 nvcfg1 = tr32(NVRAM_CFG1);
12290 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12291 case FLASH_5717VENDOR_ATMEL_EEPROM:
12292 case FLASH_5717VENDOR_MICRO_EEPROM:
12293 tp->nvram_jedecnum = JEDEC_ATMEL;
12294 tg3_flag_set(tp, NVRAM_BUFFERED);
12295 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12297 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12298 tw32(NVRAM_CFG1, nvcfg1);
12299 return;
12300 case FLASH_5717VENDOR_ATMEL_MDB011D:
12301 case FLASH_5717VENDOR_ATMEL_ADB011B:
12302 case FLASH_5717VENDOR_ATMEL_ADB011D:
12303 case FLASH_5717VENDOR_ATMEL_MDB021D:
12304 case FLASH_5717VENDOR_ATMEL_ADB021B:
12305 case FLASH_5717VENDOR_ATMEL_ADB021D:
12306 case FLASH_5717VENDOR_ATMEL_45USPT:
12307 tp->nvram_jedecnum = JEDEC_ATMEL;
12308 tg3_flag_set(tp, NVRAM_BUFFERED);
12309 tg3_flag_set(tp, FLASH);
12311 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12312 case FLASH_5717VENDOR_ATMEL_MDB021D:
12313 /* Detect size with tg3_nvram_get_size() */
12314 break;
12315 case FLASH_5717VENDOR_ATMEL_ADB021B:
12316 case FLASH_5717VENDOR_ATMEL_ADB021D:
12317 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12318 break;
12319 default:
12320 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12321 break;
12323 break;
12324 case FLASH_5717VENDOR_ST_M_M25PE10:
12325 case FLASH_5717VENDOR_ST_A_M25PE10:
12326 case FLASH_5717VENDOR_ST_M_M45PE10:
12327 case FLASH_5717VENDOR_ST_A_M45PE10:
12328 case FLASH_5717VENDOR_ST_M_M25PE20:
12329 case FLASH_5717VENDOR_ST_A_M25PE20:
12330 case FLASH_5717VENDOR_ST_M_M45PE20:
12331 case FLASH_5717VENDOR_ST_A_M45PE20:
12332 case FLASH_5717VENDOR_ST_25USPT:
12333 case FLASH_5717VENDOR_ST_45USPT:
12334 tp->nvram_jedecnum = JEDEC_ST;
12335 tg3_flag_set(tp, NVRAM_BUFFERED);
12336 tg3_flag_set(tp, FLASH);
12338 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12339 case FLASH_5717VENDOR_ST_M_M25PE20:
12340 case FLASH_5717VENDOR_ST_M_M45PE20:
12341 /* Detect size with tg3_nvram_get_size() */
12342 break;
12343 case FLASH_5717VENDOR_ST_A_M25PE20:
12344 case FLASH_5717VENDOR_ST_A_M45PE20:
12345 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12346 break;
12347 default:
12348 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12349 break;
12351 break;
12352 default:
12353 tg3_flag_set(tp, NO_NVRAM);
12354 return;
12357 tg3_nvram_get_pagesize(tp, nvcfg1);
12358 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12359 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
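/* The 5720 family uses the same decode, read here through the
 * nvmpinstrp pin-strap field, and adds 1MB-class (M25PE80/M45PE80)
 * flash sizes.
 */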
12362 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12364 u32 nvcfg1, nvmpinstrp;
12366 nvcfg1 = tr32(NVRAM_CFG1);
12367 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12369 switch (nvmpinstrp) {
12370 case FLASH_5720_EEPROM_HD:
12371 case FLASH_5720_EEPROM_LD:
12372 tp->nvram_jedecnum = JEDEC_ATMEL;
12373 tg3_flag_set(tp, NVRAM_BUFFERED);
12375 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12376 tw32(NVRAM_CFG1, nvcfg1);
12377 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12378 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12379 else
12380 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12381 return;
12382 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12383 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12384 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12385 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12386 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12387 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12388 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12389 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12390 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12391 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12392 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12393 case FLASH_5720VENDOR_ATMEL_45USPT:
12394 tp->nvram_jedecnum = JEDEC_ATMEL;
12395 tg3_flag_set(tp, NVRAM_BUFFERED);
12396 tg3_flag_set(tp, FLASH);
12398 switch (nvmpinstrp) {
12399 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12400 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12401 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12402 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12403 break;
12404 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12405 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12406 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12407 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12408 break;
12409 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12410 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12411 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12412 break;
12413 default:
12414 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12415 break;
12417 break;
12418 case FLASH_5720VENDOR_M_ST_M25PE10:
12419 case FLASH_5720VENDOR_M_ST_M45PE10:
12420 case FLASH_5720VENDOR_A_ST_M25PE10:
12421 case FLASH_5720VENDOR_A_ST_M45PE10:
12422 case FLASH_5720VENDOR_M_ST_M25PE20:
12423 case FLASH_5720VENDOR_M_ST_M45PE20:
12424 case FLASH_5720VENDOR_A_ST_M25PE20:
12425 case FLASH_5720VENDOR_A_ST_M45PE20:
12426 case FLASH_5720VENDOR_M_ST_M25PE40:
12427 case FLASH_5720VENDOR_M_ST_M45PE40:
12428 case FLASH_5720VENDOR_A_ST_M25PE40:
12429 case FLASH_5720VENDOR_A_ST_M45PE40:
12430 case FLASH_5720VENDOR_M_ST_M25PE80:
12431 case FLASH_5720VENDOR_M_ST_M45PE80:
12432 case FLASH_5720VENDOR_A_ST_M25PE80:
12433 case FLASH_5720VENDOR_A_ST_M45PE80:
12434 case FLASH_5720VENDOR_ST_25USPT:
12435 case FLASH_5720VENDOR_ST_45USPT:
12436 tp->nvram_jedecnum = JEDEC_ST;
12437 tg3_flag_set(tp, NVRAM_BUFFERED);
12438 tg3_flag_set(tp, FLASH);
12440 switch (nvmpinstrp) {
12441 case FLASH_5720VENDOR_M_ST_M25PE20:
12442 case FLASH_5720VENDOR_M_ST_M45PE20:
12443 case FLASH_5720VENDOR_A_ST_M25PE20:
12444 case FLASH_5720VENDOR_A_ST_M45PE20:
12445 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12446 break;
12447 case FLASH_5720VENDOR_M_ST_M25PE40:
12448 case FLASH_5720VENDOR_M_ST_M45PE40:
12449 case FLASH_5720VENDOR_A_ST_M25PE40:
12450 case FLASH_5720VENDOR_A_ST_M45PE40:
12451 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12452 break;
12453 case FLASH_5720VENDOR_M_ST_M25PE80:
12454 case FLASH_5720VENDOR_M_ST_M45PE80:
12455 case FLASH_5720VENDOR_A_ST_M25PE80:
12456 case FLASH_5720VENDOR_A_ST_M45PE80:
12457 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12458 break;
12459 default:
12460 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12461 break;
12463 break;
12464 default:
12465 tg3_flag_set(tp, NO_NVRAM);
12466 return;
12469 tg3_nvram_get_pagesize(tp, nvcfg1);
12470 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12471 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12474 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12475 static void __devinit tg3_nvram_init(struct tg3 *tp)
12477 tw32_f(GRC_EEPROM_ADDR,
12478 (EEPROM_ADDR_FSM_RESET |
12479 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12480 EEPROM_ADDR_CLKPERD_SHIFT)));
12482 msleep(1);
12484 /* Enable seeprom accesses. */
12485 tw32_f(GRC_LOCAL_CTRL,
12486 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12487 udelay(100);
12489 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12490 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12491 tg3_flag_set(tp, NVRAM);
12493 if (tg3_nvram_lock(tp)) {
12494 netdev_warn(tp->dev,
12495 "Cannot get nvram lock, %s failed\n",
12496 __func__);
12497 return;
12499 tg3_enable_nvram_access(tp);
12501 tp->nvram_size = 0;
12503 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12504 tg3_get_5752_nvram_info(tp);
12505 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12506 tg3_get_5755_nvram_info(tp);
12507 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12508 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12509 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12510 tg3_get_5787_nvram_info(tp);
12511 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12512 tg3_get_5761_nvram_info(tp);
12513 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12514 tg3_get_5906_nvram_info(tp);
12515 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12516 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12517 tg3_get_57780_nvram_info(tp);
12518 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12519 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12520 tg3_get_5717_nvram_info(tp);
12521 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12522 tg3_get_5720_nvram_info(tp);
12523 else
12524 tg3_get_nvram_info(tp);
12526 if (tp->nvram_size == 0)
12527 tg3_get_nvram_size(tp);
12529 tg3_disable_nvram_access(tp);
12530 tg3_nvram_unlock(tp);
12532 } else {
12533 tg3_flag_clear(tp, NVRAM);
12534 tg3_flag_clear(tp, NVRAM_BUFFERED);
12536 tg3_get_eeprom_size(tp);
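/* Write a dword-aligned block to the legacy serial EEPROM one 32-bit
 * word at a time through the GRC_EEPROM_DATA/ADDR registers, polling
 * EEPROM_ADDR_COMPLETE (up to ~1 second per word) and returning
 * -EBUSY if a word never completes.
 */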
12540 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12541 u32 offset, u32 len, u8 *buf)
12543 int i, j, rc = 0;
12544 u32 val;
12546 for (i = 0; i < len; i += 4) {
12547 u32 addr;
12548 __be32 data;
12550 addr = offset + i;
12552 memcpy(&data, buf + i, 4);
12555 /* The SEEPROM interface expects the data to always be opposite
12556 * the native endian format. We accomplish this by reversing
12557 * all the operations that would have been performed on the
12558 * data from a call to tg3_nvram_read_be32(). */
12560 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12562 val = tr32(GRC_EEPROM_ADDR);
12563 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12565 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12566 EEPROM_ADDR_READ);
12567 tw32(GRC_EEPROM_ADDR, val |
12568 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12569 (addr & EEPROM_ADDR_ADDR_MASK) |
12570 EEPROM_ADDR_START |
12571 EEPROM_ADDR_WRITE);
12573 for (j = 0; j < 1000; j++) {
12574 val = tr32(GRC_EEPROM_ADDR);
12576 if (val & EEPROM_ADDR_COMPLETE)
12577 break;
12578 msleep(1);
12580 if (!(val & EEPROM_ADDR_COMPLETE)) {
12581 rc = -EBUSY;
12582 break;
12586 return rc;
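/* Unbuffered flash parts must be rewritten a full page at a time:
 * read the enclosing page into a bounce buffer, merge in the new
 * data, issue a write-enable, erase the page, then program it back
 * word by word with NVRAM_CMD_FIRST/NVRAM_CMD_LAST framing the page.
 */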
12589 /* offset and length are dword aligned */
12590 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12591 u8 *buf)
12593 int ret = 0;
12594 u32 pagesize = tp->nvram_pagesize;
12595 u32 pagemask = pagesize - 1;
12596 u32 nvram_cmd;
12597 u8 *tmp;
12599 tmp = kmalloc(pagesize, GFP_KERNEL);
12600 if (tmp == NULL)
12601 return -ENOMEM;
12603 while (len) {
12604 int j;
12605 u32 phy_addr, page_off, size;
12607 phy_addr = offset & ~pagemask;
12609 for (j = 0; j < pagesize; j += 4) {
12610 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12611 (__be32 *) (tmp + j));
12612 if (ret)
12613 break;
12615 if (ret)
12616 break;
12618 page_off = offset & pagemask;
12619 size = pagesize;
12620 if (len < size)
12621 size = len;
12623 len -= size;
12625 memcpy(tmp + page_off, buf, size);
12627 offset = offset + (pagesize - page_off);
12629 tg3_enable_nvram_access(tp);
12632 /* Before we can erase the flash page, we need
12633 * to issue a special "write enable" command. */
12635 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12637 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12638 break;
12640 /* Erase the target page */
12641 tw32(NVRAM_ADDR, phy_addr);
12643 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12644 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12646 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12647 break;
12649 /* Issue another write enable to start the write. */
12650 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12652 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12653 break;
12655 for (j = 0; j < pagesize; j += 4) {
12656 __be32 data;
12658 data = *((__be32 *) (tmp + j));
12660 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12662 tw32(NVRAM_ADDR, phy_addr + j);
12664 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12665 NVRAM_CMD_WR;
12667 if (j == 0)
12668 nvram_cmd |= NVRAM_CMD_FIRST;
12669 else if (j == (pagesize - 4))
12670 nvram_cmd |= NVRAM_CMD_LAST;
12672 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12673 break;
12675 if (ret)
12676 break;
12679 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12680 tg3_nvram_exec_cmd(tp, nvram_cmd);
12682 kfree(tmp);
12684 return ret;
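/* Buffered (self-erasing) flash and EEPROM-style parts are
 * programmed in place: NVRAM_CMD_FIRST is raised at each page
 * boundary and NVRAM_CMD_LAST at the end of a page or of the
 * transfer, with an extra write-enable first on older ST parts.
 */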
12687 /* offset and length are dword aligned */
12688 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12689 u8 *buf)
12691 int i, ret = 0;
12693 for (i = 0; i < len; i += 4, offset += 4) {
12694 u32 page_off, phy_addr, nvram_cmd;
12695 __be32 data;
12697 memcpy(&data, buf + i, 4);
12698 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12700 page_off = offset % tp->nvram_pagesize;
12702 phy_addr = tg3_nvram_phys_addr(tp, offset);
12704 tw32(NVRAM_ADDR, phy_addr);
12706 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12708 if (page_off == 0 || i == 0)
12709 nvram_cmd |= NVRAM_CMD_FIRST;
12710 if (page_off == (tp->nvram_pagesize - 4))
12711 nvram_cmd |= NVRAM_CMD_LAST;
12713 if (i == (len - 4))
12714 nvram_cmd |= NVRAM_CMD_LAST;
12716 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12717 !tg3_flag(tp, 5755_PLUS) &&
12718 (tp->nvram_jedecnum == JEDEC_ST) &&
12719 (nvram_cmd & NVRAM_CMD_FIRST)) {
12721 if ((ret = tg3_nvram_exec_cmd(tp,
12722 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12723 NVRAM_CMD_DONE)))
12725 break;
12727 if (!tg3_flag(tp, FLASH)) {
12728 /* We always do complete word writes to eeprom. */
12729 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12732 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12733 break;
12735 return ret;
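/* Top-level NVRAM write entry point: temporarily drop the GPIO-based
 * write protection if armed, take the NVRAM lock, enable writes via
 * GRC_MODE_NVRAM_WR_ENABLE, and dispatch to the buffered or
 * unbuffered writer (or the legacy EEPROM path when no NVRAM
 * interface is present).
 */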
12738 /* offset and length are dword aligned */
12739 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12741 int ret;
12743 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12744 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12745 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12746 udelay(40);
12749 if (!tg3_flag(tp, NVRAM)) {
12750 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12751 } else {
12752 u32 grc_mode;
12754 ret = tg3_nvram_lock(tp);
12755 if (ret)
12756 return ret;
12758 tg3_enable_nvram_access(tp);
12759 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12760 tw32(NVRAM_WRITE1, 0x406);
12762 grc_mode = tr32(GRC_MODE);
12763 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12765 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12766 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12767 buf);
12768 } else {
12769 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12770 buf);
12773 grc_mode = tr32(GRC_MODE);
12774 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12776 tg3_disable_nvram_access(tp);
12777 tg3_nvram_unlock(tp);
12780 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12781 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12782 udelay(40);
12785 return ret;
12788 struct subsys_tbl_ent {
12789 u16 subsys_vendor, subsys_devid;
12790 u32 phy_id;
12793 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12794 /* Broadcom boards. */
12795 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12796 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12797 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12798 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12799 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12800 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12801 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12802 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12803 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12804 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12805 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12806 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12807 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12808 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12809 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12810 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12811 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12812 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12813 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12814 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12815 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12816 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12818 /* 3com boards. */
12819 { TG3PCI_SUBVENDOR_ID_3COM,
12820 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12821 { TG3PCI_SUBVENDOR_ID_3COM,
12822 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12823 { TG3PCI_SUBVENDOR_ID_3COM,
12824 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12825 { TG3PCI_SUBVENDOR_ID_3COM,
12826 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12827 { TG3PCI_SUBVENDOR_ID_3COM,
12828 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12830 /* DELL boards. */
12831 { TG3PCI_SUBVENDOR_ID_DELL,
12832 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12833 { TG3PCI_SUBVENDOR_ID_DELL,
12834 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12835 { TG3PCI_SUBVENDOR_ID_DELL,
12836 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12837 { TG3PCI_SUBVENDOR_ID_DELL,
12838 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12840 /* Compaq boards. */
12841 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12842 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12843 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12844 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12845 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12846 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12847 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12848 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12849 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12850 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12852 /* IBM boards. */
12853 { TG3PCI_SUBVENDOR_ID_IBM,
12854 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
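/* Map the PCI subsystem vendor/device pair to the PHY fitted on that
 * board; used when the NVRAM carries no usable PHY ID.
 */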
12857 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12859 int i;
12861 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12862 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12863 tp->pdev->subsystem_vendor) &&
12864 (subsys_id_to_phy_id[i].subsys_devid ==
12865 tp->pdev->subsystem_device))
12866 return &subsys_id_to_phy_id[i];
12868 return NULL;
12871 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12873 u32 val;
12875 tp->phy_id = TG3_PHY_ID_INVALID;
12876 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12878 /* Assume an onboard device and WOL capable by default. */
12879 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12880 tg3_flag_set(tp, WOL_CAP);
12882 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12883 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12884 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12885 tg3_flag_set(tp, IS_NIC);
12887 val = tr32(VCPU_CFGSHDW);
12888 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12889 tg3_flag_set(tp, ASPM_WORKAROUND);
12890 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12891 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12892 tg3_flag_set(tp, WOL_ENABLE);
12893 device_set_wakeup_enable(&tp->pdev->dev, true);
12895 goto done;
12898 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12899 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12900 u32 nic_cfg, led_cfg;
12901 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12902 int eeprom_phy_serdes = 0;
12904 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12905 tp->nic_sram_data_cfg = nic_cfg;
12907 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12908 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12909 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12910 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12911 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12912 (ver > 0) && (ver < 0x100))
12913 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12915 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12916 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12918 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12919 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12920 eeprom_phy_serdes = 1;
12922 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12923 if (nic_phy_id != 0) {
12924 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12925 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12927 eeprom_phy_id = (id1 >> 16) << 10;
12928 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12929 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12930 } else
12931 eeprom_phy_id = 0;
12933 tp->phy_id = eeprom_phy_id;
12934 if (eeprom_phy_serdes) {
12935 if (!tg3_flag(tp, 5705_PLUS))
12936 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12937 else
12938 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12941 if (tg3_flag(tp, 5750_PLUS))
12942 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12943 SHASTA_EXT_LED_MODE_MASK);
12944 else
12945 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12947 switch (led_cfg) {
12948 default:
12949 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12950 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12951 break;
12953 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12954 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12955 break;
12957 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12958 tp->led_ctrl = LED_CTRL_MODE_MAC;
12960 /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
12961 * which happens with some older 5700/5701 bootcode. */
12963 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12964 ASIC_REV_5700 ||
12965 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12966 ASIC_REV_5701)
12967 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12969 break;
12971 case SHASTA_EXT_LED_SHARED:
12972 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12973 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12974 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12975 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12976 LED_CTRL_MODE_PHY_2);
12977 break;
12979 case SHASTA_EXT_LED_MAC:
12980 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12981 break;
12983 case SHASTA_EXT_LED_COMBO:
12984 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12985 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12986 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12987 LED_CTRL_MODE_PHY_2);
12988 break;
12992 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12993 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12994 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12995 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12997 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12998 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13000 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13001 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13002 if ((tp->pdev->subsystem_vendor ==
13003 PCI_VENDOR_ID_ARIMA) &&
13004 (tp->pdev->subsystem_device == 0x205a ||
13005 tp->pdev->subsystem_device == 0x2063))
13006 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13007 } else {
13008 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13009 tg3_flag_set(tp, IS_NIC);
13012 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13013 tg3_flag_set(tp, ENABLE_ASF);
13014 if (tg3_flag(tp, 5750_PLUS))
13015 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13018 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13019 tg3_flag(tp, 5750_PLUS))
13020 tg3_flag_set(tp, ENABLE_APE);
13022 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13023 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13024 tg3_flag_clear(tp, WOL_CAP);
13026 if (tg3_flag(tp, WOL_CAP) &&
13027 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13028 tg3_flag_set(tp, WOL_ENABLE);
13029 device_set_wakeup_enable(&tp->pdev->dev, true);
13032 if (cfg2 & (1 << 17))
13033 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13035 /* serdes signal pre-emphasis in register 0x590 set by */
13036 /* bootcode if bit 18 is set */
13037 if (cfg2 & (1 << 18))
13038 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13040 if ((tg3_flag(tp, 57765_PLUS) ||
13041 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13042 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13043 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13044 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13046 if (tg3_flag(tp, PCI_EXPRESS) &&
13047 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13048 !tg3_flag(tp, 57765_PLUS)) {
13049 u32 cfg3;
13051 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13052 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13053 tg3_flag_set(tp, ASPM_WORKAROUND);
13056 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13057 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13058 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13059 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13060 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13061 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13063 done:
13064 if (tg3_flag(tp, WOL_CAP))
13065 device_set_wakeup_enable(&tp->pdev->dev,
13066 tg3_flag(tp, WOL_ENABLE));
13067 else
13068 device_set_wakeup_capable(&tp->pdev->dev, false);
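/* Helper for the OTP reads below: start a command via OTP_CTRL and
 * poll OTP_STATUS for completion, returning -EBUSY on timeout.
 */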
13071 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13073 int i;
13074 u32 val;
13076 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13077 tw32(OTP_CTRL, cmd);
13079 /* Wait for up to 1 ms for command to execute. */
13080 for (i = 0; i < 100; i++) {
13081 val = tr32(OTP_STATUS);
13082 if (val & OTP_STATUS_CMD_DONE)
13083 break;
13084 udelay(10);
13087 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13090 /* Read the gphy configuration from the OTP region of the chip. The gphy
13091 * configuration is a 32-bit value that straddles the alignment boundary.
13092 * We do two 32-bit reads and then shift and merge the results. */
13094 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13096 u32 bhalf_otp, thalf_otp;
13098 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13100 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13101 return 0;
13103 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13105 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13106 return 0;
13108 thalf_otp = tr32(OTP_READ_DATA);
13110 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13112 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13113 return 0;
13115 bhalf_otp = tr32(OTP_READ_DATA);
13117 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
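/* Reset link_config to "advertise everything the PHY type supports,
 * autoneg on, nothing resolved yet": 1000BASE-T unless the PHY is
 * 10/100-only, plus 10/100/TP for copper or FIBRE for serdes.
 */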
13120 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13122 u32 adv = ADVERTISED_Autoneg |
13123 ADVERTISED_Pause;
13125 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13126 adv |= ADVERTISED_1000baseT_Half |
13127 ADVERTISED_1000baseT_Full;
13129 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13130 adv |= ADVERTISED_100baseT_Half |
13131 ADVERTISED_100baseT_Full |
13132 ADVERTISED_10baseT_Half |
13133 ADVERTISED_10baseT_Full |
13134 ADVERTISED_TP;
13135 else
13136 adv |= ADVERTISED_FIBRE;
13138 tp->link_config.advertising = adv;
13139 tp->link_config.speed = SPEED_INVALID;
13140 tp->link_config.duplex = DUPLEX_INVALID;
13141 tp->link_config.autoneg = AUTONEG_ENABLE;
13142 tp->link_config.active_speed = SPEED_INVALID;
13143 tp->link_config.active_duplex = DUPLEX_INVALID;
13144 tp->link_config.orig_speed = SPEED_INVALID;
13145 tp->link_config.orig_duplex = DUPLEX_INVALID;
13146 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13149 static int __devinit tg3_phy_probe(struct tg3 *tp)
13151 u32 hw_phy_id_1, hw_phy_id_2;
13152 u32 hw_phy_id, hw_phy_id_masked;
13153 int err;
13155 /* flow control autonegotiation is default behavior */
13156 tg3_flag_set(tp, PAUSE_AUTONEG);
13157 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13159 if (tg3_flag(tp, USE_PHYLIB))
13160 return tg3_phy_init(tp);
13162 /* Reading the PHY ID register can conflict with ASF
13163 * firmware access to the PHY hardware. */
13165 err = 0;
13166 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13167 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13168 } else {
13169 /* Now read the physical PHY_ID from the chip and verify
13170 * that it is sane. If it doesn't look good, we fall back
13171 * first to the hard-coded table-based PHY_ID and, failing
13172 * that, to the value found in the eeprom area. */
13174 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13175 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
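/* Fold MII_PHYSID1/2 into tg3's internal PHY ID layout: PHYSID1
 * supplies bits 10-25, PHYSID2 the top OUI bits (26-31) and the
 * low 10 model/revision bits.
 */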
13177 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13178 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13179 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13181 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13184 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13185 tp->phy_id = hw_phy_id;
13186 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13187 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13188 else
13189 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13190 } else {
13191 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13192 /* Do nothing, phy ID already set up in
13193 * tg3_get_eeprom_hw_cfg(). */
13195 } else {
13196 struct subsys_tbl_ent *p;
13198 /* No eeprom signature? Try the hardcoded
13199 * subsys device table. */
13201 p = tg3_lookup_by_subsys(tp);
13202 if (!p)
13203 return -ENODEV;
13205 tp->phy_id = p->phy_id;
13206 if (!tp->phy_id ||
13207 tp->phy_id == TG3_PHY_ID_BCM8002)
13208 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13212 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13213 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13214 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13215 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13216 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13217 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13218 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13219 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13221 tg3_phy_init_link_config(tp);
13223 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13224 !tg3_flag(tp, ENABLE_APE) &&
13225 !tg3_flag(tp, ENABLE_ASF)) {
13226 u32 bmsr, mask;
13228 tg3_readphy(tp, MII_BMSR, &bmsr);
13229 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13230 (bmsr & BMSR_LSTATUS))
13231 goto skip_phy_reset;
13233 err = tg3_phy_reset(tp);
13234 if (err)
13235 return err;
13237 tg3_phy_set_wirespeed(tp);
13239 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13240 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13241 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13242 if (!tg3_copper_is_advertising_all(tp, mask)) {
13243 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13244 tp->link_config.flowctrl);
13246 tg3_writephy(tp, MII_BMCR,
13247 BMCR_ANENABLE | BMCR_ANRESTART);
13251 skip_phy_reset:
13252 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13253 err = tg3_init_5401phy_dsp(tp);
13254 if (err)
13255 return err;
13257 err = tg3_init_5401phy_dsp(tp);
13260 return err;
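/* Recover the board part number (and, for Dell boards, a firmware
 * version prefix; "1028" is Dell's PCI vendor ID) from the VPD
 * read-only section, falling back to device-ID-based names when no
 * VPD is present.
 */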
13263 static void __devinit tg3_read_vpd(struct tg3 *tp)
13265 u8 *vpd_data;
13266 unsigned int block_end, rosize, len;
13267 u32 vpdlen;
13268 int j, i = 0;
13270 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13271 if (!vpd_data)
13272 goto out_no_vpd;
13274 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13275 if (i < 0)
13276 goto out_not_found;
13278 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13279 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13280 i += PCI_VPD_LRDT_TAG_SIZE;
13282 if (block_end > vpdlen)
13283 goto out_not_found;
13285 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13286 PCI_VPD_RO_KEYWORD_MFR_ID);
13287 if (j > 0) {
13288 len = pci_vpd_info_field_size(&vpd_data[j]);
13290 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13291 if (j + len > block_end || len != 4 ||
13292 memcmp(&vpd_data[j], "1028", 4))
13293 goto partno;
13295 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13296 PCI_VPD_RO_KEYWORD_VENDOR0);
13297 if (j < 0)
13298 goto partno;
13300 len = pci_vpd_info_field_size(&vpd_data[j]);
13302 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13303 if (j + len > block_end)
13304 goto partno;
13306 memcpy(tp->fw_ver, &vpd_data[j], len);
13307 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13310 partno:
13311 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13312 PCI_VPD_RO_KEYWORD_PARTNO);
13313 if (i < 0)
13314 goto out_not_found;
13316 len = pci_vpd_info_field_size(&vpd_data[i]);
13318 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13319 if (len > TG3_BPN_SIZE ||
13320 (len + i) > vpdlen)
13321 goto out_not_found;
13323 memcpy(tp->board_part_number, &vpd_data[i], len);
13325 out_not_found:
13326 kfree(vpd_data);
13327 if (tp->board_part_number[0])
13328 return;
13330 out_no_vpd:
13331 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13332 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13333 strcpy(tp->board_part_number, "BCM5717");
13334 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13335 strcpy(tp->board_part_number, "BCM5718");
13336 else
13337 goto nomatch;
13338 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13339 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13340 strcpy(tp->board_part_number, "BCM57780");
13341 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13342 strcpy(tp->board_part_number, "BCM57760");
13343 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13344 strcpy(tp->board_part_number, "BCM57790");
13345 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13346 strcpy(tp->board_part_number, "BCM57788");
13347 else
13348 goto nomatch;
13349 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13350 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13351 strcpy(tp->board_part_number, "BCM57761");
13352 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13353 strcpy(tp->board_part_number, "BCM57765");
13354 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13355 strcpy(tp->board_part_number, "BCM57781");
13356 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13357 strcpy(tp->board_part_number, "BCM57785");
13358 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13359 strcpy(tp->board_part_number, "BCM57791");
13360 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13361 strcpy(tp->board_part_number, "BCM57795");
13362 else
13363 goto nomatch;
13364 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13365 strcpy(tp->board_part_number, "BCM95906");
13366 } else {
13367 nomatch:
13368 strcpy(tp->board_part_number, "none");
13372 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13374 u32 val;
13376 if (tg3_nvram_read(tp, offset, &val) ||
13377 (val & 0xfc000000) != 0x0c000000 ||
13378 tg3_nvram_read(tp, offset + 4, &val) ||
13379 val != 0)
13380 return 0;
13382 return 1;
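/* Extract the bootcode version: newer images carry a 16-byte ASCII
 * version string at a directory-relative offset, older ones a packed
 * major/minor word at TG3_NVM_PTREV_BCVER.
 */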
13385 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13387 u32 val, offset, start, ver_offset;
13388 int i, dst_off;
13389 bool newver = false;
13391 if (tg3_nvram_read(tp, 0xc, &offset) ||
13392 tg3_nvram_read(tp, 0x4, &start))
13393 return;
13395 offset = tg3_nvram_logical_addr(tp, offset);
13397 if (tg3_nvram_read(tp, offset, &val))
13398 return;
13400 if ((val & 0xfc000000) == 0x0c000000) {
13401 if (tg3_nvram_read(tp, offset + 4, &val))
13402 return;
13404 if (val == 0)
13405 newver = true;
13408 dst_off = strlen(tp->fw_ver);
13410 if (newver) {
13411 if (TG3_VER_SIZE - dst_off < 16 ||
13412 tg3_nvram_read(tp, offset + 8, &ver_offset))
13413 return;
13415 offset = offset + ver_offset - start;
13416 for (i = 0; i < 16; i += 4) {
13417 __be32 v;
13418 if (tg3_nvram_read_be32(tp, offset + i, &v))
13419 return;
13421 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13423 } else {
13424 u32 major, minor;
13426 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13427 return;
13429 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13430 TG3_NVM_BCVER_MAJSFT;
13431 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13432 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13433 "v%d.%02d", major, minor);
13437 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13439 u32 val, major, minor;
13441 /* Use native endian representation */
13442 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13443 return;
13445 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13446 TG3_NVM_HWSB_CFG1_MAJSFT;
13447 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13448 TG3_NVM_HWSB_CFG1_MINSFT;
13450 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13453 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13455 u32 offset, major, minor, build;
13457 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13459 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13460 return;
13462 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13463 case TG3_EEPROM_SB_REVISION_0:
13464 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13465 break;
13466 case TG3_EEPROM_SB_REVISION_2:
13467 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13468 break;
13469 case TG3_EEPROM_SB_REVISION_3:
13470 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13471 break;
13472 case TG3_EEPROM_SB_REVISION_4:
13473 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13474 break;
13475 case TG3_EEPROM_SB_REVISION_5:
13476 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13477 break;
13478 case TG3_EEPROM_SB_REVISION_6:
13479 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13480 break;
13481 default:
13482 return;
13485 if (tg3_nvram_read(tp, offset, &val))
13486 return;
13488 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13489 TG3_EEPROM_SB_EDH_BLD_SHFT;
13490 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13491 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13492 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13494 if (minor > 99 || build > 26)
13495 return;
13497 offset = strlen(tp->fw_ver);
13498 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13499 " v%d.%02d", major, minor);
13501 if (build > 0) {
13502 offset = strlen(tp->fw_ver);
13503 if (offset < TG3_VER_SIZE - 1)
13504 tp->fw_ver[offset] = 'a' + build - 1;
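/* Scan the NVRAM directory for the ASF management firmware image
 * and, when it validates, append its version string to tp->fw_ver,
 * truncating at TG3_VER_SIZE.
 */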
13508 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13510 u32 val, offset, start;
13511 int i, vlen;
13513 for (offset = TG3_NVM_DIR_START;
13514 offset < TG3_NVM_DIR_END;
13515 offset += TG3_NVM_DIRENT_SIZE) {
13516 if (tg3_nvram_read(tp, offset, &val))
13517 return;
13519 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13520 break;
13523 if (offset == TG3_NVM_DIR_END)
13524 return;
13526 if (!tg3_flag(tp, 5705_PLUS))
13527 start = 0x08000000;
13528 else if (tg3_nvram_read(tp, offset - 4, &start))
13529 return;
13531 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13532 !tg3_fw_img_is_valid(tp, offset) ||
13533 tg3_nvram_read(tp, offset + 8, &val))
13534 return;
13536 offset += val - start;
13538 vlen = strlen(tp->fw_ver);
13540 tp->fw_ver[vlen++] = ',';
13541 tp->fw_ver[vlen++] = ' ';
13543 for (i = 0; i < 4; i++) {
13544 __be32 v;
13545 if (tg3_nvram_read_be32(tp, offset, &v))
13546 return;
13548 offset += sizeof(v);
13550 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13551 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13552 break;
13555 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13556 vlen += sizeof(v);
13560 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13562 int vlen;
13563 u32 apedata;
13564 char *fwtype;
13566 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13567 return;
13569 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13570 if (apedata != APE_SEG_SIG_MAGIC)
13571 return;
13573 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13574 if (!(apedata & APE_FW_STATUS_READY))
13575 return;
13577 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13579 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13580 tg3_flag_set(tp, APE_HAS_NCSI);
13581 fwtype = "NCSI";
13582 } else {
13583 fwtype = "DASH";
13586 vlen = strlen(tp->fw_ver);
13588 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13589 fwtype,
13590 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13591 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13592 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13593 (apedata & APE_FW_VERSION_BLDMSK));
13596 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13598 u32 val;
13599 bool vpd_vers = false;
13601 if (tp->fw_ver[0] != 0)
13602 vpd_vers = true;
13604 if (tg3_flag(tp, NO_NVRAM)) {
13605 strcat(tp->fw_ver, "sb");
13606 return;
13609 if (tg3_nvram_read(tp, 0, &val))
13610 return;
13612 if (val == TG3_EEPROM_MAGIC)
13613 tg3_read_bc_ver(tp);
13614 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13615 tg3_read_sb_ver(tp, val);
13616 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13617 tg3_read_hwsb_ver(tp);
13618 else
13619 return;
13621 if (vpd_vers)
13622 goto done;
13624 if (tg3_flag(tp, ENABLE_APE)) {
13625 if (tg3_flag(tp, ENABLE_ASF))
13626 tg3_read_dash_ver(tp);
13627 } else if (tg3_flag(tp, ENABLE_ASF)) {
13628 tg3_read_mgmtfw_ver(tp);
13631 done:
13632 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13635 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13637 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13639 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13640 return TG3_RX_RET_MAX_SIZE_5717;
13641 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13642 return TG3_RX_RET_MAX_SIZE_5700;
13643 else
13644 return TG3_RX_RET_MAX_SIZE_5705;
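/* Host bridges known to reorder posted writes to mailbox registers;
 * when one is present, tg3 reads back every mailbox write (see the
 * MBOX_WRITE_REORDER handling below).
 */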
13647 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13648 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13649 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13650 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13651 { },
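/* Probe-time discovery of chip revision, bus configuration, and the
 * per-chip feature and bug-workaround flags. This must settle the
 * register access methods before the fast path touches the chip.
 */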
13654 static int __devinit tg3_get_invariants(struct tg3 *tp)
13656 u32 misc_ctrl_reg;
13657 u32 pci_state_reg, grc_misc_cfg;
13658 u32 val;
13659 u16 pci_cmd;
13660 int err;
13662 /* Force memory write invalidate off. If we leave it on,
13663 * then on 5700_BX chips we have to enable a workaround.
13664 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13665 * to match the cacheline size. The Broadcom driver has this
13666 * workaround but turns MWI off all the time, so it never uses
13667 * it. This seems to suggest that the workaround is insufficient. */
13669 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13670 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13671 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13673 /* Important! -- Make sure register accesses are byteswapped
13674 * correctly. Also, for those chips that require it, make
13675 * sure that indirect register accesses are enabled before
13676 * the first operation. */
13678 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13679 &misc_ctrl_reg);
13680 tp->misc_host_ctrl |= (misc_ctrl_reg &
13681 MISC_HOST_CTRL_CHIPREV);
13682 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13683 tp->misc_host_ctrl);
13685 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13686 MISC_HOST_CTRL_CHIPREV_SHIFT);
13687 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13688 u32 prod_id_asic_rev;
13690 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13691 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13692 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13693 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13694 pci_read_config_dword(tp->pdev,
13695 TG3PCI_GEN2_PRODID_ASICREV,
13696 &prod_id_asic_rev);
13697 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13698 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13699 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13700 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13701 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13702 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13703 pci_read_config_dword(tp->pdev,
13704 TG3PCI_GEN15_PRODID_ASICREV,
13705 &prod_id_asic_rev);
13706 else
13707 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13708 &prod_id_asic_rev);
13710 tp->pci_chip_rev_id = prod_id_asic_rev;
13713 /* Wrong chip ID in 5752 A0. This code can be removed later
13714 * as A0 is not in production. */
13716 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13717 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13719 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13720 * we need to disable memory and use config. cycles
13721 * only to access all registers. The 5702/03 chips
13722 * can mistakenly decode the special cycles from the
13723 * ICH chipsets as memory write cycles, causing corruption
13724 * of register and memory space. Only certain ICH bridges
13725 * will drive special cycles with non-zero data during the
13726 * address phase which can fall within the 5703's address
13727 * range. This is not an ICH bug as the PCI spec allows
13728 * non-zero address during special cycles. However, only
13729 * these ICH bridges are known to drive non-zero addresses
13730 * during special cycles.
13732 * Since special cycles do not cross PCI bridges, we only
13733 * enable this workaround if the 5703 is on the secondary
13734 * bus of these ICH bridges. */
13736 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13737 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13738 static struct tg3_dev_id {
13739 u32 vendor;
13740 u32 device;
13741 u32 rev;
13742 } ich_chipsets[] = {
13743 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13744 PCI_ANY_ID },
13745 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13746 PCI_ANY_ID },
13747 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13748 0xa },
13749 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13750 PCI_ANY_ID },
13751 { },
13753 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13754 struct pci_dev *bridge = NULL;
13756 while (pci_id->vendor != 0) {
13757 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13758 bridge);
13759 if (!bridge) {
13760 pci_id++;
13761 continue;
13763 if (pci_id->rev != PCI_ANY_ID) {
13764 if (bridge->revision > pci_id->rev)
13765 continue;
13767 if (bridge->subordinate &&
13768 (bridge->subordinate->number ==
13769 tp->pdev->bus->number)) {
13770 tg3_flag_set(tp, ICH_WORKAROUND);
13771 pci_dev_put(bridge);
13772 break;
13777 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13778 static struct tg3_dev_id {
13779 u32 vendor;
13780 u32 device;
13781 } bridge_chipsets[] = {
13782 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13783 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13784 { },
13786 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13787 struct pci_dev *bridge = NULL;
13789 while (pci_id->vendor != 0) {
13790 bridge = pci_get_device(pci_id->vendor,
13791 pci_id->device,
13792 bridge);
13793 if (!bridge) {
13794 pci_id++;
13795 continue;
13797 if (bridge->subordinate &&
13798 (bridge->subordinate->number <=
13799 tp->pdev->bus->number) &&
13800 (bridge->subordinate->subordinate >=
13801 tp->pdev->bus->number)) {
13802 tg3_flag_set(tp, 5701_DMA_BUG);
13803 pci_dev_put(bridge);
13804 break;
13809 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13810 * DMA addresses > 40-bit. This bridge may have additional
13811 * 57xx devices behind it in some 4-port NIC designs, for example.
13812 * Any tg3 device found behind the bridge will also need the 40-bit
13813 * DMA workaround. */
13815 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13816 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13817 tg3_flag_set(tp, 5780_CLASS);
13818 tg3_flag_set(tp, 40BIT_DMA_BUG);
13819 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13820 } else {
13821 struct pci_dev *bridge = NULL;
13823 do {
13824 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13825 PCI_DEVICE_ID_SERVERWORKS_EPB,
13826 bridge);
13827 if (bridge && bridge->subordinate &&
13828 (bridge->subordinate->number <=
13829 tp->pdev->bus->number) &&
13830 (bridge->subordinate->subordinate >=
13831 tp->pdev->bus->number)) {
13832 tg3_flag_set(tp, 40BIT_DMA_BUG);
13833 pci_dev_put(bridge);
13834 break;
13836 } while (bridge);
13839 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13840 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13841 tp->pdev_peer = tg3_find_peer(tp);
13843 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13844 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13845 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13846 tg3_flag_set(tp, 5717_PLUS);
13848 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13849 tg3_flag(tp, 5717_PLUS))
13850 tg3_flag_set(tp, 57765_PLUS);
13852 /* Intentionally exclude ASIC_REV_5906 */
13853 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13854 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13855 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13856 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13857 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13858 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13859 tg3_flag(tp, 57765_PLUS))
13860 tg3_flag_set(tp, 5755_PLUS);
13862 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13863 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13864 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13865 tg3_flag(tp, 5755_PLUS) ||
13866 tg3_flag(tp, 5780_CLASS))
13867 tg3_flag_set(tp, 5750_PLUS);
13869 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13870 tg3_flag(tp, 5750_PLUS))
13871 tg3_flag_set(tp, 5705_PLUS);
13873 /* Determine TSO capabilities */
13874 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13875 ; /* Do nothing. HW bug. */
13876 else if (tg3_flag(tp, 57765_PLUS))
13877 tg3_flag_set(tp, HW_TSO_3);
13878 else if (tg3_flag(tp, 5755_PLUS) ||
13879 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13880 tg3_flag_set(tp, HW_TSO_2);
13881 else if (tg3_flag(tp, 5750_PLUS)) {
13882 tg3_flag_set(tp, HW_TSO_1);
13883 tg3_flag_set(tp, TSO_BUG);
13884 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13885 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13886 tg3_flag_clear(tp, TSO_BUG);
13887 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13888 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13889 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13890 tg3_flag_set(tp, TSO_BUG);
13891 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13892 tp->fw_needed = FIRMWARE_TG3TSO5;
13893 else
13894 tp->fw_needed = FIRMWARE_TG3TSO;
13897 /* Selectively allow TSO based on operating conditions */
13898 if (tg3_flag(tp, HW_TSO_1) ||
13899 tg3_flag(tp, HW_TSO_2) ||
13900 tg3_flag(tp, HW_TSO_3) ||
13901 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13902 tg3_flag_set(tp, TSO_CAPABLE);
13903 else {
13904 tg3_flag_clear(tp, TSO_CAPABLE);
13905 tg3_flag_clear(tp, TSO_BUG);
13906 tp->fw_needed = NULL;
13909 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13910 tp->fw_needed = FIRMWARE_TG3;
13912 tp->irq_max = 1;
13914 if (tg3_flag(tp, 5750_PLUS)) {
13915 tg3_flag_set(tp, SUPPORT_MSI);
13916 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13917 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13918 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13919 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13920 tp->pdev_peer == tp->pdev))
13921 tg3_flag_clear(tp, SUPPORT_MSI);
13923 if (tg3_flag(tp, 5755_PLUS) ||
13924 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13925 tg3_flag_set(tp, 1SHOT_MSI);
13928 if (tg3_flag(tp, 57765_PLUS)) {
13929 tg3_flag_set(tp, SUPPORT_MSIX);
13930 tp->irq_max = TG3_IRQ_MAX_VECS;
13934 if (tg3_flag(tp, 5755_PLUS))
13935 tg3_flag_set(tp, SHORT_DMA_BUG);
13937 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13938 tg3_flag_set(tp, 4K_FIFO_LIMIT);
13940 if (tg3_flag(tp, 5717_PLUS))
13941 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13943 if (tg3_flag(tp, 57765_PLUS) &&
13944 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
13945 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13947 if (!tg3_flag(tp, 5705_PLUS) ||
13948 tg3_flag(tp, 5780_CLASS) ||
13949 tg3_flag(tp, USE_JUMBO_BDFLAG))
13950 tg3_flag_set(tp, JUMBO_CAPABLE);
13952 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13953 &pci_state_reg);
13955 if (pci_is_pcie(tp->pdev)) {
13956 u16 lnkctl;
13958 tg3_flag_set(tp, PCI_EXPRESS);
13960 tp->pcie_readrq = 4096;
13961 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13962 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13963 tp->pcie_readrq = 2048;
13965 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13967 pci_read_config_word(tp->pdev,
13968 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
13969 &lnkctl);
13970 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13971 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13972 ASIC_REV_5906) {
13973 tg3_flag_clear(tp, HW_TSO_2);
13974 tg3_flag_clear(tp, TSO_CAPABLE);
13976 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13977 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13978 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13979 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13980 tg3_flag_set(tp, CLKREQ_BUG);
13981 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13982 tg3_flag_set(tp, L1PLLPD_EN);
13984 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13985 /* BCM5785 devices are effectively PCIe devices, and should
13986 * follow PCIe codepaths, but do not have a PCIe capabilities
13987 * section. */
13989 tg3_flag_set(tp, PCI_EXPRESS);
13990 } else if (!tg3_flag(tp, 5705_PLUS) ||
13991 tg3_flag(tp, 5780_CLASS)) {
13992 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13993 if (!tp->pcix_cap) {
13994 dev_err(&tp->pdev->dev,
13995 "Cannot find PCI-X capability, aborting\n");
13996 return -EIO;
13999 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14000 tg3_flag_set(tp, PCIX_MODE);
14003 /* If we have an AMD 762 or VIA K8T800 chipset, write
14004 * reordering to the mailbox registers done by the host
14005 * controller can cause major trouble. We read back from
14006 * every mailbox register write to force the writes to be
14007 * posted to the chip in order. */
14009 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14010 !tg3_flag(tp, PCI_EXPRESS))
14011 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14013 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14014 &tp->pci_cacheline_sz);
14015 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14016 &tp->pci_lat_timer);
14017 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14018 tp->pci_lat_timer < 64) {
14019 tp->pci_lat_timer = 64;
14020 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14021 tp->pci_lat_timer);
14024 /* Important! -- It is critical that the PCI-X hw workaround
14025 * situation is decided before the first MMIO register access. */
14027 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14028 /* 5700 BX chips need to have their TX producer index
14029 * mailboxes written twice to work around a bug. */
14031 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14033 /* If we are in PCI-X mode, enable register write workaround.
14035 * The workaround is to use indirect register accesses
14036 * for all chip writes not to mailbox registers. */
14038 if (tg3_flag(tp, PCIX_MODE)) {
14039 u32 pm_reg;
14041 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14043 /* The chip can have its power management PCI config
14044 * space registers clobbered due to this bug.
14045 * So explicitly force the chip into D0 here. */
14047 pci_read_config_dword(tp->pdev,
14048 tp->pm_cap + PCI_PM_CTRL,
14049 &pm_reg);
14050 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14051 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14052 pci_write_config_dword(tp->pdev,
14053 tp->pm_cap + PCI_PM_CTRL,
14054 pm_reg);
14056 /* Also, force SERR#/PERR# in PCI command. */
14057 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14058 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14059 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14063 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14064 tg3_flag_set(tp, PCI_HIGH_SPEED);
14065 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14066 tg3_flag_set(tp, PCI_32BIT);
14068 /* Chip-specific fixup from Broadcom driver */
14069 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14070 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14071 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14072 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14075 /* Default fast path register access methods */
14076 tp->read32 = tg3_read32;
14077 tp->write32 = tg3_write32;
14078 tp->read32_mbox = tg3_read32;
14079 tp->write32_mbox = tg3_write32;
14080 tp->write32_tx_mbox = tg3_write32;
14081 tp->write32_rx_mbox = tg3_write32;
14083 /* Various workaround register access methods */
14084 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14085 tp->write32 = tg3_write_indirect_reg32;
14086 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14087 (tg3_flag(tp, PCI_EXPRESS) &&
14088 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14090 /* Back to back register writes can cause problems on these
14091 * chips; the workaround is to read back all reg writes
14092 * except those to mailbox regs.
14094 * See tg3_write_indirect_reg32(). */
14096 tp->write32 = tg3_write_flush_reg32;
14099 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14100 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14101 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14102 tp->write32_rx_mbox = tg3_write_flush_reg32;
14105 if (tg3_flag(tp, ICH_WORKAROUND)) {
14106 tp->read32 = tg3_read_indirect_reg32;
14107 tp->write32 = tg3_write_indirect_reg32;
14108 tp->read32_mbox = tg3_read_indirect_mbox;
14109 tp->write32_mbox = tg3_write_indirect_mbox;
14110 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14111 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14113 iounmap(tp->regs);
14114 tp->regs = NULL;
14116 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14117 pci_cmd &= ~PCI_COMMAND_MEMORY;
14118 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14120 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14121 tp->read32_mbox = tg3_read32_mbox_5906;
14122 tp->write32_mbox = tg3_write32_mbox_5906;
14123 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14124 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14127 if (tp->write32 == tg3_write_indirect_reg32 ||
14128 (tg3_flag(tp, PCIX_MODE) &&
14129 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14131 tg3_flag_set(tp, SRAM_USE_CONFIG);
14133 /* The memory arbiter has to be enabled in order for SRAM accesses
14134 * to succeed. Normally on powerup the tg3 chip firmware will make
14135 * sure it is enabled, but other entities such as system netboot
14136 * code might disable it.
14137 */
14138 val = tr32(MEMARB_MODE);
14139 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14141 if (tg3_flag(tp, PCIX_MODE)) {
14142 pci_read_config_dword(tp->pdev,
14143 tp->pcix_cap + PCI_X_STATUS, &val);
14144 tp->pci_fn = val & 0x7;
14145 } else {
14146 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14149 /* Get eeprom hw config before calling tg3_set_power_state().
14150 * In particular, the TG3_FLAG_IS_NIC flag must be
14151 * determined before calling tg3_set_power_state() so that
14152 * we know whether or not to switch out of Vaux power.
14153 * When the flag is set, it means that GPIO1 is used for eeprom
14154 * write protect and also implies that it is a LOM where GPIOs
14155 * are not used to switch power.
14156 */
14157 tg3_get_eeprom_hw_cfg(tp);
14159 if (tg3_flag(tp, ENABLE_APE)) {
14160 /* Allow reads and writes to the
14161 * APE register and memory space.
14162 */
14163 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14164 PCISTATE_ALLOW_APE_SHMEM_WR |
14165 PCISTATE_ALLOW_APE_PSPACE_WR;
14166 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14167 pci_state_reg);
14169 tg3_ape_lock_init(tp);
14172 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14176 tg3_flag(tp, 57765_PLUS))
14177 tg3_flag_set(tp, CPMU_PRESENT);
14179 /* Set up tp->grc_local_ctrl before calling
14180 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14181 * will bring 5700's external PHY out of reset.
14182 * It is also used as eeprom write protect on LOMs.
14183 */
14184 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14186 tg3_flag(tp, EEPROM_WRITE_PROT))
14187 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14188 GRC_LCLCTRL_GPIO_OUTPUT1);
14189 /* Unused GPIO3 must be driven as output on 5752 because there
14190 * are no pull-up resistors on unused GPIO pins.
14191 */
14192 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14193 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14195 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14196 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14197 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14198 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14200 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14201 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14202 /* Turn off the debug UART. */
14203 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14204 if (tg3_flag(tp, IS_NIC))
14205 /* Keep VMain power. */
14206 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14207 GRC_LCLCTRL_GPIO_OUTPUT0;
14210 /* Switch out of Vaux if it is a NIC */
14211 tg3_pwrsrc_switch_to_vmain(tp);
14213 /* Derive initial jumbo mode from MTU assigned in
14214 * ether_setup() via the alloc_etherdev() call
14215 */
14216 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14217 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14219 /* Determine WakeOnLan speed to use. */
14220 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14221 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14222 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14223 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14224 tg3_flag_clear(tp, WOL_SPEED_100MB);
14225 } else {
14226 tg3_flag_set(tp, WOL_SPEED_100MB);
14229 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14230 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14232 /* A few boards don't want Ethernet@WireSpeed phy feature */
14233 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14234 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14235 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14236 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14237 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14238 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14239 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14241 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14242 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14243 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14244 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14245 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14247 if (tg3_flag(tp, 5705_PLUS) &&
14248 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14249 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14250 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14251 !tg3_flag(tp, 57765_PLUS)) {
14252 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14253 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14254 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14255 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14256 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14257 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14258 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14259 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14260 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14261 } else
14262 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14265 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14266 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14267 tp->phy_otp = tg3_read_otp_phycfg(tp);
14268 if (tp->phy_otp == 0)
14269 tp->phy_otp = TG3_OTP_DEFAULT;
14272 if (tg3_flag(tp, CPMU_PRESENT))
14273 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14274 else
14275 tp->mi_mode = MAC_MI_MODE_BASE;
14277 tp->coalesce_mode = 0;
14278 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14279 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14280 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14282 /* Set these bits to enable statistics workaround. */
14283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14284 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14285 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14286 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14287 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14290 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14291 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14292 tg3_flag_set(tp, USE_PHYLIB);
14294 err = tg3_mdio_init(tp);
14295 if (err)
14296 return err;
14298 /* Initialize data/descriptor byte/word swapping. */
14299 val = tr32(GRC_MODE);
14300 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14301 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14302 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14303 GRC_MODE_B2HRX_ENABLE |
14304 GRC_MODE_HTX2B_ENABLE |
14305 GRC_MODE_HOST_STACKUP);
14306 else
14307 val &= GRC_MODE_HOST_STACKUP;
14309 tw32(GRC_MODE, val | tp->grc_mode);
14311 tg3_switch_clocks(tp);
14313 /* Clear this out for sanity. */
14314 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14316 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14317 &pci_state_reg);
14318 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14319 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14320 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14322 if (chiprevid == CHIPREV_ID_5701_A0 ||
14323 chiprevid == CHIPREV_ID_5701_B0 ||
14324 chiprevid == CHIPREV_ID_5701_B2 ||
14325 chiprevid == CHIPREV_ID_5701_B5) {
14326 void __iomem *sram_base;
14328 /* Write some dummy words into the SRAM status block
14329 * area and see if they read back correctly. If the return
14330 * value is bad, force enable the PCIX workaround.
14331 */
14332 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14334 writel(0x00000000, sram_base);
14335 writel(0x00000000, sram_base + 4);
14336 writel(0xffffffff, sram_base + 4);
14337 if (readl(sram_base) != 0x00000000)
14338 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14342 udelay(50);
14343 tg3_nvram_init(tp);
14345 grc_misc_cfg = tr32(GRC_MISC_CFG);
14346 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14348 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14349 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14350 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14351 tg3_flag_set(tp, IS_5788);
14353 if (!tg3_flag(tp, IS_5788) &&
14354 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14355 tg3_flag_set(tp, TAGGED_STATUS);
14356 if (tg3_flag(tp, TAGGED_STATUS)) {
14357 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14358 HOSTCC_MODE_CLRTICK_TXBD);
14360 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14361 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14362 tp->misc_host_ctrl);
14365 /* Preserve the APE MAC_MODE bits */
14366 if (tg3_flag(tp, ENABLE_APE))
14367 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14368 else
14369 tp->mac_mode = TG3_DEF_MAC_MODE;
14371 /* these are limited to 10/100 only */
14372 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14373 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14374 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14375 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14376 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14377 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14378 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14379 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14380 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14381 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14382 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14383 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14384 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14385 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14386 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14387 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14389 err = tg3_phy_probe(tp);
14390 if (err) {
14391 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14392 /* ... but do not return immediately ... */
14393 tg3_mdio_fini(tp);
14396 tg3_read_vpd(tp);
14397 tg3_read_fw_ver(tp);
14399 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14400 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14401 } else {
14402 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14403 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14404 else
14405 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14408 /* 5700 {AX,BX} chips have a broken status block link
14409 * change bit implementation, so we must use the
14410 * status register in those cases.
14411 */
14412 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14413 tg3_flag_set(tp, USE_LINKCHG_REG);
14414 else
14415 tg3_flag_clear(tp, USE_LINKCHG_REG);
14417 /* The led_ctrl is set during tg3_phy_probe; here we might
14418 * have to force the link status polling mechanism based
14419 * upon subsystem IDs.
14420 */
14421 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14422 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14423 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14424 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14425 tg3_flag_set(tp, USE_LINKCHG_REG);
14428 /* For all SERDES we poll the MAC status register. */
14429 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14430 tg3_flag_set(tp, POLL_SERDES);
14431 else
14432 tg3_flag_clear(tp, POLL_SERDES);
14434 tp->rx_offset = NET_IP_ALIGN;
14435 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14437 tg3_flag(tp, PCIX_MODE)) {
14438 tp->rx_offset = 0;
14439 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14440 tp->rx_copy_thresh = ~(u16)0;
14441 #endif
14444 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14445 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14446 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14448 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14450 /* Increment the rx prod index on the rx std ring by at most
14451 * 8 for these chips to work around hw errata.
14452 */
14453 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14454 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14455 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14456 tp->rx_std_max_post = 8;
14458 if (tg3_flag(tp, ASPM_WORKAROUND))
14459 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14460 PCIE_PWR_MGMT_L1_THRESH_MSK;
14462 return err;
14465 #ifdef CONFIG_SPARC
14466 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14468 struct net_device *dev = tp->dev;
14469 struct pci_dev *pdev = tp->pdev;
14470 struct device_node *dp = pci_device_to_OF_node(pdev);
14471 const unsigned char *addr;
14472 int len;
14474 addr = of_get_property(dp, "local-mac-address", &len);
14475 if (addr && len == 6) {
14476 memcpy(dev->dev_addr, addr, 6);
14477 memcpy(dev->perm_addr, dev->dev_addr, 6);
14478 return 0;
14480 return -ENODEV;
14483 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14485 struct net_device *dev = tp->dev;
14487 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14488 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14489 return 0;
14491 #endif
14493 static int __devinit tg3_get_device_address(struct tg3 *tp)
14495 struct net_device *dev = tp->dev;
14496 u32 hi, lo, mac_offset;
14497 int addr_ok = 0;
14499 #ifdef CONFIG_SPARC
14500 if (!tg3_get_macaddr_sparc(tp))
14501 return 0;
14502 #endif
14504 mac_offset = 0x7c;
14505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14506 tg3_flag(tp, 5780_CLASS)) {
14507 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14508 mac_offset = 0xcc;
14509 if (tg3_nvram_lock(tp))
14510 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14511 else
14512 tg3_nvram_unlock(tp);
14513 } else if (tg3_flag(tp, 5717_PLUS)) {
14514 if (tp->pci_fn & 1)
14515 mac_offset = 0xcc;
14516 if (tp->pci_fn > 1)
14517 mac_offset += 0x18c;
14518 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14519 mac_offset = 0x10;
14521 /* First try to get it from MAC address mailbox. */
14522 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
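/* 0x484b is the ASCII pair 'H', 'K' -- presumably a validity
 * signature the bootcode stores ahead of the MAC address.
 */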
14523 if ((hi >> 16) == 0x484b) {
14524 dev->dev_addr[0] = (hi >> 8) & 0xff;
14525 dev->dev_addr[1] = (hi >> 0) & 0xff;
14527 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14528 dev->dev_addr[2] = (lo >> 24) & 0xff;
14529 dev->dev_addr[3] = (lo >> 16) & 0xff;
14530 dev->dev_addr[4] = (lo >> 8) & 0xff;
14531 dev->dev_addr[5] = (lo >> 0) & 0xff;
14533 /* Some old bootcode may report a 0 MAC address in SRAM */
14534 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14536 if (!addr_ok) {
14537 /* Next, try NVRAM. */
14538 if (!tg3_flag(tp, NO_NVRAM) &&
14539 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14540 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14541 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14542 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14544 /* Finally just fetch it out of the MAC control regs. */
14545 else {
14546 hi = tr32(MAC_ADDR_0_HIGH);
14547 lo = tr32(MAC_ADDR_0_LOW);
14549 dev->dev_addr[5] = lo & 0xff;
14550 dev->dev_addr[4] = (lo >> 8) & 0xff;
14551 dev->dev_addr[3] = (lo >> 16) & 0xff;
14552 dev->dev_addr[2] = (lo >> 24) & 0xff;
14553 dev->dev_addr[1] = hi & 0xff;
14554 dev->dev_addr[0] = (hi >> 8) & 0xff;
14558 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14559 #ifdef CONFIG_SPARC
14560 if (!tg3_get_default_macaddr_sparc(tp))
14561 return 0;
14562 #endif
14563 return -EINVAL;
14565 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14566 return 0;
14569 #define BOUNDARY_SINGLE_CACHELINE 1
14570 #define BOUNDARY_MULTI_CACHELINE 2
14572 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14574 int cacheline_size;
14575 u8 byte;
14576 int goal;
14578 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14579 if (byte == 0)
14580 cacheline_size = 1024;
14581 else
14582 cacheline_size = (int) byte * 4;
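/* PCI_CACHE_LINE_SIZE is expressed in 32-bit dwords, hence the * 4:
 * e.g. a config value of 0x10 means a 64-byte cache line. A value of
 * 0 (firmware never set it) is treated pessimistically as 1024 above.
 */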
14584 /* On 5703 and later chips, the boundary bits have no
14585 * effect.
14586 */
14587 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14588 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14589 !tg3_flag(tp, PCI_EXPRESS))
14590 goto out;
14592 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14593 goal = BOUNDARY_MULTI_CACHELINE;
14594 #else
14595 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14596 goal = BOUNDARY_SINGLE_CACHELINE;
14597 #else
14598 goal = 0;
14599 #endif
14600 #endif
14602 if (tg3_flag(tp, 57765_PLUS)) {
14603 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14604 goto out;
14607 if (!goal)
14608 goto out;
14610 /* PCI controllers on most RISC systems tend to disconnect
14611 * when a device tries to burst across a cache-line boundary.
14612 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14614 * Unfortunately, for PCI-E there are only limited
14615 * write-side controls for this, and thus for reads
14616 * we will still get the disconnects. We'll also waste
14617 * these PCI cycles for both read and write for chips
14618 * other than 5700 and 5701 which do not implement the
14619 * boundary bits.
14620 */
14621 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14622 switch (cacheline_size) {
14623 case 16:
14624 case 32:
14625 case 64:
14626 case 128:
14627 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14628 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14629 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14630 } else {
14631 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14632 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14634 break;
14636 case 256:
14637 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14638 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14639 break;
14641 default:
14642 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14643 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14644 break;
14646 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14647 switch (cacheline_size) {
14648 case 16:
14649 case 32:
14650 case 64:
14651 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14652 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14653 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14654 break;
14656 /* fallthrough */
14657 case 128:
14658 default:
14659 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14660 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14661 break;
14663 } else {
14664 switch (cacheline_size) {
14665 case 16:
14666 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14667 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14668 DMA_RWCTRL_WRITE_BNDRY_16);
14669 break;
14671 /* fallthrough */
14672 case 32:
14673 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14674 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14675 DMA_RWCTRL_WRITE_BNDRY_32);
14676 break;
14678 /* fallthrough */
14679 case 64:
14680 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14681 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14682 DMA_RWCTRL_WRITE_BNDRY_64);
14683 break;
14685 /* fallthrough */
14686 case 128:
14687 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14688 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14689 DMA_RWCTRL_WRITE_BNDRY_128);
14690 break;
14692 /* fallthrough */
14693 case 256:
14694 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14695 DMA_RWCTRL_WRITE_BNDRY_256);
14696 break;
14697 case 512:
14698 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14699 DMA_RWCTRL_WRITE_BNDRY_512);
14700 break;
14701 case 1024:
14702 default:
14703 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14704 DMA_RWCTRL_WRITE_BNDRY_1024);
14705 break;
14709 out:
14710 return val;
14713 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14715 struct tg3_internal_buffer_desc test_desc;
14716 u32 sram_dma_descs;
14717 int i, ret;
14719 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14721 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14722 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14723 tw32(RDMAC_STATUS, 0);
14724 tw32(WDMAC_STATUS, 0);
14726 tw32(BUFMGR_MODE, 0);
14727 tw32(FTQ_RESET, 0);
14729 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14730 test_desc.addr_lo = buf_dma & 0xffffffff;
14731 test_desc.nic_mbuf = 0x00002100;
14732 test_desc.len = size;
14734 /*
14735 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14736 * the *second* time the tg3 driver was getting loaded after an
14737 * initial scan.
14739 * Broadcom tells me:
14740 * ...the DMA engine is connected to the GRC block and a DMA
14741 * reset may affect the GRC block in some unpredictable way...
14742 * The behavior of resets to individual blocks has not been tested.
14744 * Broadcom noted the GRC reset will also reset all sub-components.
14745 */
14746 if (to_device) {
14747 test_desc.cqid_sqid = (13 << 8) | 2;
14749 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14750 udelay(40);
14751 } else {
14752 test_desc.cqid_sqid = (16 << 8) | 7;
14754 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14755 udelay(40);
14757 test_desc.flags = 0x00000005;
14759 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14760 u32 val;
14762 val = *(((u32 *)&test_desc) + i);
14763 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14764 sram_dma_descs + (i * sizeof(u32)));
14765 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14767 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
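/* The loop above stores the descriptor into NIC SRAM through the PCI
 * memory window: point TG3PCI_MEM_WIN_BASE_ADDR at the SRAM address,
 * write one u32 to TG3PCI_MEM_WIN_DATA, repeat, then close the window
 * by resetting the base address to 0.
 */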
14769 if (to_device)
14770 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14771 else
14772 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14774 ret = -ENODEV;
14775 for (i = 0; i < 40; i++) {
14776 u32 val;
14778 if (to_device)
14779 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14780 else
14781 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14782 if ((val & 0xffff) == sram_dma_descs) {
14783 ret = 0;
14784 break;
14787 udelay(100);
14790 return ret;
14793 #define TEST_BUFFER_SIZE 0x2000
14795 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14796 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14797 { },
14800 static int __devinit tg3_test_dma(struct tg3 *tp)
14802 dma_addr_t buf_dma;
14803 u32 *buf, saved_dma_rwctrl;
14804 int ret = 0;
14806 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14807 &buf_dma, GFP_KERNEL);
14808 if (!buf) {
14809 ret = -ENOMEM;
14810 goto out_nofree;
14813 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14814 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14816 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14818 if (tg3_flag(tp, 57765_PLUS))
14819 goto out;
14821 if (tg3_flag(tp, PCI_EXPRESS)) {
14822 /* DMA read watermark not used on PCIE */
14823 tp->dma_rwctrl |= 0x00180000;
14824 } else if (!tg3_flag(tp, PCIX_MODE)) {
14825 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14826 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14827 tp->dma_rwctrl |= 0x003f0000;
14828 else
14829 tp->dma_rwctrl |= 0x003f000f;
14830 } else {
14831 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14832 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14833 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14834 u32 read_water = 0x7;
14836 /* If the 5704 is behind the EPB bridge, we can
14837 * do the less restrictive ONE_DMA workaround for
14838 * better performance.
14839 */
14840 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14841 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14842 tp->dma_rwctrl |= 0x8000;
14843 else if (ccval == 0x6 || ccval == 0x7)
14844 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14846 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14847 read_water = 4;
14848 /* Set bit 23 to enable PCIX hw bug fix */
14849 tp->dma_rwctrl |=
14850 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14851 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14852 (1 << 23);
14853 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14854 /* 5780 always in PCIX mode */
14855 tp->dma_rwctrl |= 0x00144000;
14856 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14857 /* 5714 always in PCIX mode */
14858 tp->dma_rwctrl |= 0x00148000;
14859 } else {
14860 tp->dma_rwctrl |= 0x001b000f;
14864 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14865 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14866 tp->dma_rwctrl &= 0xfffffff0;
14868 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14869 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14870 /* Remove this if it causes problems for some boards. */
14871 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14873 /* On 5700/5701 chips, we need to set this bit.
14874 * Otherwise the chip will issue cacheline transactions
14875 * to streamable DMA memory with not all the byte
14876 * enables turned on. This is an error on several
14877 * RISC PCI controllers, in particular sparc64.
14879 * On 5703/5704 chips, this bit has been reassigned
14880 * a different meaning. In particular, it is used
14881 * on those chips to enable a PCI-X workaround.
14882 */
14883 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14886 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14888 #if 0
14889 /* Unneeded, already done by tg3_get_invariants. */
14890 tg3_switch_clocks(tp);
14891 #endif
14893 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14894 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14895 goto out;
14897 /* It is best to perform DMA test with maximum write burst size
14898 * to expose the 5700/5701 write DMA bug.
14899 */
14900 saved_dma_rwctrl = tp->dma_rwctrl;
14901 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14902 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
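/* Each pass of the loop below DMAs an incrementing word pattern to
 * the chip and reads it back; on a mismatch the write boundary is
 * clamped to 16 bytes and the transfer retried before giving up.
 */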
14904 while (1) {
14905 u32 *p = buf, i;
14907 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14908 p[i] = i;
14910 /* Send the buffer to the chip. */
14911 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14912 if (ret) {
14913 dev_err(&tp->pdev->dev,
14914 "%s: Buffer write failed. err = %d\n",
14915 __func__, ret);
14916 break;
14919 #if 0
14920 /* validate data reached card RAM correctly. */
14921 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14922 u32 val;
14923 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14924 if (le32_to_cpu(val) != p[i]) {
14925 dev_err(&tp->pdev->dev,
14926 "%s: Buffer corrupted on device! "
14927 "(%d != %d)\n", __func__, val, i);
14928 /* ret = -ENODEV here? */
14930 p[i] = 0;
14932 #endif
14933 /* Now read it back. */
14934 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14935 if (ret) {
14936 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14937 "err = %d\n", __func__, ret);
14938 break;
14941 /* Verify it. */
14942 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14943 if (p[i] == i)
14944 continue;
14946 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14947 DMA_RWCTRL_WRITE_BNDRY_16) {
14948 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14949 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14950 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14951 break;
14952 } else {
14953 dev_err(&tp->pdev->dev,
14954 "%s: Buffer corrupted on read back! "
14955 "(%d != %d)\n", __func__, p[i], i);
14956 ret = -ENODEV;
14957 goto out;
14961 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14962 /* Success. */
14963 ret = 0;
14964 break;
14967 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14968 DMA_RWCTRL_WRITE_BNDRY_16) {
14969 /* DMA test passed without adjusting DMA boundary,
14970 * now look for chipsets that are known to expose the
14971 * DMA bug without failing the test.
14972 */
14973 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14974 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14975 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14976 } else {
14977 /* Safe to use the calculated DMA boundary. */
14978 tp->dma_rwctrl = saved_dma_rwctrl;
14981 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14984 out:
14985 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14986 out_nofree:
14987 return ret;
14990 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14992 if (tg3_flag(tp, 57765_PLUS)) {
14993 tp->bufmgr_config.mbuf_read_dma_low_water =
14994 DEFAULT_MB_RDMA_LOW_WATER_5705;
14995 tp->bufmgr_config.mbuf_mac_rx_low_water =
14996 DEFAULT_MB_MACRX_LOW_WATER_57765;
14997 tp->bufmgr_config.mbuf_high_water =
14998 DEFAULT_MB_HIGH_WATER_57765;
15000 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15001 DEFAULT_MB_RDMA_LOW_WATER_5705;
15002 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15003 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15004 tp->bufmgr_config.mbuf_high_water_jumbo =
15005 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15006 } else if (tg3_flag(tp, 5705_PLUS)) {
15007 tp->bufmgr_config.mbuf_read_dma_low_water =
15008 DEFAULT_MB_RDMA_LOW_WATER_5705;
15009 tp->bufmgr_config.mbuf_mac_rx_low_water =
15010 DEFAULT_MB_MACRX_LOW_WATER_5705;
15011 tp->bufmgr_config.mbuf_high_water =
15012 DEFAULT_MB_HIGH_WATER_5705;
15013 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15014 tp->bufmgr_config.mbuf_mac_rx_low_water =
15015 DEFAULT_MB_MACRX_LOW_WATER_5906;
15016 tp->bufmgr_config.mbuf_high_water =
15017 DEFAULT_MB_HIGH_WATER_5906;
15020 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15021 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15022 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15023 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15024 tp->bufmgr_config.mbuf_high_water_jumbo =
15025 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15026 } else {
15027 tp->bufmgr_config.mbuf_read_dma_low_water =
15028 DEFAULT_MB_RDMA_LOW_WATER;
15029 tp->bufmgr_config.mbuf_mac_rx_low_water =
15030 DEFAULT_MB_MACRX_LOW_WATER;
15031 tp->bufmgr_config.mbuf_high_water =
15032 DEFAULT_MB_HIGH_WATER;
15034 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15035 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15036 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15037 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15038 tp->bufmgr_config.mbuf_high_water_jumbo =
15039 DEFAULT_MB_HIGH_WATER_JUMBO;
15042 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15043 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15046 static char * __devinit tg3_phy_string(struct tg3 *tp)
15048 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15049 case TG3_PHY_ID_BCM5400: return "5400";
15050 case TG3_PHY_ID_BCM5401: return "5401";
15051 case TG3_PHY_ID_BCM5411: return "5411";
15052 case TG3_PHY_ID_BCM5701: return "5701";
15053 case TG3_PHY_ID_BCM5703: return "5703";
15054 case TG3_PHY_ID_BCM5704: return "5704";
15055 case TG3_PHY_ID_BCM5705: return "5705";
15056 case TG3_PHY_ID_BCM5750: return "5750";
15057 case TG3_PHY_ID_BCM5752: return "5752";
15058 case TG3_PHY_ID_BCM5714: return "5714";
15059 case TG3_PHY_ID_BCM5780: return "5780";
15060 case TG3_PHY_ID_BCM5755: return "5755";
15061 case TG3_PHY_ID_BCM5787: return "5787";
15062 case TG3_PHY_ID_BCM5784: return "5784";
15063 case TG3_PHY_ID_BCM5756: return "5722/5756";
15064 case TG3_PHY_ID_BCM5906: return "5906";
15065 case TG3_PHY_ID_BCM5761: return "5761";
15066 case TG3_PHY_ID_BCM5718C: return "5718C";
15067 case TG3_PHY_ID_BCM5718S: return "5718S";
15068 case TG3_PHY_ID_BCM57765: return "57765";
15069 case TG3_PHY_ID_BCM5719C: return "5719C";
15070 case TG3_PHY_ID_BCM5720C: return "5720C";
15071 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15072 case 0: return "serdes";
15073 default: return "unknown";
15077 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15079 if (tg3_flag(tp, PCI_EXPRESS)) {
15080 strcpy(str, "PCI Express");
15081 return str;
15082 } else if (tg3_flag(tp, PCIX_MODE)) {
15083 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15085 strcpy(str, "PCIX:");
15087 if ((clock_ctrl == 7) ||
15088 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15089 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15090 strcat(str, "133MHz");
15091 else if (clock_ctrl == 0)
15092 strcat(str, "33MHz");
15093 else if (clock_ctrl == 2)
15094 strcat(str, "50MHz");
15095 else if (clock_ctrl == 4)
15096 strcat(str, "66MHz");
15097 else if (clock_ctrl == 6)
15098 strcat(str, "100MHz");
15099 } else {
15100 strcpy(str, "PCI:");
15101 if (tg3_flag(tp, PCI_HIGH_SPEED))
15102 strcat(str, "66MHz");
15103 else
15104 strcat(str, "33MHz");
15106 if (tg3_flag(tp, PCI_32BIT))
15107 strcat(str, ":32-bit");
15108 else
15109 strcat(str, ":64-bit");
15110 return str;
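/* Typical strings produced here: "PCI Express", "PCIX:133MHz:64-bit"
 * or "PCI:66MHz:32-bit".
 */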
15113 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15115 struct pci_dev *peer;
15116 unsigned int func, devnr = tp->pdev->devfn & ~7;
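/* devfn & ~7 masks off the 3-bit function number, so the loop scans
 * all eight possible functions in this slot for the twin port.
 */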
15118 for (func = 0; func < 8; func++) {
15119 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15120 if (peer && peer != tp->pdev)
15121 break;
15122 pci_dev_put(peer);
15124 /* 5704 can be configured in single-port mode, set peer to
15125 * tp->pdev in that case.
15126 */
15127 if (!peer) {
15128 peer = tp->pdev;
15129 return peer;
15132 /*
15133 * We don't need to keep the refcount elevated; there's no way
15134 * to remove one half of this device without removing the other
15135 */
15136 pci_dev_put(peer);
15138 return peer;
15141 static void __devinit tg3_init_coal(struct tg3 *tp)
15143 struct ethtool_coalesce *ec = &tp->coal;
15145 memset(ec, 0, sizeof(*ec));
15146 ec->cmd = ETHTOOL_GCOALESCE;
15147 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15148 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15149 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15150 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15151 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15152 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15153 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15154 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15155 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15157 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15158 HOSTCC_MODE_CLRTICK_TXBD)) {
15159 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15160 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15161 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15162 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15165 if (tg3_flag(tp, 5705_PLUS)) {
15166 ec->rx_coalesce_usecs_irq = 0;
15167 ec->tx_coalesce_usecs_irq = 0;
15168 ec->stats_block_coalesce_usecs = 0;
15172 static const struct net_device_ops tg3_netdev_ops = {
15173 .ndo_open = tg3_open,
15174 .ndo_stop = tg3_close,
15175 .ndo_start_xmit = tg3_start_xmit,
15176 .ndo_get_stats64 = tg3_get_stats64,
15177 .ndo_validate_addr = eth_validate_addr,
15178 .ndo_set_multicast_list = tg3_set_rx_mode,
15179 .ndo_set_mac_address = tg3_set_mac_addr,
15180 .ndo_do_ioctl = tg3_ioctl,
15181 .ndo_tx_timeout = tg3_tx_timeout,
15182 .ndo_change_mtu = tg3_change_mtu,
15183 .ndo_fix_features = tg3_fix_features,
15184 .ndo_set_features = tg3_set_features,
15185 #ifdef CONFIG_NET_POLL_CONTROLLER
15186 .ndo_poll_controller = tg3_poll_controller,
15187 #endif
15190 static int __devinit tg3_init_one(struct pci_dev *pdev,
15191 const struct pci_device_id *ent)
15193 struct net_device *dev;
15194 struct tg3 *tp;
15195 int i, err, pm_cap;
15196 u32 sndmbx, rcvmbx, intmbx;
15197 char str[40];
15198 u64 dma_mask, persist_dma_mask;
15199 u32 features = 0;
15201 printk_once(KERN_INFO "%s\n", version);
15203 err = pci_enable_device(pdev);
15204 if (err) {
15205 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15206 return err;
15209 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15210 if (err) {
15211 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15212 goto err_out_disable_pdev;
15215 pci_set_master(pdev);
15217 /* Find power-management capability. */
15218 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15219 if (pm_cap == 0) {
15220 dev_err(&pdev->dev,
15221 "Cannot find Power Management capability, aborting\n");
15222 err = -EIO;
15223 goto err_out_free_res;
15226 err = pci_set_power_state(pdev, PCI_D0);
15227 if (err) {
15228 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15229 goto err_out_free_res;
15232 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15233 if (!dev) {
15234 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15235 err = -ENOMEM;
15236 goto err_out_power_down;
15239 SET_NETDEV_DEV(dev, &pdev->dev);
15241 tp = netdev_priv(dev);
15242 tp->pdev = pdev;
15243 tp->dev = dev;
15244 tp->pm_cap = pm_cap;
15245 tp->rx_mode = TG3_DEF_RX_MODE;
15246 tp->tx_mode = TG3_DEF_TX_MODE;
15248 if (tg3_debug > 0)
15249 tp->msg_enable = tg3_debug;
15250 else
15251 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15253 /* The word/byte swap controls here control register access byte
15254 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15255 * setting below.
15256 */
15257 tp->misc_host_ctrl =
15258 MISC_HOST_CTRL_MASK_PCI_INT |
15259 MISC_HOST_CTRL_WORD_SWAP |
15260 MISC_HOST_CTRL_INDIR_ACCESS |
15261 MISC_HOST_CTRL_PCISTATE_RW;
15263 /* The NONFRM (non-frame) byte/word swap controls take effect
15264 * on descriptor entries, anything which isn't packet data.
15266 * The StrongARM chips on the board (one for tx, one for rx)
15267 * are running in big-endian mode.
15268 */
15269 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15270 GRC_MODE_WSWAP_NONFRM_DATA);
15271 #ifdef __BIG_ENDIAN
15272 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15273 #endif
15274 spin_lock_init(&tp->lock);
15275 spin_lock_init(&tp->indirect_lock);
15276 INIT_WORK(&tp->reset_task, tg3_reset_task);
15278 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15279 if (!tp->regs) {
15280 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15281 err = -ENOMEM;
15282 goto err_out_free_dev;
15285 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15286 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15287 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15288 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15289 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15290 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15291 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15292 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15293 tg3_flag_set(tp, ENABLE_APE);
15294 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15295 if (!tp->aperegs) {
15296 dev_err(&pdev->dev,
15297 "Cannot map APE registers, aborting\n");
15298 err = -ENOMEM;
15299 goto err_out_iounmap;
15303 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15304 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15306 dev->ethtool_ops = &tg3_ethtool_ops;
15307 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15308 dev->netdev_ops = &tg3_netdev_ops;
15309 dev->irq = pdev->irq;
15311 err = tg3_get_invariants(tp);
15312 if (err) {
15313 dev_err(&pdev->dev,
15314 "Problem fetching invariants of chip, aborting\n");
15315 goto err_out_apeunmap;
15318 /* The EPB bridge inside 5714, 5715, and 5780 and any
15319 * device behind the EPB cannot support DMA addresses > 40-bit.
15320 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15321 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15322 * do DMA address check in tg3_start_xmit().
15323 */
15324 if (tg3_flag(tp, IS_5788))
15325 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15326 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15327 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15328 #ifdef CONFIG_HIGHMEM
15329 dma_mask = DMA_BIT_MASK(64);
15330 #endif
15331 } else
15332 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
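/* Note the split: dma_mask governs streaming (packet) mappings while
 * persist_dma_mask is applied to coherent allocations below. With
 * CONFIG_HIGHMEM the 40-bit parts still stream with a 64-bit mask and
 * rely on the tg3_start_xmit() address check mentioned above.
 */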
15334 /* Configure DMA attributes. */
15335 if (dma_mask > DMA_BIT_MASK(32)) {
15336 err = pci_set_dma_mask(pdev, dma_mask);
15337 if (!err) {
15338 features |= NETIF_F_HIGHDMA;
15339 err = pci_set_consistent_dma_mask(pdev,
15340 persist_dma_mask);
15341 if (err < 0) {
15342 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15343 "DMA for consistent allocations\n");
15344 goto err_out_apeunmap;
15348 if (err || dma_mask == DMA_BIT_MASK(32)) {
15349 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15350 if (err) {
15351 dev_err(&pdev->dev,
15352 "No usable DMA configuration, aborting\n");
15353 goto err_out_apeunmap;
15357 tg3_init_bufmgr_config(tp);
15359 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15361 /* 5700 B0 chips do not support checksumming correctly due
15362 * to hardware bugs.
15363 */
15364 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15365 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15367 if (tg3_flag(tp, 5755_PLUS))
15368 features |= NETIF_F_IPV6_CSUM;
15371 /* TSO is on by default on chips that support hardware TSO.
15372 * Firmware TSO on older chips gives lower performance, so it
15373 * is off by default, but can be enabled using ethtool.
15374 */
15375 if ((tg3_flag(tp, HW_TSO_1) ||
15376 tg3_flag(tp, HW_TSO_2) ||
15377 tg3_flag(tp, HW_TSO_3)) &&
15378 (features & NETIF_F_IP_CSUM))
15379 features |= NETIF_F_TSO;
15380 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15381 if (features & NETIF_F_IPV6_CSUM)
15382 features |= NETIF_F_TSO6;
15383 if (tg3_flag(tp, HW_TSO_3) ||
15384 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15385 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15386 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15387 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15388 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15389 features |= NETIF_F_TSO_ECN;
15392 dev->features |= features;
15393 dev->vlan_features |= features;
15395 /*
15396 * Add loopback capability only for a subset of devices that support
15397 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15398 * loopback for the remaining devices.
15399 */
15400 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15401 !tg3_flag(tp, CPMU_PRESENT))
15402 /* Add the loopback capability */
15403 features |= NETIF_F_LOOPBACK;
15405 dev->hw_features |= features;
15407 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15408 !tg3_flag(tp, TSO_CAPABLE) &&
15409 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15410 tg3_flag_set(tp, MAX_RXPEND_64);
15411 tp->rx_pending = 63;
15414 err = tg3_get_device_address(tp);
15415 if (err) {
15416 dev_err(&pdev->dev,
15417 "Could not obtain valid ethernet address, aborting\n");
15418 goto err_out_apeunmap;
15421 /*
15422 * Reset the chip in case an UNDI or EFI driver did not shut it down;
15423 * the DMA self test will enable the WDMAC and we'll see (spurious)
15424 * pending DMA on the PCI bus at that point.
15425 */
15426 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15427 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15428 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15429 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15432 err = tg3_test_dma(tp);
15433 if (err) {
15434 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15435 goto err_out_apeunmap;
15438 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15439 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15440 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15441 for (i = 0; i < tp->irq_max; i++) {
15442 struct tg3_napi *tnapi = &tp->napi[i];
15444 tnapi->tp = tp;
15445 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15447 tnapi->int_mbox = intmbx;
15448 if (i < 4)
15449 intmbx += 0x8;
15450 else
15451 intmbx += 0x4;
15453 tnapi->consmbox = rcvmbx;
15454 tnapi->prodmbox = sndmbx;
15456 if (i)
15457 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15458 else
15459 tnapi->coal_now = HOSTCC_MODE_NOW;
15461 if (!tg3_flag(tp, SUPPORT_MSIX))
15462 break;
15464 /*
15465 * If we support MSIX, we'll be using RSS. If we're using
15466 * RSS, the first vector only handles link interrupts and the
15467 * remaining vectors handle rx and tx interrupts. Reuse the
15468 * mailbox values for the next iteration. The values we set up
15469 * above are still useful for the single vectored mode.
15470 */
15471 if (!i)
15472 continue;
15474 rcvmbx += 0x8;
15476 if (sndmbx & 0x4)
15477 sndmbx -= 0x4;
15478 else
15479 sndmbx += 0xc;
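/* Worked example of the arithmetic above, assuming the producer
 * mailbox base is 8-byte aligned: successive vectors land at
 * base + 0x4, 0x4, 0x0, 0xc, 0x8, 0x14, 0x10, ... -- vectors 0 and 1
 * share one mailbox, after which both 32-bit halves of each 64-bit
 * mailbox slot are pressed into service.
 */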
15482 tg3_init_coal(tp);
15484 pci_set_drvdata(pdev, dev);
15486 if (tg3_flag(tp, 5717_PLUS)) {
15487 /* Resume a low-power mode */
15488 tg3_frob_aux_power(tp, false);
15491 err = register_netdev(dev);
15492 if (err) {
15493 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15494 goto err_out_apeunmap;
15497 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15498 tp->board_part_number,
15499 tp->pci_chip_rev_id,
15500 tg3_bus_string(tp, str),
15501 dev->dev_addr);
15503 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15504 struct phy_device *phydev;
15505 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15506 netdev_info(dev,
15507 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15508 phydev->drv->name, dev_name(&phydev->dev));
15509 } else {
15510 char *ethtype;
15512 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15513 ethtype = "10/100Base-TX";
15514 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15515 ethtype = "1000Base-SX";
15516 else
15517 ethtype = "10/100/1000Base-T";
15519 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15520 "(WireSpeed[%d], EEE[%d])\n",
15521 tg3_phy_string(tp), ethtype,
15522 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15523 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15526 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15527 (dev->features & NETIF_F_RXCSUM) != 0,
15528 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15529 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15530 tg3_flag(tp, ENABLE_ASF) != 0,
15531 tg3_flag(tp, TSO_CAPABLE) != 0);
15532 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15533 tp->dma_rwctrl,
15534 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15535 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15537 pci_save_state(pdev);
15539 return 0;
15541 err_out_apeunmap:
15542 if (tp->aperegs) {
15543 iounmap(tp->aperegs);
15544 tp->aperegs = NULL;
15547 err_out_iounmap:
15548 if (tp->regs) {
15549 iounmap(tp->regs);
15550 tp->regs = NULL;
15553 err_out_free_dev:
15554 free_netdev(dev);
15556 err_out_power_down:
15557 pci_set_power_state(pdev, PCI_D3hot);
15559 err_out_free_res:
15560 pci_release_regions(pdev);
15562 err_out_disable_pdev:
15563 pci_disable_device(pdev);
15564 pci_set_drvdata(pdev, NULL);
15565 return err;
15568 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15570 struct net_device *dev = pci_get_drvdata(pdev);
15572 if (dev) {
15573 struct tg3 *tp = netdev_priv(dev);
15575 if (tp->fw)
15576 release_firmware(tp->fw);
15578 cancel_work_sync(&tp->reset_task);
15580 if (tg3_flag(tp, USE_PHYLIB)) {
15581 tg3_phy_fini(tp);
15582 tg3_mdio_fini(tp);
15585 unregister_netdev(dev);
15586 if (tp->aperegs) {
15587 iounmap(tp->aperegs);
15588 tp->aperegs = NULL;
15590 if (tp->regs) {
15591 iounmap(tp->regs);
15592 tp->regs = NULL;
15594 free_netdev(dev);
15595 pci_release_regions(pdev);
15596 pci_disable_device(pdev);
15597 pci_set_drvdata(pdev, NULL);
15601 #ifdef CONFIG_PM_SLEEP
15602 static int tg3_suspend(struct device *device)
15604 struct pci_dev *pdev = to_pci_dev(device);
15605 struct net_device *dev = pci_get_drvdata(pdev);
15606 struct tg3 *tp = netdev_priv(dev);
15607 int err;
15609 if (!netif_running(dev))
15610 return 0;
15612 flush_work_sync(&tp->reset_task);
15613 tg3_phy_stop(tp);
15614 tg3_netif_stop(tp);
15616 del_timer_sync(&tp->timer);
15618 tg3_full_lock(tp, 1);
15619 tg3_disable_ints(tp);
15620 tg3_full_unlock(tp);
15622 netif_device_detach(dev);
15624 tg3_full_lock(tp, 0);
15625 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15626 tg3_flag_clear(tp, INIT_COMPLETE);
15627 tg3_full_unlock(tp);
15629 err = tg3_power_down_prepare(tp);
15630 if (err) {
15631 int err2;
15633 tg3_full_lock(tp, 0);
15635 tg3_flag_set(tp, INIT_COMPLETE);
15636 err2 = tg3_restart_hw(tp, 1);
15637 if (err2)
15638 goto out;
15640 tp->timer.expires = jiffies + tp->timer_offset;
15641 add_timer(&tp->timer);
15643 netif_device_attach(dev);
15644 tg3_netif_start(tp);
15646 out:
15647 tg3_full_unlock(tp);
15649 if (!err2)
15650 tg3_phy_start(tp);
15653 return err;
15656 static int tg3_resume(struct device *device)
15658 struct pci_dev *pdev = to_pci_dev(device);
15659 struct net_device *dev = pci_get_drvdata(pdev);
15660 struct tg3 *tp = netdev_priv(dev);
15661 int err;
15663 if (!netif_running(dev))
15664 return 0;
15666 netif_device_attach(dev);
15668 tg3_full_lock(tp, 0);
15670 tg3_flag_set(tp, INIT_COMPLETE);
15671 err = tg3_restart_hw(tp, 1);
15672 if (err)
15673 goto out;
15675 tp->timer.expires = jiffies + tp->timer_offset;
15676 add_timer(&tp->timer);
15678 tg3_netif_start(tp);
15680 out:
15681 tg3_full_unlock(tp);
15683 if (!err)
15684 tg3_phy_start(tp);
15686 return err;
15689 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15690 #define TG3_PM_OPS (&tg3_pm_ops)
15692 #else
15694 #define TG3_PM_OPS NULL
15696 #endif /* CONFIG_PM_SLEEP */
15698 /**
15699 * tg3_io_error_detected - called when PCI error is detected
15700 * @pdev: Pointer to PCI device
15701 * @state: The current pci connection state
15703 * This function is called after a PCI bus error affecting
15704 * this device has been detected.
15705 */
15706 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15707 pci_channel_state_t state)
15709 struct net_device *netdev = pci_get_drvdata(pdev);
15710 struct tg3 *tp = netdev_priv(netdev);
15711 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15713 netdev_info(netdev, "PCI I/O error detected\n");
15715 rtnl_lock();
15717 if (!netif_running(netdev))
15718 goto done;
15720 tg3_phy_stop(tp);
15722 tg3_netif_stop(tp);
15724 del_timer_sync(&tp->timer);
15725 tg3_flag_clear(tp, RESTART_TIMER);
15727 /* Want to make sure that the reset task doesn't run */
15728 cancel_work_sync(&tp->reset_task);
15729 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15730 tg3_flag_clear(tp, RESTART_TIMER);
15732 netif_device_detach(netdev);
15734 /* Clean up software state, even if MMIO is blocked */
15735 tg3_full_lock(tp, 0);
15736 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15737 tg3_full_unlock(tp);
15739 done:
15740 if (state == pci_channel_io_perm_failure)
15741 err = PCI_ERS_RESULT_DISCONNECT;
15742 else
15743 pci_disable_device(pdev);
15745 rtnl_unlock();
15747 return err;
15750 /**
15751 * tg3_io_slot_reset - called after the pci bus has been reset.
15752 * @pdev: Pointer to PCI device
15754 * Restart the card from scratch, as if from a cold-boot.
15755 * At this point, the card has experienced a hard reset,
15756 * followed by fixups by BIOS, and has its config space
15757 * set up identically to what it was at cold boot.
15758 */
15759 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15761 struct net_device *netdev = pci_get_drvdata(pdev);
15762 struct tg3 *tp = netdev_priv(netdev);
15763 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15764 int err;
15766 rtnl_lock();
15768 if (pci_enable_device(pdev)) {
15769 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15770 goto done;
15773 pci_set_master(pdev);
15774 pci_restore_state(pdev);
15775 pci_save_state(pdev);
15777 if (!netif_running(netdev)) {
15778 rc = PCI_ERS_RESULT_RECOVERED;
15779 goto done;
15782 err = tg3_power_up(tp);
15783 if (err)
15784 goto done;
15786 rc = PCI_ERS_RESULT_RECOVERED;
15788 done:
15789 rtnl_unlock();
15791 return rc;
15794 /**
15795 * tg3_io_resume - called when traffic can start flowing again.
15796 * @pdev: Pointer to PCI device
15798 * This callback is called when the error recovery driver tells
15799 * us that it's OK to resume normal operation.
15800 */
15801 static void tg3_io_resume(struct pci_dev *pdev)
15803 struct net_device *netdev = pci_get_drvdata(pdev);
15804 struct tg3 *tp = netdev_priv(netdev);
15805 int err;
15807 rtnl_lock();
15809 if (!netif_running(netdev))
15810 goto done;
15812 tg3_full_lock(tp, 0);
15813 tg3_flag_set(tp, INIT_COMPLETE);
15814 err = tg3_restart_hw(tp, 1);
15815 tg3_full_unlock(tp);
15816 if (err) {
15817 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15818 goto done;
15821 netif_device_attach(netdev);
15823 tp->timer.expires = jiffies + tp->timer_offset;
15824 add_timer(&tp->timer);
15826 tg3_netif_start(tp);
15828 tg3_phy_start(tp);
15830 done:
15831 rtnl_unlock();
15834 static struct pci_error_handlers tg3_err_handler = {
15835 .error_detected = tg3_io_error_detected,
15836 .slot_reset = tg3_io_slot_reset,
15837 .resume = tg3_io_resume
15840 static struct pci_driver tg3_driver = {
15841 .name = DRV_MODULE_NAME,
15842 .id_table = tg3_pci_tbl,
15843 .probe = tg3_init_one,
15844 .remove = __devexit_p(tg3_remove_one),
15845 .err_handler = &tg3_err_handler,
15846 .driver.pm = TG3_PM_OPS,
15849 static int __init tg3_init(void)
15851 return pci_register_driver(&tg3_driver);
15854 static void __exit tg3_cleanup(void)
15856 pci_unregister_driver(&tg3_driver);
15859 module_init(tg3_init);
15860 module_exit(tg3_cleanup);