tg3: Generalize tg3_skb_error_unmap()
drivers/net/tg3.c
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
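
/* Illustrative expansion (a sketch, not part of the driver logic):
 * because the macros token-paste the short flag name onto TG3_FLAG_,
 *
 *	tg3_flag(tp, TAGGED_STATUS)
 *
 * becomes _tg3_flag(TG3_FLAG_TAGGED_STATUS, (tp)->tg3_flags), i.e. a
 * test_bit() on the flag bitmap.  TAGGED_STATUS is used exactly this
 * way later in this file.
 */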

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			119
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 18, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
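
/* Worked example of the shift-and-mask point above: TG3_TX_RING_SIZE
 * is the power of two 512, so the wrap in NEXT_TX() is a single AND:
 *
 *	NEXT_TX(511) == ((511 + 1) & 511) == 0
 *
 * which equals (511 + 1) % 512 without a hardware modulo.
 */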

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
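
/* Summary of the threshold selection above (descriptive note): when
 * unaligned loads are cheap, TG3_RX_COPY_THRESH() folds to the
 * compile-time constant 256 and costs nothing per packet; otherwise it
 * reads tp->rx_copy_thresh, which setup code elsewhere in the driver
 * can raise so that the 5701/PCIX double-copy workaround applies to
 * every received frame.
 */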

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}
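
/* Note on the two quirks handled just above: the second writel()
 * covers chips whose TX mailbox can drop a single posted write (the
 * TXD_MBOX_HWBUG flag), and the trailing readl() flushes the posted
 * write on host bridges that may reorder writes (MBOX_WRITE_REORDER).
 */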

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
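
/* Illustrative use of the flushing accessors (a sketch composed only
 * from definitions in this file): a GPIO write that switches power can
 * be given time to settle with
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 *
 * which routes through _tw32_flush() and enforces the 100 usec wait
 * defined by TG3_GRC_LCLCTL_PWRSW_DELAY around the read-back.
 */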

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++) {
		if (i == TG3_APE_LOCK_GPIO)
			continue;
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
	}

	/* Clear the correct bit of the GPIO lock too. */
	if (!tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_REQ_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
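
/* Typical pairing of the two helpers above (an illustrative sketch of
 * how callers serialize against the APE firmware; error handling
 * trimmed):
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;
 *	... touch the resource shared with the firmware ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * tg3_ape_lock() returning -EBUSY means the firmware side still holds
 * the grant after the 1 ms poll window.
 */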

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
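
/* Why tagged status may skip the work check (descriptive note): each
 * status block the chip DMAs to the host carries a tag, and writing
 * that tag back through the mailbox above acknowledges exactly the
 * work reported under it.  Anything newer is reported under a newer
 * tag, so the chip re-interrupts on its own and no extra HOSTCC_MODE
 * kick is needed.
 */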

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
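
/* The usual calling pattern for the two MI helpers above, as used
 * throughout this file (illustrative read-modify-write sketch):
 *
 *	u32 val;
 *
 *	if (!tg3_readphy(tp, MII_BMCR, &val))
 *		tg3_writephy(tp, MII_BMCR, val | BMCR_ANRESTART);
 *
 * Both helpers return 0 on success and -EBUSY if MI_COM_BUSY never
 * clears within PHY_BUSY_LOOPS polls.
 */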

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
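
/* Background for the pair above (descriptive note): this is the
 * standard IEEE 802.3 clause-45-over-clause-22 indirect access
 * sequence - select the MMD device (devad) in the control register,
 * latch the target register address, switch the control register to
 * data mode without post-increment, then move the data word through
 * the address register.
 */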

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
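
/* Sanity check on the arithmetic above: the loop polls in 8 usec
 * steps, and delay_cnt = (usec >> 3) + 1 rounds the remaining time up
 * to the next step, so the busy-wait is bounded by roughly
 * TG3_FW_EVENT_TIMEOUT_USEC (2500 usec -> at most 313 iterations,
 * about 2504 usec).
 */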

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
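
/* Condensed truth table for the resolution above, which follows the
 * standard 802.3 pause resolution ("x" = don't care):
 *
 *	lcl PAUSE  lcl ASYM | rmt PAUSE  rmt ASYM | resolved
 *	    1          x    |     1          x    | TX | RX
 *	    1          1    |     0          1    | RX only
 *	    0          1    |     1          1    | TX only
 *	          any other combination           | none
 */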

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
1891 static int tg3_wait_macro_done(struct tg3 *tp)
1893 int limit = 100;
1895 while (limit--) {
1896 u32 tmp32;
1898 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1899 if ((tmp32 & 0x1000) == 0)
1900 break;
1903 if (limit < 0)
1904 return -EBUSY;
1906 return 0;
1909 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1911 static const u32 test_pat[4][6] = {
1912 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1913 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1914 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1915 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1917 int chan;
1919 for (chan = 0; chan < 4; chan++) {
1920 int i;
1922 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1923 (chan * 0x2000) | 0x0200);
1924 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1926 for (i = 0; i < 6; i++)
1927 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1928 test_pat[chan][i]);
1930 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1931 if (tg3_wait_macro_done(tp)) {
1932 *resetp = 1;
1933 return -EBUSY;
1936 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1937 (chan * 0x2000) | 0x0200);
1938 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1939 if (tg3_wait_macro_done(tp)) {
1940 *resetp = 1;
1941 return -EBUSY;
1944 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1945 if (tg3_wait_macro_done(tp)) {
1946 *resetp = 1;
1947 return -EBUSY;
1950 for (i = 0; i < 6; i += 2) {
1951 u32 low, high;
1953 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1954 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1955 tg3_wait_macro_done(tp)) {
1956 *resetp = 1;
1957 return -EBUSY;
1959 low &= 0x7fff;
1960 high &= 0x000f;
1961 if (low != test_pat[chan][i] ||
1962 high != test_pat[chan][i+1]) {
1963 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1964 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1965 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1967 return -EBUSY;
1972 return 0;
1975 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1977 int chan;
1979 for (chan = 0; chan < 4; chan++) {
1980 int i;
1982 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1983 (chan * 0x2000) | 0x0200);
1984 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1985 for (i = 0; i < 6; i++)
1986 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1987 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1988 if (tg3_wait_macro_done(tp))
1989 return -EBUSY;
1992 return 0;
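/* DSP workaround for 5703/5704/5705 PHYs: temporarily disable the
 * transmitter, force a 1000 Mbps full-duplex master-mode link, verify
 * the DSP test patterns (re-running after a BMCR reset when the check
 * asks for one), then restore MII_CTRL1000 and MII_TG3_EXT_CTRL. */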
1995 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1997 u32 reg32, phy9_orig;
1998 int retries, do_phy_reset, err;
2000 retries = 10;
2001 do_phy_reset = 1;
2002 do {
2003 if (do_phy_reset) {
2004 err = tg3_bmcr_reset(tp);
2005 if (err)
2006 return err;
2007 do_phy_reset = 0;
2010 /* Disable transmitter and interrupt. */
2011 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2012 continue;
2014 reg32 |= 0x3000;
2015 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2017 /* Set full-duplex, 1000 Mbps. */
2018 tg3_writephy(tp, MII_BMCR,
2019 BMCR_FULLDPLX | BMCR_SPEED1000);
2021 /* Set to master mode. */
2022 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2023 continue;
2025 tg3_writephy(tp, MII_CTRL1000,
2026 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2028 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2029 if (err)
2030 return err;
2032 /* Block the PHY control access. */
2033 tg3_phydsp_write(tp, 0x8005, 0x0800);
2035 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2036 if (!err)
2037 break;
2038 } while (--retries);
2040 err = tg3_phy_reset_chanpat(tp);
2041 if (err)
2042 return err;
2044 tg3_phydsp_write(tp, 0x8005, 0x0000);
2046 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2047 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2049 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2051 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2053 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2054 reg32 &= ~0x3000;
2055 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2056 } else if (!err)
2057 err = -EBUSY;
2059 return err;
2062 /* Reset the tigon3 PHY and reapply the chip- and revision-specific
2063 * workarounds that must follow a PHY reset. */
2065 static int tg3_phy_reset(struct tg3 *tp)
2067 u32 val, cpmuctrl;
2068 int err;
2070 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2071 val = tr32(GRC_MISC_CFG);
2072 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2073 udelay(40);
2075 err = tg3_readphy(tp, MII_BMSR, &val);
2076 err |= tg3_readphy(tp, MII_BMSR, &val);
2077 if (err != 0)
2078 return -EBUSY;
2080 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2081 netif_carrier_off(tp->dev);
2082 tg3_link_report(tp);
2085 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2086 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2087 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2088 err = tg3_phy_reset_5703_4_5(tp);
2089 if (err)
2090 return err;
2091 goto out;
2094 cpmuctrl = 0;
2095 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2096 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2097 cpmuctrl = tr32(TG3_CPMU_CTRL);
2098 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2099 tw32(TG3_CPMU_CTRL,
2100 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2103 err = tg3_bmcr_reset(tp);
2104 if (err)
2105 return err;
2107 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2108 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2109 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2111 tw32(TG3_CPMU_CTRL, cpmuctrl);
2114 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2115 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2116 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2117 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2118 CPMU_LSPD_1000MB_MACCLK_12_5) {
2119 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2120 udelay(40);
2121 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2125 if (tg3_flag(tp, 5717_PLUS) &&
2126 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2127 return 0;
2129 tg3_phy_apply_otp(tp);
2131 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2132 tg3_phy_toggle_apd(tp, true);
2133 else
2134 tg3_phy_toggle_apd(tp, false);
2136 out:
2137 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2138 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2139 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2140 tg3_phydsp_write(tp, 0x000a, 0x0323);
2141 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2144 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2145 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2146 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2149 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2150 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2151 tg3_phydsp_write(tp, 0x000a, 0x310b);
2152 tg3_phydsp_write(tp, 0x201f, 0x9506);
2153 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2154 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2156 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2157 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2158 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2159 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2160 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2161 tg3_writephy(tp, MII_TG3_TEST1,
2162 MII_TG3_TEST1_TRIM_EN | 0x4);
2163 } else
2164 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2166 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2170 /* Set Extended packet length bit (bit 14) on all chips that
2171 * support jumbo frames. */
2172 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2173 /* Cannot do read-modify-write on 5401 */
2174 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2175 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2176 /* Set bit 14 with read-modify-write to preserve other bits */
2177 err = tg3_phy_auxctl_read(tp,
2178 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2179 if (!err)
2180 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2181 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2184 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2185 * jumbo frames transmission. */
2187 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2188 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2189 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2190 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2193 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2194 /* adjust output voltage */
2195 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2198 tg3_phy_toggle_automdix(tp, 1);
2199 tg3_phy_set_wirespeed(tp);
2200 return 0;
2203 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2204 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2205 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2206 TG3_GPIO_MSG_NEED_VAUX)
2207 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2208 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2209 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2210 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2211 (TG3_GPIO_MSG_DRVR_PRES << 12))
2213 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2214 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2215 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2216 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2217 (TG3_GPIO_MSG_NEED_VAUX << 12))
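/* Each PCI function owns a 4-bit nibble of the shared GPIO message word
 * (kept in APE scratchpad space on 5717/5719, in TG3_CPMU_DRV_STATUS
 * elsewhere).  Swap in this function's new status and return the
 * updated word so the caller sees every function's vote. */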
2219 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2221 u32 status, shift;
2223 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2224 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2225 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2226 else
2227 status = tr32(TG3_CPMU_DRV_STATUS);
2229 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2230 status &= ~(TG3_GPIO_MSG_MASK << shift);
2231 status |= (newstat << shift);
2233 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2234 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2235 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2236 else
2237 tw32(TG3_CPMU_DRV_STATUS, status);
2239 return status >> TG3_APE_GPIO_MSG_SHIFT;
2242 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2244 if (!tg3_flag(tp, IS_NIC))
2245 return 0;
2247 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2248 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2249 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2250 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2251 return -EIO;
2253 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2255 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2256 TG3_GRC_LCLCTL_PWRSW_DELAY);
2258 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2259 } else {
2260 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2261 TG3_GRC_LCLCTL_PWRSW_DELAY);
2264 return 0;
2267 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2269 u32 grc_local_ctrl;
2271 if (!tg3_flag(tp, IS_NIC) ||
2272 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2273 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2274 return;
2276 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2278 tw32_wait_f(GRC_LOCAL_CTRL,
2279 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2280 TG3_GRC_LCLCTL_PWRSW_DELAY);
2282 tw32_wait_f(GRC_LOCAL_CTRL,
2283 grc_local_ctrl,
2284 TG3_GRC_LCLCTL_PWRSW_DELAY);
2286 tw32_wait_f(GRC_LOCAL_CTRL,
2287 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2288 TG3_GRC_LCLCTL_PWRSW_DELAY);
2291 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2293 if (!tg3_flag(tp, IS_NIC))
2294 return;
2296 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2297 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2298 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2299 (GRC_LCLCTRL_GPIO_OE0 |
2300 GRC_LCLCTRL_GPIO_OE1 |
2301 GRC_LCLCTRL_GPIO_OE2 |
2302 GRC_LCLCTRL_GPIO_OUTPUT0 |
2303 GRC_LCLCTRL_GPIO_OUTPUT1),
2304 TG3_GRC_LCLCTL_PWRSW_DELAY);
2305 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2306 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2307 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2308 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2309 GRC_LCLCTRL_GPIO_OE1 |
2310 GRC_LCLCTRL_GPIO_OE2 |
2311 GRC_LCLCTRL_GPIO_OUTPUT0 |
2312 GRC_LCLCTRL_GPIO_OUTPUT1 |
2313 tp->grc_local_ctrl;
2314 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2315 TG3_GRC_LCLCTL_PWRSW_DELAY);
2317 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2318 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2319 TG3_GRC_LCLCTL_PWRSW_DELAY);
2321 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2322 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2323 TG3_GRC_LCLCTL_PWRSW_DELAY);
2324 } else {
2325 u32 no_gpio2;
2326 u32 grc_local_ctrl = 0;
2328 /* Workaround to prevent overdrawing Amps. */
2329 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2330 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2331 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2332 grc_local_ctrl,
2333 TG3_GRC_LCLCTL_PWRSW_DELAY);
2336 /* On 5753 and variants, GPIO2 cannot be used. */
2337 no_gpio2 = tp->nic_sram_data_cfg &
2338 NIC_SRAM_DATA_CFG_NO_GPIO2;
2340 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2341 GRC_LCLCTRL_GPIO_OE1 |
2342 GRC_LCLCTRL_GPIO_OE2 |
2343 GRC_LCLCTRL_GPIO_OUTPUT1 |
2344 GRC_LCLCTRL_GPIO_OUTPUT2;
2345 if (no_gpio2) {
2346 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2347 GRC_LCLCTRL_GPIO_OUTPUT2);
2349 tw32_wait_f(GRC_LOCAL_CTRL,
2350 tp->grc_local_ctrl | grc_local_ctrl,
2351 TG3_GRC_LCLCTL_PWRSW_DELAY);
2353 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2355 tw32_wait_f(GRC_LOCAL_CTRL,
2356 tp->grc_local_ctrl | grc_local_ctrl,
2357 TG3_GRC_LCLCTL_PWRSW_DELAY);
2359 if (!no_gpio2) {
2360 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2361 tw32_wait_f(GRC_LOCAL_CTRL,
2362 tp->grc_local_ctrl | grc_local_ctrl,
2363 TG3_GRC_LCLCTL_PWRSW_DELAY);
2368 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2370 u32 msg = 0;
2372 /* Serialize power state transitions */
2373 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2374 return;
2376 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2377 msg = TG3_GPIO_MSG_NEED_VAUX;
2379 msg = tg3_set_function_status(tp, msg);
2381 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2382 goto done;
2384 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2385 tg3_pwrsrc_switch_to_vaux(tp);
2386 else
2387 tg3_pwrsrc_die_with_vmain(tp);
2389 done:
2390 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
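/* Pick the power source for a device going down.  ASF/APE firmware and
 * WoL all require auxiliary power; if no function needs Vaux the board
 * may die with Vmain.  5717-class parts vote through the GPIO message
 * word above; older dual-port chips inspect the peer device directly. */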
2393 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2395 bool need_vaux = false;
2397 /* The GPIOs do something completely different on 57765. */
2398 if (!tg3_flag(tp, IS_NIC) ||
2399 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2400 return;
2402 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2403 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2404 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2405 tg3_frob_aux_power_5717(tp, include_wol ?
2406 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2407 return;
2410 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2411 struct net_device *dev_peer;
2413 dev_peer = pci_get_drvdata(tp->pdev_peer);
2415 /* remove_one() may have been run on the peer. */
2416 if (dev_peer) {
2417 struct tg3 *tp_peer = netdev_priv(dev_peer);
2419 if (tg3_flag(tp_peer, INIT_COMPLETE))
2420 return;
2422 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2423 tg3_flag(tp_peer, ENABLE_ASF))
2424 need_vaux = true;
2428 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2429 tg3_flag(tp, ENABLE_ASF))
2430 need_vaux = true;
2432 if (need_vaux)
2433 tg3_pwrsrc_switch_to_vaux(tp);
2434 else
2435 tg3_pwrsrc_die_with_vmain(tp);
2438 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2440 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2441 return 1;
2442 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2443 if (speed != SPEED_10)
2444 return 1;
2445 } else if (speed == SPEED_10)
2446 return 1;
2448 return 0;
2451 static int tg3_setup_phy(struct tg3 *, int);
2453 #define RESET_KIND_SHUTDOWN 0
2454 #define RESET_KIND_INIT 1
2455 #define RESET_KIND_SUSPEND 2
2457 static void tg3_write_sig_post_reset(struct tg3 *, int);
2458 static int tg3_halt_cpu(struct tg3 *, u32);
2460 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2462 u32 val;
2464 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2466 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2467 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2469 sg_dig_ctrl |=
2470 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2471 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2472 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2474 return;
2477 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2478 tg3_bmcr_reset(tp);
2479 val = tr32(GRC_MISC_CFG);
2480 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2481 udelay(40);
2482 return;
2483 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2484 u32 phytest;
2485 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2486 u32 phy;
2488 tg3_writephy(tp, MII_ADVERTISE, 0);
2489 tg3_writephy(tp, MII_BMCR,
2490 BMCR_ANENABLE | BMCR_ANRESTART);
2492 tg3_writephy(tp, MII_TG3_FET_TEST,
2493 phytest | MII_TG3_FET_SHADOW_EN);
2494 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2495 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2496 tg3_writephy(tp,
2497 MII_TG3_FET_SHDW_AUXMODE4,
2498 phy);
2500 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2502 return;
2503 } else if (do_low_power) {
2504 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2505 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2507 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2508 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2509 MII_TG3_AUXCTL_PCTL_VREG_11V;
2510 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2513 /* The PHY should not be powered down on some chips because
2514 * of bugs. */
2516 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2517 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2518 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2519 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2520 return;
2522 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2523 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2524 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2525 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2526 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2527 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2530 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2533 /* tp->lock is held. */
2534 static int tg3_nvram_lock(struct tg3 *tp)
2536 if (tg3_flag(tp, NVRAM)) {
2537 int i;
2539 if (tp->nvram_lock_cnt == 0) {
2540 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2541 for (i = 0; i < 8000; i++) {
2542 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2543 break;
2544 udelay(20);
2546 if (i == 8000) {
2547 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2548 return -ENODEV;
2551 tp->nvram_lock_cnt++;
2553 return 0;
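/* The SWARB handshake above allows up to 8000 polls at 20 usec each,
 * i.e. a worst-case arbitration wait of roughly 160 ms before failing
 * with -ENODEV. */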
2556 /* tp->lock is held. */
2557 static void tg3_nvram_unlock(struct tg3 *tp)
2559 if (tg3_flag(tp, NVRAM)) {
2560 if (tp->nvram_lock_cnt > 0)
2561 tp->nvram_lock_cnt--;
2562 if (tp->nvram_lock_cnt == 0)
2563 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2567 /* tp->lock is held. */
2568 static void tg3_enable_nvram_access(struct tg3 *tp)
2570 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2571 u32 nvaccess = tr32(NVRAM_ACCESS);
2573 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2577 /* tp->lock is held. */
2578 static void tg3_disable_nvram_access(struct tg3 *tp)
2580 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2581 u32 nvaccess = tr32(NVRAM_ACCESS);
2583 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2587 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2588 u32 offset, u32 *val)
2590 u32 tmp;
2591 int i;
2593 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2594 return -EINVAL;
2596 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2597 EEPROM_ADDR_DEVID_MASK |
2598 EEPROM_ADDR_READ);
2599 tw32(GRC_EEPROM_ADDR,
2600 tmp |
2601 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2602 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2603 EEPROM_ADDR_ADDR_MASK) |
2604 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2606 for (i = 0; i < 1000; i++) {
2607 tmp = tr32(GRC_EEPROM_ADDR);
2609 if (tmp & EEPROM_ADDR_COMPLETE)
2610 break;
2611 msleep(1);
2613 if (!(tmp & EEPROM_ADDR_COMPLETE))
2614 return -EBUSY;
2616 tmp = tr32(GRC_EEPROM_DATA);
2619 /* The data will always be opposite the native endian
2620 * format.  Perform a blind byteswap to compensate. */
2622 *val = swab32(tmp);
2624 return 0;
2627 #define NVRAM_CMD_TIMEOUT 10000
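/* Issue an NVRAM controller command and poll for NVRAM_CMD_DONE at
 * 10 usec intervals; NVRAM_CMD_TIMEOUT (10000) polls bound the wait at
 * roughly 100 ms. */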
2629 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2631 int i;
2633 tw32(NVRAM_CMD, nvram_cmd);
2634 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2635 udelay(10);
2636 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2637 udelay(10);
2638 break;
2642 if (i == NVRAM_CMD_TIMEOUT)
2643 return -EBUSY;
2645 return 0;
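/* tg3_nvram_phys_addr() below maps a linear offset onto the paged
 * addressing of buffered Atmel AT45DB0x1B flash, whose pages are 264
 * bytes rather than a power of two; other flashes use the address
 * unchanged.  A minimal sketch of the same math (illustrative only,
 * not part of the driver), assuming the 264-byte page size these parts
 * report and ATMEL_AT45DB0X1B_PAGE_POS == 9 from tg3.h:
 *
 *	u32 page = addr / 264;
 *	u32 off  = addr % 264;
 *	u32 phys = (page << 9) + off;
 *
 * e.g. addr 1000 -> page 3, off 208 -> phys (3 << 9) + 208 == 1744.
 */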
2648 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2650 if (tg3_flag(tp, NVRAM) &&
2651 tg3_flag(tp, NVRAM_BUFFERED) &&
2652 tg3_flag(tp, FLASH) &&
2653 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2654 (tp->nvram_jedecnum == JEDEC_ATMEL))
2656 addr = ((addr / tp->nvram_pagesize) <<
2657 ATMEL_AT45DB0X1B_PAGE_POS) +
2658 (addr % tp->nvram_pagesize);
2660 return addr;
2663 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2665 if (tg3_flag(tp, NVRAM) &&
2666 tg3_flag(tp, NVRAM_BUFFERED) &&
2667 tg3_flag(tp, FLASH) &&
2668 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2669 (tp->nvram_jedecnum == JEDEC_ATMEL))
2671 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2672 tp->nvram_pagesize) +
2673 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2675 return addr;
2678 /* NOTE: Data read in from NVRAM is byteswapped according to
2679 * the byteswapping settings for all other register accesses.
2680 * tg3 devices are BE devices, so on a BE machine, the data
2681 * returned will be exactly as it is seen in NVRAM. On a LE
2682 * machine, the 32-bit value will be byteswapped. */
2684 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2686 int ret;
2688 if (!tg3_flag(tp, NVRAM))
2689 return tg3_nvram_read_using_eeprom(tp, offset, val);
2691 offset = tg3_nvram_phys_addr(tp, offset);
2693 if (offset > NVRAM_ADDR_MSK)
2694 return -EINVAL;
2696 ret = tg3_nvram_lock(tp);
2697 if (ret)
2698 return ret;
2700 tg3_enable_nvram_access(tp);
2702 tw32(NVRAM_ADDR, offset);
2703 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2704 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2706 if (ret == 0)
2707 *val = tr32(NVRAM_RDDATA);
2709 tg3_disable_nvram_access(tp);
2711 tg3_nvram_unlock(tp);
2713 return ret;
2716 /* Ensures NVRAM data is in bytestream format. */
2717 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2719 u32 v;
2720 int res = tg3_nvram_read(tp, offset, &v);
2721 if (!res)
2722 *val = cpu_to_be32(v);
2723 return res;
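/* Worked example with hypothetical contents: if the four NVRAM bytes at
 * 'offset' are aa bb cc dd, tg3_nvram_read() hands back 0xaabbccdd,
 * which a BE host already stores in memory as aa bb cc dd but an LE
 * host stores as dd cc bb aa.  The cpu_to_be32() above restores the
 * aa bb cc dd bytestream on LE and is a no-op on BE. */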
2726 /* tp->lock is held. */
2727 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2729 u32 addr_high, addr_low;
2730 int i;
2732 addr_high = ((tp->dev->dev_addr[0] << 8) |
2733 tp->dev->dev_addr[1]);
2734 addr_low = ((tp->dev->dev_addr[2] << 24) |
2735 (tp->dev->dev_addr[3] << 16) |
2736 (tp->dev->dev_addr[4] << 8) |
2737 (tp->dev->dev_addr[5] << 0));
2738 for (i = 0; i < 4; i++) {
2739 if (i == 1 && skip_mac_1)
2740 continue;
2741 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2742 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2745 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2746 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2747 for (i = 0; i < 12; i++) {
2748 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2749 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2753 addr_high = (tp->dev->dev_addr[0] +
2754 tp->dev->dev_addr[1] +
2755 tp->dev->dev_addr[2] +
2756 tp->dev->dev_addr[3] +
2757 tp->dev->dev_addr[4] +
2758 tp->dev->dev_addr[5]) &
2759 TX_BACKOFF_SEED_MASK;
2760 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2763 static void tg3_enable_register_access(struct tg3 *tp)
2766 /* Make sure register accesses (indirect or otherwise) will function
2767 * correctly. */
2769 pci_write_config_dword(tp->pdev,
2770 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2773 static int tg3_power_up(struct tg3 *tp)
2775 int err;
2777 tg3_enable_register_access(tp);
2779 err = pci_set_power_state(tp->pdev, PCI_D0);
2780 if (!err) {
2781 /* Switch out of Vaux if it is a NIC */
2782 tg3_pwrsrc_switch_to_vmain(tp);
2783 } else {
2784 netdev_err(tp->dev, "Transition to D0 failed\n");
2787 return err;
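/* Quiesce the chip for power-down: mask PCI interrupts, drop the PHY to
 * a WoL-capable speed, arm magic-packet reception in the MAC when
 * wakeup is requested, gear the core clocks down, and let
 * tg3_frob_aux_power() choose the power source. */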
2790 static int tg3_power_down_prepare(struct tg3 *tp)
2792 u32 misc_host_ctrl;
2793 bool device_should_wake, do_low_power;
2795 tg3_enable_register_access(tp);
2797 /* Restore the CLKREQ setting. */
2798 if (tg3_flag(tp, CLKREQ_BUG)) {
2799 u16 lnkctl;
2801 pci_read_config_word(tp->pdev,
2802 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2803 &lnkctl);
2804 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2805 pci_write_config_word(tp->pdev,
2806 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2807 lnkctl);
2810 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2811 tw32(TG3PCI_MISC_HOST_CTRL,
2812 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2814 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2815 tg3_flag(tp, WOL_ENABLE);
2817 if (tg3_flag(tp, USE_PHYLIB)) {
2818 do_low_power = false;
2819 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2820 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2821 struct phy_device *phydev;
2822 u32 phyid, advertising;
2824 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2826 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2828 tp->link_config.orig_speed = phydev->speed;
2829 tp->link_config.orig_duplex = phydev->duplex;
2830 tp->link_config.orig_autoneg = phydev->autoneg;
2831 tp->link_config.orig_advertising = phydev->advertising;
2833 advertising = ADVERTISED_TP |
2834 ADVERTISED_Pause |
2835 ADVERTISED_Autoneg |
2836 ADVERTISED_10baseT_Half;
2838 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2839 if (tg3_flag(tp, WOL_SPEED_100MB))
2840 advertising |=
2841 ADVERTISED_100baseT_Half |
2842 ADVERTISED_100baseT_Full |
2843 ADVERTISED_10baseT_Full;
2844 else
2845 advertising |= ADVERTISED_10baseT_Full;
2848 phydev->advertising = advertising;
2850 phy_start_aneg(phydev);
2852 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2853 if (phyid != PHY_ID_BCMAC131) {
2854 phyid &= PHY_BCM_OUI_MASK;
2855 if (phyid == PHY_BCM_OUI_1 ||
2856 phyid == PHY_BCM_OUI_2 ||
2857 phyid == PHY_BCM_OUI_3)
2858 do_low_power = true;
2861 } else {
2862 do_low_power = true;
2864 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2865 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2866 tp->link_config.orig_speed = tp->link_config.speed;
2867 tp->link_config.orig_duplex = tp->link_config.duplex;
2868 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2871 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2872 tp->link_config.speed = SPEED_10;
2873 tp->link_config.duplex = DUPLEX_HALF;
2874 tp->link_config.autoneg = AUTONEG_ENABLE;
2875 tg3_setup_phy(tp, 0);
2879 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2880 u32 val;
2882 val = tr32(GRC_VCPU_EXT_CTRL);
2883 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2884 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2885 int i;
2886 u32 val;
2888 for (i = 0; i < 200; i++) {
2889 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2890 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2891 break;
2892 msleep(1);
2895 if (tg3_flag(tp, WOL_CAP))
2896 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2897 WOL_DRV_STATE_SHUTDOWN |
2898 WOL_DRV_WOL |
2899 WOL_SET_MAGIC_PKT);
2901 if (device_should_wake) {
2902 u32 mac_mode;
2904 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2905 if (do_low_power &&
2906 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2907 tg3_phy_auxctl_write(tp,
2908 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2909 MII_TG3_AUXCTL_PCTL_WOL_EN |
2910 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2911 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2912 udelay(40);
2915 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2916 mac_mode = MAC_MODE_PORT_MODE_GMII;
2917 else
2918 mac_mode = MAC_MODE_PORT_MODE_MII;
2920 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2921 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2922 ASIC_REV_5700) {
2923 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2924 SPEED_100 : SPEED_10;
2925 if (tg3_5700_link_polarity(tp, speed))
2926 mac_mode |= MAC_MODE_LINK_POLARITY;
2927 else
2928 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2930 } else {
2931 mac_mode = MAC_MODE_PORT_MODE_TBI;
2934 if (!tg3_flag(tp, 5750_PLUS))
2935 tw32(MAC_LED_CTRL, tp->led_ctrl);
2937 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2938 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2939 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2940 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2942 if (tg3_flag(tp, ENABLE_APE))
2943 mac_mode |= MAC_MODE_APE_TX_EN |
2944 MAC_MODE_APE_RX_EN |
2945 MAC_MODE_TDE_ENABLE;
2947 tw32_f(MAC_MODE, mac_mode);
2948 udelay(100);
2950 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2951 udelay(10);
2954 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2955 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2956 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2957 u32 base_val;
2959 base_val = tp->pci_clock_ctrl;
2960 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2961 CLOCK_CTRL_TXCLK_DISABLE);
2963 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2964 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2965 } else if (tg3_flag(tp, 5780_CLASS) ||
2966 tg3_flag(tp, CPMU_PRESENT) ||
2967 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2968 /* do nothing */
2969 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2970 u32 newbits1, newbits2;
2972 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2973 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2974 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2975 CLOCK_CTRL_TXCLK_DISABLE |
2976 CLOCK_CTRL_ALTCLK);
2977 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2978 } else if (tg3_flag(tp, 5705_PLUS)) {
2979 newbits1 = CLOCK_CTRL_625_CORE;
2980 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2981 } else {
2982 newbits1 = CLOCK_CTRL_ALTCLK;
2983 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2986 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2987 40);
2989 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2990 40);
2992 if (!tg3_flag(tp, 5705_PLUS)) {
2993 u32 newbits3;
2995 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2996 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2997 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2998 CLOCK_CTRL_TXCLK_DISABLE |
2999 CLOCK_CTRL_44MHZ_CORE);
3000 } else {
3001 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3004 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3005 tp->pci_clock_ctrl | newbits3, 40);
3009 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3010 tg3_power_down_phy(tp, do_low_power);
3012 tg3_frob_aux_power(tp, true);
3014 /* Workaround for unstable PLL clock */
3015 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3016 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3017 u32 val = tr32(0x7d00);
3019 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3020 tw32(0x7d00, val);
3021 if (!tg3_flag(tp, ENABLE_ASF)) {
3022 int err;
3024 err = tg3_nvram_lock(tp);
3025 tg3_halt_cpu(tp, RX_CPU_BASE);
3026 if (!err)
3027 tg3_nvram_unlock(tp);
3031 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3033 return 0;
3036 static void tg3_power_down(struct tg3 *tp)
3038 tg3_power_down_prepare(tp);
3040 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3041 pci_set_power_state(tp->pdev, PCI_D3hot);
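/* Decode the speed/duplex field of MII_TG3_AUX_STAT into generic
 * SPEED_xxx/DUPLEX_xxx values.  FET-style PHYs encode link speed
 * differently, hence the extra decoding in the default case. */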
3044 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3046 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3047 case MII_TG3_AUX_STAT_10HALF:
3048 *speed = SPEED_10;
3049 *duplex = DUPLEX_HALF;
3050 break;
3052 case MII_TG3_AUX_STAT_10FULL:
3053 *speed = SPEED_10;
3054 *duplex = DUPLEX_FULL;
3055 break;
3057 case MII_TG3_AUX_STAT_100HALF:
3058 *speed = SPEED_100;
3059 *duplex = DUPLEX_HALF;
3060 break;
3062 case MII_TG3_AUX_STAT_100FULL:
3063 *speed = SPEED_100;
3064 *duplex = DUPLEX_FULL;
3065 break;
3067 case MII_TG3_AUX_STAT_1000HALF:
3068 *speed = SPEED_1000;
3069 *duplex = DUPLEX_HALF;
3070 break;
3072 case MII_TG3_AUX_STAT_1000FULL:
3073 *speed = SPEED_1000;
3074 *duplex = DUPLEX_FULL;
3075 break;
3077 default:
3078 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3079 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3080 SPEED_10;
3081 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3082 DUPLEX_HALF;
3083 break;
3085 *speed = SPEED_INVALID;
3086 *duplex = DUPLEX_INVALID;
3087 break;
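/* Program MII_ADVERTISE and MII_CTRL1000 from the generic ADVERTISED_x
 * mask plus the requested flow-control bits, then, when the PHY is
 * EEE-capable, advertise the matching EEE abilities through the
 * clause 45 AN MMD and apply the per-ASIC DSP tweaks. */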
3091 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3093 int err = 0;
3094 u32 val, new_adv;
3096 new_adv = ADVERTISE_CSMA;
3097 if (advertise & ADVERTISED_10baseT_Half)
3098 new_adv |= ADVERTISE_10HALF;
3099 if (advertise & ADVERTISED_10baseT_Full)
3100 new_adv |= ADVERTISE_10FULL;
3101 if (advertise & ADVERTISED_100baseT_Half)
3102 new_adv |= ADVERTISE_100HALF;
3103 if (advertise & ADVERTISED_100baseT_Full)
3104 new_adv |= ADVERTISE_100FULL;
3106 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3108 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3109 if (err)
3110 goto done;
3112 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3113 goto done;
3115 new_adv = 0;
3116 if (advertise & ADVERTISED_1000baseT_Half)
3117 new_adv |= ADVERTISE_1000HALF;
3118 if (advertise & ADVERTISED_1000baseT_Full)
3119 new_adv |= ADVERTISE_1000FULL;
3121 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3122 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3123 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3125 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3126 if (err)
3127 goto done;
3129 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3130 goto done;
3132 tw32(TG3_CPMU_EEE_MODE,
3133 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3135 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3136 if (!err) {
3137 u32 err2;
3139 val = 0;
3140 /* Advertise 100-BaseTX EEE ability */
3141 if (advertise & ADVERTISED_100baseT_Full)
3142 val |= MDIO_AN_EEE_ADV_100TX;
3143 /* Advertise 1000-BaseT EEE ability */
3144 if (advertise & ADVERTISED_1000baseT_Full)
3145 val |= MDIO_AN_EEE_ADV_1000T;
3146 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3147 if (err)
3148 val = 0;
3150 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3151 case ASIC_REV_5717:
3152 case ASIC_REV_57765:
3153 case ASIC_REV_5719:
3154 /* If we advertised any EEE abilities above... */
3155 if (val)
3156 val = MII_TG3_DSP_TAP26_ALNOKO |
3157 MII_TG3_DSP_TAP26_RMRXSTO |
3158 MII_TG3_DSP_TAP26_OPCSINPT;
3159 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3160 /* Fall through */
3161 case ASIC_REV_5720:
3162 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3163 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3164 MII_TG3_DSP_CH34TP2_HIBW01);
3167 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3168 if (!err)
3169 err = err2;
3172 done:
3173 return err;
3176 static void tg3_phy_copper_begin(struct tg3 *tp)
3178 u32 new_adv;
3179 int i;
3181 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3182 new_adv = ADVERTISED_10baseT_Half |
3183 ADVERTISED_10baseT_Full;
3184 if (tg3_flag(tp, WOL_SPEED_100MB))
3185 new_adv |= ADVERTISED_100baseT_Half |
3186 ADVERTISED_100baseT_Full;
3188 tg3_phy_autoneg_cfg(tp, new_adv,
3189 FLOW_CTRL_TX | FLOW_CTRL_RX);
3190 } else if (tp->link_config.speed == SPEED_INVALID) {
3191 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3192 tp->link_config.advertising &=
3193 ~(ADVERTISED_1000baseT_Half |
3194 ADVERTISED_1000baseT_Full);
3196 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3197 tp->link_config.flowctrl);
3198 } else {
3199 /* Asking for a specific link mode. */
3200 if (tp->link_config.speed == SPEED_1000) {
3201 if (tp->link_config.duplex == DUPLEX_FULL)
3202 new_adv = ADVERTISED_1000baseT_Full;
3203 else
3204 new_adv = ADVERTISED_1000baseT_Half;
3205 } else if (tp->link_config.speed == SPEED_100) {
3206 if (tp->link_config.duplex == DUPLEX_FULL)
3207 new_adv = ADVERTISED_100baseT_Full;
3208 else
3209 new_adv = ADVERTISED_100baseT_Half;
3210 } else {
3211 if (tp->link_config.duplex == DUPLEX_FULL)
3212 new_adv = ADVERTISED_10baseT_Full;
3213 else
3214 new_adv = ADVERTISED_10baseT_Half;
3217 tg3_phy_autoneg_cfg(tp, new_adv,
3218 tp->link_config.flowctrl);
3221 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3222 tp->link_config.speed != SPEED_INVALID) {
3223 u32 bmcr, orig_bmcr;
3225 tp->link_config.active_speed = tp->link_config.speed;
3226 tp->link_config.active_duplex = tp->link_config.duplex;
3228 bmcr = 0;
3229 switch (tp->link_config.speed) {
3230 default:
3231 case SPEED_10:
3232 break;
3234 case SPEED_100:
3235 bmcr |= BMCR_SPEED100;
3236 break;
3238 case SPEED_1000:
3239 bmcr |= BMCR_SPEED1000;
3240 break;
3243 if (tp->link_config.duplex == DUPLEX_FULL)
3244 bmcr |= BMCR_FULLDPLX;
3246 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3247 (bmcr != orig_bmcr)) {
3248 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3249 for (i = 0; i < 1500; i++) {
3250 u32 tmp;
3252 udelay(10);
3253 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3254 tg3_readphy(tp, MII_BMSR, &tmp))
3255 continue;
3256 if (!(tmp & BMSR_LSTATUS)) {
3257 udelay(40);
3258 break;
3261 tg3_writephy(tp, MII_BMCR, bmcr);
3262 udelay(40);
3264 } else {
3265 tg3_writephy(tp, MII_BMCR,
3266 BMCR_ANENABLE | BMCR_ANRESTART);
3270 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3272 int err;
3274 /* Turn off tap power management. */
3275 /* Set Extended packet length bit */
3276 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3278 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3279 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3280 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3281 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3282 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3284 udelay(40);
3286 return err;
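/* Return 1 only if the PHY's advertisement registers already carry
 * every mode requested in 'mask': MII_ADVERTISE for the 10/100 modes
 * and MII_CTRL1000 for gigabit modes on gigabit-capable PHYs. */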
3289 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3291 u32 adv_reg, all_mask = 0;
3293 if (mask & ADVERTISED_10baseT_Half)
3294 all_mask |= ADVERTISE_10HALF;
3295 if (mask & ADVERTISED_10baseT_Full)
3296 all_mask |= ADVERTISE_10FULL;
3297 if (mask & ADVERTISED_100baseT_Half)
3298 all_mask |= ADVERTISE_100HALF;
3299 if (mask & ADVERTISED_100baseT_Full)
3300 all_mask |= ADVERTISE_100FULL;
3302 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3303 return 0;
3305 if ((adv_reg & all_mask) != all_mask)
3306 return 0;
3307 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3308 u32 tg3_ctrl;
3310 all_mask = 0;
3311 if (mask & ADVERTISED_1000baseT_Half)
3312 all_mask |= ADVERTISE_1000HALF;
3313 if (mask & ADVERTISED_1000baseT_Full)
3314 all_mask |= ADVERTISE_1000FULL;
3316 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3317 return 0;
3319 if ((tg3_ctrl & all_mask) != all_mask)
3320 return 0;
3322 return 1;
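/* Check that the pause bits latched in MII_ADVERTISE match what
 * tp->link_config.flowctrl requests.  On a full-duplex link a mismatch
 * returns 0 so the caller renegotiates; otherwise the advertisement is
 * quietly rewritten for the next negotiation cycle. */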
3325 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3327 u32 curadv, reqadv;
3329 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3330 return 1;
3332 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3333 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3335 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3336 if (curadv != reqadv)
3337 return 0;
3339 if (tg3_flag(tp, PAUSE_AUTONEG))
3340 tg3_readphy(tp, MII_LPA, rmtadv);
3341 } else {
3342 /* Reprogram the advertisement register, even if it
3343 * does not affect the current link. If the link
3344 * gets renegotiated in the future, we can save an
3345 * additional renegotiation cycle by advertising
3346 * it correctly in the first place. */
3348 if (curadv != reqadv) {
3349 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3350 ADVERTISE_PAUSE_ASYM);
3351 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3355 return 1;
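/* Bring the copper link up or down: clear stale MAC status bits, apply
 * per-chip PHY workarounds (resetting if needed), poll BMSR for link,
 * resolve speed/duplex and flow control, and program MAC_MODE and the
 * CLKREQ workaround to match the result. */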
3358 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3360 int current_link_up;
3361 u32 bmsr, val;
3362 u32 lcl_adv, rmt_adv;
3363 u16 current_speed;
3364 u8 current_duplex;
3365 int i, err;
3367 tw32(MAC_EVENT, 0);
3369 tw32_f(MAC_STATUS,
3370 (MAC_STATUS_SYNC_CHANGED |
3371 MAC_STATUS_CFG_CHANGED |
3372 MAC_STATUS_MI_COMPLETION |
3373 MAC_STATUS_LNKSTATE_CHANGED));
3374 udelay(40);
3376 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3377 tw32_f(MAC_MI_MODE,
3378 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3379 udelay(80);
3382 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3384 /* Some third-party PHYs need to be reset on link going
3385 * down. */
3387 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3388 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3389 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3390 netif_carrier_ok(tp->dev)) {
3391 tg3_readphy(tp, MII_BMSR, &bmsr);
3392 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3393 !(bmsr & BMSR_LSTATUS))
3394 force_reset = 1;
3396 if (force_reset)
3397 tg3_phy_reset(tp);
3399 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3400 tg3_readphy(tp, MII_BMSR, &bmsr);
3401 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3402 !tg3_flag(tp, INIT_COMPLETE))
3403 bmsr = 0;
3405 if (!(bmsr & BMSR_LSTATUS)) {
3406 err = tg3_init_5401phy_dsp(tp);
3407 if (err)
3408 return err;
3410 tg3_readphy(tp, MII_BMSR, &bmsr);
3411 for (i = 0; i < 1000; i++) {
3412 udelay(10);
3413 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3414 (bmsr & BMSR_LSTATUS)) {
3415 udelay(40);
3416 break;
3420 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3421 TG3_PHY_REV_BCM5401_B0 &&
3422 !(bmsr & BMSR_LSTATUS) &&
3423 tp->link_config.active_speed == SPEED_1000) {
3424 err = tg3_phy_reset(tp);
3425 if (!err)
3426 err = tg3_init_5401phy_dsp(tp);
3427 if (err)
3428 return err;
3431 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3432 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3433 /* 5701 {A0,B0} CRC bug workaround */
3434 tg3_writephy(tp, 0x15, 0x0a75);
3435 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3436 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3437 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3440 /* Clear pending interrupts... */
3441 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3442 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3444 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3445 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3446 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3447 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3450 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3451 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3452 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3453 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3454 else
3455 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3458 current_link_up = 0;
3459 current_speed = SPEED_INVALID;
3460 current_duplex = DUPLEX_INVALID;
3462 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3463 err = tg3_phy_auxctl_read(tp,
3464 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3465 &val);
3466 if (!err && !(val & (1 << 10))) {
3467 tg3_phy_auxctl_write(tp,
3468 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3469 val | (1 << 10));
3470 goto relink;
3474 bmsr = 0;
3475 for (i = 0; i < 100; i++) {
3476 tg3_readphy(tp, MII_BMSR, &bmsr);
3477 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3478 (bmsr & BMSR_LSTATUS))
3479 break;
3480 udelay(40);
3483 if (bmsr & BMSR_LSTATUS) {
3484 u32 aux_stat, bmcr;
3486 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3487 for (i = 0; i < 2000; i++) {
3488 udelay(10);
3489 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3490 aux_stat)
3491 break;
3494 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3495 &current_speed,
3496 &current_duplex);
3498 bmcr = 0;
3499 for (i = 0; i < 200; i++) {
3500 tg3_readphy(tp, MII_BMCR, &bmcr);
3501 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3502 continue;
3503 if (bmcr && bmcr != 0x7fff)
3504 break;
3505 udelay(10);
3508 lcl_adv = 0;
3509 rmt_adv = 0;
3511 tp->link_config.active_speed = current_speed;
3512 tp->link_config.active_duplex = current_duplex;
3514 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3515 if ((bmcr & BMCR_ANENABLE) &&
3516 tg3_copper_is_advertising_all(tp,
3517 tp->link_config.advertising)) {
3518 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3519 &rmt_adv))
3520 current_link_up = 1;
3522 } else {
3523 if (!(bmcr & BMCR_ANENABLE) &&
3524 tp->link_config.speed == current_speed &&
3525 tp->link_config.duplex == current_duplex &&
3526 tp->link_config.flowctrl ==
3527 tp->link_config.active_flowctrl) {
3528 current_link_up = 1;
3532 if (current_link_up == 1 &&
3533 tp->link_config.active_duplex == DUPLEX_FULL)
3534 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3537 relink:
3538 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3539 tg3_phy_copper_begin(tp);
3541 tg3_readphy(tp, MII_BMSR, &bmsr);
3542 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3543 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3544 current_link_up = 1;
3547 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3548 if (current_link_up == 1) {
3549 if (tp->link_config.active_speed == SPEED_100 ||
3550 tp->link_config.active_speed == SPEED_10)
3551 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3552 else
3553 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3554 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3555 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3556 else
3557 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3559 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3560 if (tp->link_config.active_duplex == DUPLEX_HALF)
3561 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3563 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3564 if (current_link_up == 1 &&
3565 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3566 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3567 else
3568 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3571 /* ??? Without this setting Netgear GA302T PHY does not
3572 * ??? send/receive packets... */
3574 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3575 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3576 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3577 tw32_f(MAC_MI_MODE, tp->mi_mode);
3578 udelay(80);
3581 tw32_f(MAC_MODE, tp->mac_mode);
3582 udelay(40);
3584 tg3_phy_eee_adjust(tp, current_link_up);
3586 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3587 /* Polled via timer. */
3588 tw32_f(MAC_EVENT, 0);
3589 } else {
3590 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3592 udelay(40);
3594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3595 current_link_up == 1 &&
3596 tp->link_config.active_speed == SPEED_1000 &&
3597 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3598 udelay(120);
3599 tw32_f(MAC_STATUS,
3600 (MAC_STATUS_SYNC_CHANGED |
3601 MAC_STATUS_CFG_CHANGED));
3602 udelay(40);
3603 tg3_write_mem(tp,
3604 NIC_SRAM_FIRMWARE_MBOX,
3605 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3608 /* Prevent send BD corruption. */
3609 if (tg3_flag(tp, CLKREQ_BUG)) {
3610 u16 oldlnkctl, newlnkctl;
3612 pci_read_config_word(tp->pdev,
3613 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3614 &oldlnkctl);
3615 if (tp->link_config.active_speed == SPEED_100 ||
3616 tp->link_config.active_speed == SPEED_10)
3617 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3618 else
3619 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3620 if (newlnkctl != oldlnkctl)
3621 pci_write_config_word(tp->pdev,
3622 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3623 newlnkctl);
3626 if (current_link_up != netif_carrier_ok(tp->dev)) {
3627 if (current_link_up)
3628 netif_carrier_on(tp->dev);
3629 else
3630 netif_carrier_off(tp->dev);
3631 tg3_link_report(tp);
3634 return 0;
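/* Software 1000BASE-X (IEEE 802.3 clause 37 style) autonegotiation for
 * fiber links without hardware autoneg support: the MAC exchanges /C/
 * configuration code words and tg3_fiber_aneg_smachine() below walks
 * the ability-detect / ack-detect / complete-ack / idle-detect states
 * of the standard arbitration diagram. */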
3637 struct tg3_fiber_aneginfo {
3638 int state;
3639 #define ANEG_STATE_UNKNOWN 0
3640 #define ANEG_STATE_AN_ENABLE 1
3641 #define ANEG_STATE_RESTART_INIT 2
3642 #define ANEG_STATE_RESTART 3
3643 #define ANEG_STATE_DISABLE_LINK_OK 4
3644 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3645 #define ANEG_STATE_ABILITY_DETECT 6
3646 #define ANEG_STATE_ACK_DETECT_INIT 7
3647 #define ANEG_STATE_ACK_DETECT 8
3648 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3649 #define ANEG_STATE_COMPLETE_ACK 10
3650 #define ANEG_STATE_IDLE_DETECT_INIT 11
3651 #define ANEG_STATE_IDLE_DETECT 12
3652 #define ANEG_STATE_LINK_OK 13
3653 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3654 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3656 u32 flags;
3657 #define MR_AN_ENABLE 0x00000001
3658 #define MR_RESTART_AN 0x00000002
3659 #define MR_AN_COMPLETE 0x00000004
3660 #define MR_PAGE_RX 0x00000008
3661 #define MR_NP_LOADED 0x00000010
3662 #define MR_TOGGLE_TX 0x00000020
3663 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3664 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3665 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3666 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3667 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3668 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3669 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3670 #define MR_TOGGLE_RX 0x00002000
3671 #define MR_NP_RX 0x00004000
3673 #define MR_LINK_OK 0x80000000
3675 unsigned long link_time, cur_time;
3677 u32 ability_match_cfg;
3678 int ability_match_count;
3680 char ability_match, idle_match, ack_match;
3682 u32 txconfig, rxconfig;
3683 #define ANEG_CFG_NP 0x00000080
3684 #define ANEG_CFG_ACK 0x00000040
3685 #define ANEG_CFG_RF2 0x00000020
3686 #define ANEG_CFG_RF1 0x00000010
3687 #define ANEG_CFG_PS2 0x00000001
3688 #define ANEG_CFG_PS1 0x00008000
3689 #define ANEG_CFG_HD 0x00004000
3690 #define ANEG_CFG_FD 0x00002000
3691 #define ANEG_CFG_INVAL 0x00001f06
3694 #define ANEG_OK 0
3695 #define ANEG_DONE 1
3696 #define ANEG_TIMER_ENAB 2
3697 #define ANEG_FAILED -1
3699 #define ANEG_STATE_SETTLE_TIME 10000
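/* One "tick" is one pass through tg3_fiber_aneg_smachine();
 * fiber_autoneg() below drives it with a 1 usec delay per tick, so
 * ANEG_STATE_SETTLE_TIME (10000 ticks) is roughly 10 ms and the overall
 * 195000-tick budget is roughly 195 ms. */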
3701 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3702 struct tg3_fiber_aneginfo *ap)
3704 u16 flowctrl;
3705 unsigned long delta;
3706 u32 rx_cfg_reg;
3707 int ret;
3709 if (ap->state == ANEG_STATE_UNKNOWN) {
3710 ap->rxconfig = 0;
3711 ap->link_time = 0;
3712 ap->cur_time = 0;
3713 ap->ability_match_cfg = 0;
3714 ap->ability_match_count = 0;
3715 ap->ability_match = 0;
3716 ap->idle_match = 0;
3717 ap->ack_match = 0;
3719 ap->cur_time++;
3721 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3722 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3724 if (rx_cfg_reg != ap->ability_match_cfg) {
3725 ap->ability_match_cfg = rx_cfg_reg;
3726 ap->ability_match = 0;
3727 ap->ability_match_count = 0;
3728 } else {
3729 if (++ap->ability_match_count > 1) {
3730 ap->ability_match = 1;
3731 ap->ability_match_cfg = rx_cfg_reg;
3734 if (rx_cfg_reg & ANEG_CFG_ACK)
3735 ap->ack_match = 1;
3736 else
3737 ap->ack_match = 0;
3739 ap->idle_match = 0;
3740 } else {
3741 ap->idle_match = 1;
3742 ap->ability_match_cfg = 0;
3743 ap->ability_match_count = 0;
3744 ap->ability_match = 0;
3745 ap->ack_match = 0;
3747 rx_cfg_reg = 0;
3750 ap->rxconfig = rx_cfg_reg;
3751 ret = ANEG_OK;
3753 switch (ap->state) {
3754 case ANEG_STATE_UNKNOWN:
3755 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3756 ap->state = ANEG_STATE_AN_ENABLE;
3758 /* fallthru */
3759 case ANEG_STATE_AN_ENABLE:
3760 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3761 if (ap->flags & MR_AN_ENABLE) {
3762 ap->link_time = 0;
3763 ap->cur_time = 0;
3764 ap->ability_match_cfg = 0;
3765 ap->ability_match_count = 0;
3766 ap->ability_match = 0;
3767 ap->idle_match = 0;
3768 ap->ack_match = 0;
3770 ap->state = ANEG_STATE_RESTART_INIT;
3771 } else {
3772 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3774 break;
3776 case ANEG_STATE_RESTART_INIT:
3777 ap->link_time = ap->cur_time;
3778 ap->flags &= ~(MR_NP_LOADED);
3779 ap->txconfig = 0;
3780 tw32(MAC_TX_AUTO_NEG, 0);
3781 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3782 tw32_f(MAC_MODE, tp->mac_mode);
3783 udelay(40);
3785 ret = ANEG_TIMER_ENAB;
3786 ap->state = ANEG_STATE_RESTART;
3788 /* fallthru */
3789 case ANEG_STATE_RESTART:
3790 delta = ap->cur_time - ap->link_time;
3791 if (delta > ANEG_STATE_SETTLE_TIME)
3792 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3793 else
3794 ret = ANEG_TIMER_ENAB;
3795 break;
3797 case ANEG_STATE_DISABLE_LINK_OK:
3798 ret = ANEG_DONE;
3799 break;
3801 case ANEG_STATE_ABILITY_DETECT_INIT:
3802 ap->flags &= ~(MR_TOGGLE_TX);
3803 ap->txconfig = ANEG_CFG_FD;
3804 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3805 if (flowctrl & ADVERTISE_1000XPAUSE)
3806 ap->txconfig |= ANEG_CFG_PS1;
3807 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3808 ap->txconfig |= ANEG_CFG_PS2;
3809 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3810 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3811 tw32_f(MAC_MODE, tp->mac_mode);
3812 udelay(40);
3814 ap->state = ANEG_STATE_ABILITY_DETECT;
3815 break;
3817 case ANEG_STATE_ABILITY_DETECT:
3818 if (ap->ability_match != 0 && ap->rxconfig != 0)
3819 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3820 break;
3822 case ANEG_STATE_ACK_DETECT_INIT:
3823 ap->txconfig |= ANEG_CFG_ACK;
3824 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3825 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3826 tw32_f(MAC_MODE, tp->mac_mode);
3827 udelay(40);
3829 ap->state = ANEG_STATE_ACK_DETECT;
3831 /* fallthru */
3832 case ANEG_STATE_ACK_DETECT:
3833 if (ap->ack_match != 0) {
3834 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3835 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3836 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3837 } else {
3838 ap->state = ANEG_STATE_AN_ENABLE;
3840 } else if (ap->ability_match != 0 &&
3841 ap->rxconfig == 0) {
3842 ap->state = ANEG_STATE_AN_ENABLE;
3844 break;
3846 case ANEG_STATE_COMPLETE_ACK_INIT:
3847 if (ap->rxconfig & ANEG_CFG_INVAL) {
3848 ret = ANEG_FAILED;
3849 break;
3851 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3852 MR_LP_ADV_HALF_DUPLEX |
3853 MR_LP_ADV_SYM_PAUSE |
3854 MR_LP_ADV_ASYM_PAUSE |
3855 MR_LP_ADV_REMOTE_FAULT1 |
3856 MR_LP_ADV_REMOTE_FAULT2 |
3857 MR_LP_ADV_NEXT_PAGE |
3858 MR_TOGGLE_RX |
3859 MR_NP_RX);
3860 if (ap->rxconfig & ANEG_CFG_FD)
3861 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3862 if (ap->rxconfig & ANEG_CFG_HD)
3863 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3864 if (ap->rxconfig & ANEG_CFG_PS1)
3865 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3866 if (ap->rxconfig & ANEG_CFG_PS2)
3867 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3868 if (ap->rxconfig & ANEG_CFG_RF1)
3869 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3870 if (ap->rxconfig & ANEG_CFG_RF2)
3871 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3872 if (ap->rxconfig & ANEG_CFG_NP)
3873 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3875 ap->link_time = ap->cur_time;
3877 ap->flags ^= (MR_TOGGLE_TX);
3878 if (ap->rxconfig & 0x0008)
3879 ap->flags |= MR_TOGGLE_RX;
3880 if (ap->rxconfig & ANEG_CFG_NP)
3881 ap->flags |= MR_NP_RX;
3882 ap->flags |= MR_PAGE_RX;
3884 ap->state = ANEG_STATE_COMPLETE_ACK;
3885 ret = ANEG_TIMER_ENAB;
3886 break;
3888 case ANEG_STATE_COMPLETE_ACK:
3889 if (ap->ability_match != 0 &&
3890 ap->rxconfig == 0) {
3891 ap->state = ANEG_STATE_AN_ENABLE;
3892 break;
3894 delta = ap->cur_time - ap->link_time;
3895 if (delta > ANEG_STATE_SETTLE_TIME) {
3896 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3897 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3898 } else {
3899 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3900 !(ap->flags & MR_NP_RX)) {
3901 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3902 } else {
3903 ret = ANEG_FAILED;
3907 break;
3909 case ANEG_STATE_IDLE_DETECT_INIT:
3910 ap->link_time = ap->cur_time;
3911 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3912 tw32_f(MAC_MODE, tp->mac_mode);
3913 udelay(40);
3915 ap->state = ANEG_STATE_IDLE_DETECT;
3916 ret = ANEG_TIMER_ENAB;
3917 break;
3919 case ANEG_STATE_IDLE_DETECT:
3920 if (ap->ability_match != 0 &&
3921 ap->rxconfig == 0) {
3922 ap->state = ANEG_STATE_AN_ENABLE;
3923 break;
3925 delta = ap->cur_time - ap->link_time;
3926 if (delta > ANEG_STATE_SETTLE_TIME) {
3927 /* XXX another gem from the Broadcom driver :( */
3928 ap->state = ANEG_STATE_LINK_OK;
3930 break;
3932 case ANEG_STATE_LINK_OK:
3933 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3934 ret = ANEG_DONE;
3935 break;
3937 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3938 /* ??? unimplemented */
3939 break;
3941 case ANEG_STATE_NEXT_PAGE_WAIT:
3942 /* ??? unimplemented */
3943 break;
3945 default:
3946 ret = ANEG_FAILED;
3947 break;
3950 return ret;
3953 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3955 int res = 0;
3956 struct tg3_fiber_aneginfo aninfo;
3957 int status = ANEG_FAILED;
3958 unsigned int tick;
3959 u32 tmp;
3961 tw32_f(MAC_TX_AUTO_NEG, 0);
3963 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3964 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3965 udelay(40);
3967 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3968 udelay(40);
3970 memset(&aninfo, 0, sizeof(aninfo));
3971 aninfo.flags |= MR_AN_ENABLE;
3972 aninfo.state = ANEG_STATE_UNKNOWN;
3973 aninfo.cur_time = 0;
3974 tick = 0;
3975 while (++tick < 195000) {
3976 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3977 if (status == ANEG_DONE || status == ANEG_FAILED)
3978 break;
3980 udelay(1);
3983 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3984 tw32_f(MAC_MODE, tp->mac_mode);
3985 udelay(40);
3987 *txflags = aninfo.txconfig;
3988 *rxflags = aninfo.flags;
3990 if (status == ANEG_DONE &&
3991 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3992 MR_LP_ADV_FULL_DUPLEX)))
3993 res = 1;
3995 return res;
3998 static void tg3_init_bcm8002(struct tg3 *tp)
4000 u32 mac_status = tr32(MAC_STATUS);
4001 int i;
4003 /* Reset when initializing for the first time, or when we have a link. */
4004 if (tg3_flag(tp, INIT_COMPLETE) &&
4005 !(mac_status & MAC_STATUS_PCS_SYNCED))
4006 return;
4008 /* Set PLL lock range. */
4009 tg3_writephy(tp, 0x16, 0x8007);
4011 /* SW reset */
4012 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4014 /* Wait for reset to complete. */
4015 /* XXX schedule_timeout() ... */
4016 for (i = 0; i < 500; i++)
4017 udelay(10);
4019 /* Config mode; select PMA/Ch 1 regs. */
4020 tg3_writephy(tp, 0x10, 0x8411);
4022 /* Enable auto-lock and comdet, select txclk for tx. */
4023 tg3_writephy(tp, 0x11, 0x0a10);
4025 tg3_writephy(tp, 0x18, 0x00a0);
4026 tg3_writephy(tp, 0x16, 0x41ff);
4028 /* Assert and deassert POR. */
4029 tg3_writephy(tp, 0x13, 0x0400);
4030 udelay(40);
4031 tg3_writephy(tp, 0x13, 0x0000);
4033 tg3_writephy(tp, 0x11, 0x0a50);
4034 udelay(40);
4035 tg3_writephy(tp, 0x11, 0x0a10);
4037 /* Wait for signal to stabilize */
4038 /* XXX schedule_timeout() ... */
4039 for (i = 0; i < 15000; i++)
4040 udelay(10);
4042 /* Deselect the channel register so we can read the PHYID
4043 * later. */
4045 tg3_writephy(tp, 0x10, 0x8011);
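/* Hardware-assisted fiber autoneg via the SG_DIG block on 5704S-class
 * parts: program the expected control word with the wanted pause bits,
 * let the SerDes run the negotiation, and fall back to parallel
 * detection when the partner never sends configuration code words. */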
4048 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4050 u16 flowctrl;
4051 u32 sg_dig_ctrl, sg_dig_status;
4052 u32 serdes_cfg, expected_sg_dig_ctrl;
4053 int workaround, port_a;
4054 int current_link_up;
4056 serdes_cfg = 0;
4057 expected_sg_dig_ctrl = 0;
4058 workaround = 0;
4059 port_a = 1;
4060 current_link_up = 0;
4062 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4063 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4064 workaround = 1;
4065 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4066 port_a = 0;
4068 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4069 /* preserve bits 20-23 for voltage regulator */
4070 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4073 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4075 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4076 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4077 if (workaround) {
4078 u32 val = serdes_cfg;
4080 if (port_a)
4081 val |= 0xc010000;
4082 else
4083 val |= 0x4010000;
4084 tw32_f(MAC_SERDES_CFG, val);
4087 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4089 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4090 tg3_setup_flow_control(tp, 0, 0);
4091 current_link_up = 1;
4093 goto out;
4096 /* Want auto-negotiation. */
4097 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4099 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4100 if (flowctrl & ADVERTISE_1000XPAUSE)
4101 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4102 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4103 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4105 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4106 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4107 tp->serdes_counter &&
4108 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4109 MAC_STATUS_RCVD_CFG)) ==
4110 MAC_STATUS_PCS_SYNCED)) {
4111 tp->serdes_counter--;
4112 current_link_up = 1;
4113 goto out;
4115 restart_autoneg:
4116 if (workaround)
4117 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4118 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4119 udelay(5);
4120 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4122 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4123 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4124 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4125 MAC_STATUS_SIGNAL_DET)) {
4126 sg_dig_status = tr32(SG_DIG_STATUS);
4127 mac_status = tr32(MAC_STATUS);
4129 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4130 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4131 u32 local_adv = 0, remote_adv = 0;
4133 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4134 local_adv |= ADVERTISE_1000XPAUSE;
4135 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4136 local_adv |= ADVERTISE_1000XPSE_ASYM;
4138 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4139 remote_adv |= LPA_1000XPAUSE;
4140 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4141 remote_adv |= LPA_1000XPAUSE_ASYM;
4143 tg3_setup_flow_control(tp, local_adv, remote_adv);
4144 current_link_up = 1;
4145 tp->serdes_counter = 0;
4146 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4147 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4148 if (tp->serdes_counter)
4149 tp->serdes_counter--;
4150 else {
4151 if (workaround) {
4152 u32 val = serdes_cfg;
4154 if (port_a)
4155 val |= 0xc010000;
4156 else
4157 val |= 0x4010000;
4159 tw32_f(MAC_SERDES_CFG, val);
4162 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4163 udelay(40);
4165 /* Link parallel detection - link is up */
4166 /* only if we have PCS_SYNC and not */
4167 /* receiving config code words */
4168 mac_status = tr32(MAC_STATUS);
4169 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4170 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4171 tg3_setup_flow_control(tp, 0, 0);
4172 current_link_up = 1;
4173 tp->phy_flags |=
4174 TG3_PHYFLG_PARALLEL_DETECT;
4175 tp->serdes_counter =
4176 SERDES_PARALLEL_DET_TIMEOUT;
4177 } else
4178 goto restart_autoneg;
4181 } else {
4182 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4183 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4186 out:
4187 return current_link_up;
4190 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4192 int current_link_up = 0;
4194 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4195 goto out;
4197 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4198 u32 txflags, rxflags;
4199 int i;
4201 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4202 u32 local_adv = 0, remote_adv = 0;
4204 if (txflags & ANEG_CFG_PS1)
4205 local_adv |= ADVERTISE_1000XPAUSE;
4206 if (txflags & ANEG_CFG_PS2)
4207 local_adv |= ADVERTISE_1000XPSE_ASYM;
4209 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4210 remote_adv |= LPA_1000XPAUSE;
4211 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4212 remote_adv |= LPA_1000XPAUSE_ASYM;
4214 tg3_setup_flow_control(tp, local_adv, remote_adv);
4216 current_link_up = 1;
4218 for (i = 0; i < 30; i++) {
4219 udelay(20);
4220 tw32_f(MAC_STATUS,
4221 (MAC_STATUS_SYNC_CHANGED |
4222 MAC_STATUS_CFG_CHANGED));
4223 udelay(40);
4224 if ((tr32(MAC_STATUS) &
4225 (MAC_STATUS_SYNC_CHANGED |
4226 MAC_STATUS_CFG_CHANGED)) == 0)
4227 break;
4230 mac_status = tr32(MAC_STATUS);
4231 if (current_link_up == 0 &&
4232 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4233 !(mac_status & MAC_STATUS_RCVD_CFG))
4234 current_link_up = 1;
4235 } else {
4236 tg3_setup_flow_control(tp, 0, 0);
4238 /* Forcing 1000FD link up. */
4239 current_link_up = 1;
4241 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4242 udelay(40);
4244 tw32_f(MAC_MODE, tp->mac_mode);
4245 udelay(40);
4248 out:
4249 return current_link_up;
4252 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4254 u32 orig_pause_cfg;
4255 u16 orig_active_speed;
4256 u8 orig_active_duplex;
4257 u32 mac_status;
4258 int current_link_up;
4259 int i;
4261 orig_pause_cfg = tp->link_config.active_flowctrl;
4262 orig_active_speed = tp->link_config.active_speed;
4263 orig_active_duplex = tp->link_config.active_duplex;
4265 if (!tg3_flag(tp, HW_AUTONEG) &&
4266 netif_carrier_ok(tp->dev) &&
4267 tg3_flag(tp, INIT_COMPLETE)) {
4268 mac_status = tr32(MAC_STATUS);
4269 mac_status &= (MAC_STATUS_PCS_SYNCED |
4270 MAC_STATUS_SIGNAL_DET |
4271 MAC_STATUS_CFG_CHANGED |
4272 MAC_STATUS_RCVD_CFG);
4273 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4274 MAC_STATUS_SIGNAL_DET)) {
4275 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4276 MAC_STATUS_CFG_CHANGED));
4277 return 0;
4281 tw32_f(MAC_TX_AUTO_NEG, 0);
4283 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4284 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4285 tw32_f(MAC_MODE, tp->mac_mode);
4286 udelay(40);
4288 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4289 tg3_init_bcm8002(tp);
4291 /* Enable link change event even when serdes polling. */
4292 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4293 udelay(40);
4295 current_link_up = 0;
4296 mac_status = tr32(MAC_STATUS);
4298 if (tg3_flag(tp, HW_AUTONEG))
4299 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4300 else
4301 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4303 tp->napi[0].hw_status->status =
4304 (SD_STATUS_UPDATED |
4305 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4307 for (i = 0; i < 100; i++) {
4308 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4309 MAC_STATUS_CFG_CHANGED));
4310 udelay(5);
4311 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4312 MAC_STATUS_CFG_CHANGED |
4313 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4314 break;
4317 mac_status = tr32(MAC_STATUS);
4318 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4319 current_link_up = 0;
4320 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4321 tp->serdes_counter == 0) {
4322 tw32_f(MAC_MODE, (tp->mac_mode |
4323 MAC_MODE_SEND_CONFIGS));
4324 udelay(1);
4325 tw32_f(MAC_MODE, tp->mac_mode);
4329 if (current_link_up == 1) {
4330 tp->link_config.active_speed = SPEED_1000;
4331 tp->link_config.active_duplex = DUPLEX_FULL;
4332 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4333 LED_CTRL_LNKLED_OVERRIDE |
4334 LED_CTRL_1000MBPS_ON));
4335 } else {
4336 tp->link_config.active_speed = SPEED_INVALID;
4337 tp->link_config.active_duplex = DUPLEX_INVALID;
4338 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4339 LED_CTRL_LNKLED_OVERRIDE |
4340 LED_CTRL_TRAFFIC_OVERRIDE));
4343 if (current_link_up != netif_carrier_ok(tp->dev)) {
4344 if (current_link_up)
4345 netif_carrier_on(tp->dev);
4346 else
4347 netif_carrier_off(tp->dev);
4348 tg3_link_report(tp);
4349 } else {
4350 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4351 if (orig_pause_cfg != now_pause_cfg ||
4352 orig_active_speed != tp->link_config.active_speed ||
4353 orig_active_duplex != tp->link_config.active_duplex)
4354 tg3_link_report(tp);
4357 return 0;
4360 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4362 int current_link_up, err = 0;
4363 u32 bmsr, bmcr;
4364 u16 current_speed;
4365 u8 current_duplex;
4366 u32 local_adv, remote_adv;
4368 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4369 tw32_f(MAC_MODE, tp->mac_mode);
4370 udelay(40);
4372 tw32(MAC_EVENT, 0);
4374 tw32_f(MAC_STATUS,
4375 (MAC_STATUS_SYNC_CHANGED |
4376 MAC_STATUS_CFG_CHANGED |
4377 MAC_STATUS_MI_COMPLETION |
4378 MAC_STATUS_LNKSTATE_CHANGED));
4379 udelay(40);
4381 if (force_reset)
4382 tg3_phy_reset(tp);
4384 current_link_up = 0;
4385 current_speed = SPEED_INVALID;
4386 current_duplex = DUPLEX_INVALID;
4388 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4389 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4391 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4392 bmsr |= BMSR_LSTATUS;
4393 else
4394 bmsr &= ~BMSR_LSTATUS;
4397 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4399 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4400 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4401 /* do nothing, just check for link up at the end */
4402 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4403 u32 adv, new_adv;
4405 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4406 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4407 ADVERTISE_1000XPAUSE |
4408 ADVERTISE_1000XPSE_ASYM |
4409 ADVERTISE_SLCT);
4411 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4413 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4414 new_adv |= ADVERTISE_1000XHALF;
4415 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4416 new_adv |= ADVERTISE_1000XFULL;
4418 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4419 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4420 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4421 tg3_writephy(tp, MII_BMCR, bmcr);
4423 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4424 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4425 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4427 return err;
4429 } else {
4430 u32 new_bmcr;
4432 bmcr &= ~BMCR_SPEED1000;
4433 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4435 if (tp->link_config.duplex == DUPLEX_FULL)
4436 new_bmcr |= BMCR_FULLDPLX;
4438 if (new_bmcr != bmcr) {
4439 /* BMCR_SPEED1000 is a reserved bit that needs
4440 * to be set on write. */
4442 new_bmcr |= BMCR_SPEED1000;
4444 /* Force a linkdown */
4445 if (netif_carrier_ok(tp->dev)) {
4446 u32 adv;
4448 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4449 adv &= ~(ADVERTISE_1000XFULL |
4450 ADVERTISE_1000XHALF |
4451 ADVERTISE_SLCT);
4452 tg3_writephy(tp, MII_ADVERTISE, adv);
4453 tg3_writephy(tp, MII_BMCR, bmcr |
4454 BMCR_ANRESTART |
4455 BMCR_ANENABLE);
4456 udelay(10);
4457 netif_carrier_off(tp->dev);
4459 tg3_writephy(tp, MII_BMCR, new_bmcr);
4460 bmcr = new_bmcr;
4461 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4462 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4463 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4464 ASIC_REV_5714) {
4465 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4466 bmsr |= BMSR_LSTATUS;
4467 else
4468 bmsr &= ~BMSR_LSTATUS;
4470 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4474 if (bmsr & BMSR_LSTATUS) {
4475 current_speed = SPEED_1000;
4476 current_link_up = 1;
4477 if (bmcr & BMCR_FULLDPLX)
4478 current_duplex = DUPLEX_FULL;
4479 else
4480 current_duplex = DUPLEX_HALF;
4482 local_adv = 0;
4483 remote_adv = 0;
4485 if (bmcr & BMCR_ANENABLE) {
4486 u32 common;
4488 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4489 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4490 common = local_adv & remote_adv;
4491 if (common & (ADVERTISE_1000XHALF |
4492 ADVERTISE_1000XFULL)) {
4493 if (common & ADVERTISE_1000XFULL)
4494 current_duplex = DUPLEX_FULL;
4495 else
4496 current_duplex = DUPLEX_HALF;
4497 } else if (!tg3_flag(tp, 5780_CLASS)) {
4498 /* Link is up via parallel detect */
4499 } else {
4500 current_link_up = 0;
4505 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4506 tg3_setup_flow_control(tp, local_adv, remote_adv);
4508 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4509 if (tp->link_config.active_duplex == DUPLEX_HALF)
4510 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4512 tw32_f(MAC_MODE, tp->mac_mode);
4513 udelay(40);
4515 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4517 tp->link_config.active_speed = current_speed;
4518 tp->link_config.active_duplex = current_duplex;
4520 if (current_link_up != netif_carrier_ok(tp->dev)) {
4521 if (current_link_up)
4522 netif_carrier_on(tp->dev);
4523 else {
4524 netif_carrier_off(tp->dev);
4525 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4527 tg3_link_report(tp);
4529 return err;
4532 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4534 if (tp->serdes_counter) {
4535 /* Give autoneg time to complete. */
4536 tp->serdes_counter--;
4537 return;
4540 if (!netif_carrier_ok(tp->dev) &&
4541 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4542 u32 bmcr;
4544 tg3_readphy(tp, MII_BMCR, &bmcr);
4545 if (bmcr & BMCR_ANENABLE) {
4546 u32 phy1, phy2;
4548 /* Select shadow register 0x1f */
4549 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4550 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4552 /* Select expansion interrupt status register */
4553 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4554 MII_TG3_DSP_EXP1_INT_STAT);
4555 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4556 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4558 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4559 /* We have signal detect and not receiving
4560 * config code words, link is up by parallel
4561 * detection. */
4564 bmcr &= ~BMCR_ANENABLE;
4565 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4566 tg3_writephy(tp, MII_BMCR, bmcr);
4567 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4570 } else if (netif_carrier_ok(tp->dev) &&
4571 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4572 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4573 u32 phy2;
4575 /* Select expansion interrupt status register */
4576 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4577 MII_TG3_DSP_EXP1_INT_STAT);
4578 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4579 if (phy2 & 0x20) {
4580 u32 bmcr;
4582 /* Config code words received, turn on autoneg. */
4583 tg3_readphy(tp, MII_BMCR, &bmcr);
4584 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4586 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4592 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4594 u32 val;
4595 int err;
4597 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4598 err = tg3_setup_fiber_phy(tp, force_reset);
4599 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4600 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4601 else
4602 err = tg3_setup_copper_phy(tp, force_reset);
4604 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4605 u32 scale;
4607 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4608 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4609 scale = 65;
4610 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4611 scale = 6;
4612 else
4613 scale = 12;
4615 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4616 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4617 tw32(GRC_MISC_CFG, val);
4620 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4621 (6 << TX_LENGTHS_IPG_SHIFT);
4622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4623 val |= tr32(MAC_TX_LENGTHS) &
4624 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4625 TX_LENGTHS_CNT_DWN_VAL_MSK);
4627 if (tp->link_config.active_speed == SPEED_1000 &&
4628 tp->link_config.active_duplex == DUPLEX_HALF)
4629 tw32(MAC_TX_LENGTHS, val |
4630 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4631 else
4632 tw32(MAC_TX_LENGTHS, val |
4633 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4635 if (!tg3_flag(tp, 5705_PLUS)) {
4636 if (netif_carrier_ok(tp->dev)) {
4637 tw32(HOSTCC_STAT_COAL_TICKS,
4638 tp->coal.stats_block_coalesce_usecs);
4639 } else {
4640 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4644 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4645 val = tr32(PCIE_PWR_MGMT_THRESH);
4646 if (!netif_carrier_ok(tp->dev))
4647 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4648 tp->pwrmgmt_thresh;
4649 else
4650 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4651 tw32(PCIE_PWR_MGMT_THRESH, val);
4654 return err;
4657 static inline int tg3_irq_sync(struct tg3 *tp)
4659 return tp->irq_sync;
4662 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4664 int i;
4666 dst = (u32 *)((u8 *)dst + off);
4667 for (i = 0; i < len; i += sizeof(u32))
4668 *dst++ = tr32(off + i);
4671 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4673 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4674 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4675 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4676 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4677 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4678 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4679 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4680 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4681 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4682 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4683 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4684 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4685 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4686 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4687 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4688 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4689 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4690 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4691 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4693 if (tg3_flag(tp, SUPPORT_MSIX))
4694 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4696 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4697 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4698 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4699 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4700 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4701 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4702 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4703 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4705 if (!tg3_flag(tp, 5705_PLUS)) {
4706 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4707 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4708 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4711 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4712 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4713 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4714 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4715 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4717 if (tg3_flag(tp, NVRAM))
4718 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4721 static void tg3_dump_state(struct tg3 *tp)
4723 int i;
4724 u32 *regs;
4726 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4727 if (!regs) {
4728 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4729 return;
4732 if (tg3_flag(tp, PCI_EXPRESS)) {
4733 /* Read up to but not including private PCI registers */
4734 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4735 regs[i / sizeof(u32)] = tr32(i);
4736 } else
4737 tg3_dump_legacy_regs(tp, regs);
4739 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4740 if (!regs[i + 0] && !regs[i + 1] &&
4741 !regs[i + 2] && !regs[i + 3])
4742 continue;
4744 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4745 i * 4,
4746 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4749 kfree(regs);
4751 for (i = 0; i < tp->irq_cnt; i++) {
4752 struct tg3_napi *tnapi = &tp->napi[i];
4754 /* SW status block */
4755 netdev_err(tp->dev,
4756 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4758 tnapi->hw_status->status,
4759 tnapi->hw_status->status_tag,
4760 tnapi->hw_status->rx_jumbo_consumer,
4761 tnapi->hw_status->rx_consumer,
4762 tnapi->hw_status->rx_mini_consumer,
4763 tnapi->hw_status->idx[0].rx_producer,
4764 tnapi->hw_status->idx[0].tx_consumer);
4766 netdev_err(tp->dev,
4767 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4769 tnapi->last_tag, tnapi->last_irq_tag,
4770 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4771 tnapi->rx_rcb_ptr,
4772 tnapi->prodring.rx_std_prod_idx,
4773 tnapi->prodring.rx_std_cons_idx,
4774 tnapi->prodring.rx_jmb_prod_idx,
4775 tnapi->prodring.rx_jmb_cons_idx);
4779 /* This is called whenever we suspect that the system chipset is re-
4780 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4781 * is bogus tx completions. We try to recover by setting the
4782 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4783 * in the workqueue. */
4785 static void tg3_tx_recover(struct tg3 *tp)
4787 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4788 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4790 netdev_warn(tp->dev,
4791 "The system may be re-ordering memory-mapped I/O "
4792 "cycles to the network device, attempting to recover. "
4793 "Please report the problem to the driver maintainer "
4794 "and include system chipset information.\n");
4796 spin_lock(&tp->lock);
4797 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4798 spin_unlock(&tp->lock);
4801 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4803 /* Tell compiler to fetch tx indices from memory. */
4804 barrier();
4805 return tnapi->tx_pending -
4806 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
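/* Worked example of the arithmetic above (illustrative, assuming
 * TG3_TX_RING_SIZE == 512 and tx_pending == 511): with a wrapped
 * producer, tx_prod == 10 and tx_cons == 500,
 *
 *	in flight: (10 - 500) & 511 == 22
 *	available: 511 - 22 == 489
 *
 * The mask makes the unsigned subtraction wrap to the correct
 * in-flight count.
 */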
4809 /* Tigon3 never reports partial packet sends. So we do not
4810 * need special logic to handle SKBs that have not had all
4811 * of their frags sent yet, like SunGEM does. */
4813 static void tg3_tx(struct tg3_napi *tnapi)
4815 struct tg3 *tp = tnapi->tp;
4816 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4817 u32 sw_idx = tnapi->tx_cons;
4818 struct netdev_queue *txq;
4819 int index = tnapi - tp->napi;
4821 if (tg3_flag(tp, ENABLE_TSS))
4822 index--;
4824 txq = netdev_get_tx_queue(tp->dev, index);
4826 while (sw_idx != hw_idx) {
4827 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
4828 struct sk_buff *skb = ri->skb;
4829 int i, tx_bug = 0;
4831 if (unlikely(skb == NULL)) {
4832 tg3_tx_recover(tp);
4833 return;
4836 pci_unmap_single(tp->pdev,
4837 dma_unmap_addr(ri, mapping),
4838 skb_headlen(skb),
4839 PCI_DMA_TODEVICE);
4841 ri->skb = NULL;
4843 sw_idx = NEXT_TX(sw_idx);
4845 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4846 ri = &tnapi->tx_buffers[sw_idx];
4847 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4848 tx_bug = 1;
4850 pci_unmap_page(tp->pdev,
4851 dma_unmap_addr(ri, mapping),
4852 skb_shinfo(skb)->frags[i].size,
4853 PCI_DMA_TODEVICE);
4854 sw_idx = NEXT_TX(sw_idx);
4857 dev_kfree_skb(skb);
4859 if (unlikely(tx_bug)) {
4860 tg3_tx_recover(tp);
4861 return;
4865 tnapi->tx_cons = sw_idx;
4867 /* Need to make the tx_cons update visible to tg3_start_xmit()
4868 * before checking for netif_queue_stopped(). Without the
4869 * memory barrier, there is a small possibility that tg3_start_xmit()
4870 * will miss it and cause the queue to be stopped forever. */
4872 smp_mb();
4874 if (unlikely(netif_tx_queue_stopped(txq) &&
4875 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4876 __netif_tx_lock(txq, smp_processor_id());
4877 if (netif_tx_queue_stopped(txq) &&
4878 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4879 netif_tx_wake_queue(txq);
4880 __netif_tx_unlock(txq);
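/* Sketch of the barrier pairing relied on above (editorial, not driver
 * code). The smp_mb() in tg3_tx() orders the tx_cons store before the
 * netif_tx_queue_stopped() check; tg3_start_xmit() issues its own
 * smp_mb() between netif_tx_stop_queue() and its re-read of
 * tg3_tx_avail():
 *
 *	CPU0 (tg3_start_xmit)		CPU1 (tg3_tx)
 *	netif_tx_stop_queue(txq)	tnapi->tx_cons = sw_idx
 *	smp_mb()			smp_mb()
 *	re-check tg3_tx_avail()		check netif_tx_queue_stopped()
 *
 * Whichever CPU acts second sees the other's update, so the queue can
 * never be left stopped with free descriptors available.
 */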
4884 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4886 if (!ri->skb)
4887 return;
4889 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4890 map_sz, PCI_DMA_FROMDEVICE);
4891 dev_kfree_skb_any(ri->skb);
4892 ri->skb = NULL;
4895 /* Returns size of skb allocated or < 0 on error.
4897 * We only need to fill in the address because the other members
4898 * of the RX descriptor are invariant, see tg3_init_rings.
4900 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4901 * posting buffers we only dirty the first cache line of the RX
4902 * descriptor (containing the address). Whereas for the RX status
4903 * buffers the cpu only reads the last cacheline of the RX descriptor
4904 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). */
4906 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4907 u32 opaque_key, u32 dest_idx_unmasked)
4909 struct tg3_rx_buffer_desc *desc;
4910 struct ring_info *map;
4911 struct sk_buff *skb;
4912 dma_addr_t mapping;
4913 int skb_size, dest_idx;
4915 switch (opaque_key) {
4916 case RXD_OPAQUE_RING_STD:
4917 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4918 desc = &tpr->rx_std[dest_idx];
4919 map = &tpr->rx_std_buffers[dest_idx];
4920 skb_size = tp->rx_pkt_map_sz;
4921 break;
4923 case RXD_OPAQUE_RING_JUMBO:
4924 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4925 desc = &tpr->rx_jmb[dest_idx].std;
4926 map = &tpr->rx_jmb_buffers[dest_idx];
4927 skb_size = TG3_RX_JMB_MAP_SZ;
4928 break;
4930 default:
4931 return -EINVAL;
4934 /* Do not overwrite any of the map or rp information
4935 * until we are sure we can commit to a new buffer.
4937 * Callers depend upon this behavior and assume that
4938 * we leave everything unchanged if we fail. */
4940 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4941 if (skb == NULL)
4942 return -ENOMEM;
4944 skb_reserve(skb, tp->rx_offset);
4946 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4947 PCI_DMA_FROMDEVICE);
4948 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4949 dev_kfree_skb(skb);
4950 return -EIO;
4953 map->skb = skb;
4954 dma_unmap_addr_set(map, mapping, mapping);
4956 desc->addr_hi = ((u64)mapping >> 32);
4957 desc->addr_lo = ((u64)mapping & 0xffffffff);
4959 return skb_size;
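/* Illustrative example (not in the original source) of the address
 * split above: for mapping == 0x0000000123456789ULL,
 *
 *	desc->addr_hi = 0x00000001;
 *	desc->addr_lo = 0x23456789;
 *
 * Only this first cache line of the descriptor is dirtied when a
 * fresh buffer is posted, per the note above tg3_alloc_rx_skb.
 */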
4962 /* We only need to move over in the address because the other
4963 * members of the RX descriptor are invariant. See notes above
4964 * tg3_alloc_rx_skb for full details. */
4966 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4967 struct tg3_rx_prodring_set *dpr,
4968 u32 opaque_key, int src_idx,
4969 u32 dest_idx_unmasked)
4971 struct tg3 *tp = tnapi->tp;
4972 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4973 struct ring_info *src_map, *dest_map;
4974 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4975 int dest_idx;
4977 switch (opaque_key) {
4978 case RXD_OPAQUE_RING_STD:
4979 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4980 dest_desc = &dpr->rx_std[dest_idx];
4981 dest_map = &dpr->rx_std_buffers[dest_idx];
4982 src_desc = &spr->rx_std[src_idx];
4983 src_map = &spr->rx_std_buffers[src_idx];
4984 break;
4986 case RXD_OPAQUE_RING_JUMBO:
4987 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4988 dest_desc = &dpr->rx_jmb[dest_idx].std;
4989 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4990 src_desc = &spr->rx_jmb[src_idx].std;
4991 src_map = &spr->rx_jmb_buffers[src_idx];
4992 break;
4994 default:
4995 return;
4998 dest_map->skb = src_map->skb;
4999 dma_unmap_addr_set(dest_map, mapping,
5000 dma_unmap_addr(src_map, mapping));
5001 dest_desc->addr_hi = src_desc->addr_hi;
5002 dest_desc->addr_lo = src_desc->addr_lo;
5004 /* Ensure that the update to the skb happens after the physical
5005 * addresses have been transferred to the new BD location. */
5007 smp_wmb();
5009 src_map->skb = NULL;
5012 /* The RX ring scheme is composed of multiple rings which post fresh
5013 * buffers to the chip, and one special ring the chip uses to report
5014 * status back to the host.
5016 * The special ring reports the status of received packets to the
5017 * host. The chip does not write into the original descriptor the
5018 * RX buffer was obtained from. The chip simply takes the original
5019 * descriptor as provided by the host, updates the status and length
5020 * field, then writes this into the next status ring entry.
5022 * Each ring the host uses to post buffers to the chip is described
5023 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
5024 * it is first placed into the on-chip ram. When the packet's length
5025 * is known, it walks down the TG3_BDINFO entries to select the ring.
5026 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5027 * which is within the range of the new packet's length is chosen.
5029 * The "separate ring for rx status" scheme may sound queer, but it makes
5030 * sense from a cache coherency perspective. If only the host writes
5031 * to the buffer post rings, and only the chip writes to the rx status
5032 * rings, then cache lines never move beyond shared-modified state.
5033 * If both the host and chip were to write into the same ring, cache line
5034 * eviction could occur since both entities want it in an exclusive state. */
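/* Compact picture of the scheme described above (editorial addition):
 *
 *	host writes:  std/jumbo producer rings ---> chip DMA engine
 *	chip writes:  rx return (status) ring  ---> host (tg3_rx)
 *
 * Each ring has a single writer, so no cache line is contended for
 * exclusive ownership by both host and chip.
 */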
5036 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5038 struct tg3 *tp = tnapi->tp;
5039 u32 work_mask, rx_std_posted = 0;
5040 u32 std_prod_idx, jmb_prod_idx;
5041 u32 sw_idx = tnapi->rx_rcb_ptr;
5042 u16 hw_idx;
5043 int received;
5044 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5046 hw_idx = *(tnapi->rx_rcb_prod_idx);
5048 /* We need to order the read of hw_idx and the read of
5049 * the opaque cookie. */
5051 rmb();
5052 work_mask = 0;
5053 received = 0;
5054 std_prod_idx = tpr->rx_std_prod_idx;
5055 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5056 while (sw_idx != hw_idx && budget > 0) {
5057 struct ring_info *ri;
5058 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5059 unsigned int len;
5060 struct sk_buff *skb;
5061 dma_addr_t dma_addr;
5062 u32 opaque_key, desc_idx, *post_ptr;
5064 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5065 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5066 if (opaque_key == RXD_OPAQUE_RING_STD) {
5067 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5068 dma_addr = dma_unmap_addr(ri, mapping);
5069 skb = ri->skb;
5070 post_ptr = &std_prod_idx;
5071 rx_std_posted++;
5072 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5073 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5074 dma_addr = dma_unmap_addr(ri, mapping);
5075 skb = ri->skb;
5076 post_ptr = &jmb_prod_idx;
5077 } else
5078 goto next_pkt_nopost;
5080 work_mask |= opaque_key;
5082 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5083 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5084 drop_it:
5085 tg3_recycle_rx(tnapi, tpr, opaque_key,
5086 desc_idx, *post_ptr);
5087 drop_it_no_recycle:
5088 /* Other statistics kept track of by card. */
5089 tp->rx_dropped++;
5090 goto next_pkt;
5093 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5094 ETH_FCS_LEN;
5096 if (len > TG3_RX_COPY_THRESH(tp)) {
5097 int skb_size;
5099 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5100 *post_ptr);
5101 if (skb_size < 0)
5102 goto drop_it;
5104 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5105 PCI_DMA_FROMDEVICE);
5107 /* Ensure that the update to the skb happens
5108 * after the usage of the old DMA mapping. */
5110 smp_wmb();
5112 ri->skb = NULL;
5114 skb_put(skb, len);
5115 } else {
5116 struct sk_buff *copy_skb;
5118 tg3_recycle_rx(tnapi, tpr, opaque_key,
5119 desc_idx, *post_ptr);
5121 copy_skb = netdev_alloc_skb(tp->dev, len +
5122 TG3_RAW_IP_ALIGN);
5123 if (copy_skb == NULL)
5124 goto drop_it_no_recycle;
5126 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5127 skb_put(copy_skb, len);
5128 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5129 skb_copy_from_linear_data(skb, copy_skb->data, len);
5130 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5132 /* We'll reuse the original ring buffer. */
5133 skb = copy_skb;
5136 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5137 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5138 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5139 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5140 skb->ip_summed = CHECKSUM_UNNECESSARY;
5141 else
5142 skb_checksum_none_assert(skb);
5144 skb->protocol = eth_type_trans(skb, tp->dev);
5146 if (len > (tp->dev->mtu + ETH_HLEN) &&
5147 skb->protocol != htons(ETH_P_8021Q)) {
5148 dev_kfree_skb(skb);
5149 goto drop_it_no_recycle;
5152 if (desc->type_flags & RXD_FLAG_VLAN &&
5153 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5154 __vlan_hwaccel_put_tag(skb,
5155 desc->err_vlan & RXD_VLAN_MASK);
5157 napi_gro_receive(&tnapi->napi, skb);
5159 received++;
5160 budget--;
5162 next_pkt:
5163 (*post_ptr)++;
5165 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5166 tpr->rx_std_prod_idx = std_prod_idx &
5167 tp->rx_std_ring_mask;
5168 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5169 tpr->rx_std_prod_idx);
5170 work_mask &= ~RXD_OPAQUE_RING_STD;
5171 rx_std_posted = 0;
5173 next_pkt_nopost:
5174 sw_idx++;
5175 sw_idx &= tp->rx_ret_ring_mask;
5177 /* Refresh hw_idx to see if there is new work */
5178 if (sw_idx == hw_idx) {
5179 hw_idx = *(tnapi->rx_rcb_prod_idx);
5180 rmb();
5184 /* ACK the status ring. */
5185 tnapi->rx_rcb_ptr = sw_idx;
5186 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5188 /* Refill RX ring(s). */
5189 if (!tg3_flag(tp, ENABLE_RSS)) {
5190 if (work_mask & RXD_OPAQUE_RING_STD) {
5191 tpr->rx_std_prod_idx = std_prod_idx &
5192 tp->rx_std_ring_mask;
5193 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5194 tpr->rx_std_prod_idx);
5196 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5197 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5198 tp->rx_jmb_ring_mask;
5199 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5200 tpr->rx_jmb_prod_idx);
5202 mmiowb();
5203 } else if (work_mask) {
5204 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5205 * updated before the producer indices can be updated. */
5207 smp_wmb();
5209 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5210 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5212 if (tnapi != &tp->napi[1])
5213 napi_schedule(&tp->napi[1].napi);
5216 return received;
5219 static void tg3_poll_link(struct tg3 *tp)
5221 /* handle link change and other phy events */
5222 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5223 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5225 if (sblk->status & SD_STATUS_LINK_CHG) {
5226 sblk->status = SD_STATUS_UPDATED |
5227 (sblk->status & ~SD_STATUS_LINK_CHG);
5228 spin_lock(&tp->lock);
5229 if (tg3_flag(tp, USE_PHYLIB)) {
5230 tw32_f(MAC_STATUS,
5231 (MAC_STATUS_SYNC_CHANGED |
5232 MAC_STATUS_CFG_CHANGED |
5233 MAC_STATUS_MI_COMPLETION |
5234 MAC_STATUS_LNKSTATE_CHANGED));
5235 udelay(40);
5236 } else
5237 tg3_setup_phy(tp, 0);
5238 spin_unlock(&tp->lock);
5243 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5244 struct tg3_rx_prodring_set *dpr,
5245 struct tg3_rx_prodring_set *spr)
5247 u32 si, di, cpycnt, src_prod_idx;
5248 int i, err = 0;
5250 while (1) {
5251 src_prod_idx = spr->rx_std_prod_idx;
5253 /* Make sure updates to the rx_std_buffers[] entries and the
5254 * standard producer index are seen in the correct order. */
5256 smp_rmb();
5258 if (spr->rx_std_cons_idx == src_prod_idx)
5259 break;
5261 if (spr->rx_std_cons_idx < src_prod_idx)
5262 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5263 else
5264 cpycnt = tp->rx_std_ring_mask + 1 -
5265 spr->rx_std_cons_idx;
5267 cpycnt = min(cpycnt,
5268 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5270 si = spr->rx_std_cons_idx;
5271 di = dpr->rx_std_prod_idx;
5273 for (i = di; i < di + cpycnt; i++) {
5274 if (dpr->rx_std_buffers[i].skb) {
5275 cpycnt = i - di;
5276 err = -ENOSPC;
5277 break;
5281 if (!cpycnt)
5282 break;
5284 /* Ensure that updates to the rx_std_buffers ring and the
5285 * shadowed hardware producer ring from tg3_recycle_skb() are
5286 * ordered correctly WRT the skb check above. */
5288 smp_rmb();
5290 memcpy(&dpr->rx_std_buffers[di],
5291 &spr->rx_std_buffers[si],
5292 cpycnt * sizeof(struct ring_info));
5294 for (i = 0; i < cpycnt; i++, di++, si++) {
5295 struct tg3_rx_buffer_desc *sbd, *dbd;
5296 sbd = &spr->rx_std[si];
5297 dbd = &dpr->rx_std[di];
5298 dbd->addr_hi = sbd->addr_hi;
5299 dbd->addr_lo = sbd->addr_lo;
5302 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5303 tp->rx_std_ring_mask;
5304 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5305 tp->rx_std_ring_mask;
5308 while (1) {
5309 src_prod_idx = spr->rx_jmb_prod_idx;
5311 /* Make sure updates to the rx_jmb_buffers[] entries and
5312 * the jumbo producer index are seen in the correct order. */
5314 smp_rmb();
5316 if (spr->rx_jmb_cons_idx == src_prod_idx)
5317 break;
5319 if (spr->rx_jmb_cons_idx < src_prod_idx)
5320 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5321 else
5322 cpycnt = tp->rx_jmb_ring_mask + 1 -
5323 spr->rx_jmb_cons_idx;
5325 cpycnt = min(cpycnt,
5326 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5328 si = spr->rx_jmb_cons_idx;
5329 di = dpr->rx_jmb_prod_idx;
5331 for (i = di; i < di + cpycnt; i++) {
5332 if (dpr->rx_jmb_buffers[i].skb) {
5333 cpycnt = i - di;
5334 err = -ENOSPC;
5335 break;
5339 if (!cpycnt)
5340 break;
5342 /* Ensure that updates to the rx_jmb_buffers ring and the
5343 * shadowed hardware producer ring from tg3_recycle_skb() are
5344 * ordered correctly WRT the skb check above. */
5346 smp_rmb();
5348 memcpy(&dpr->rx_jmb_buffers[di],
5349 &spr->rx_jmb_buffers[si],
5350 cpycnt * sizeof(struct ring_info));
5352 for (i = 0; i < cpycnt; i++, di++, si++) {
5353 struct tg3_rx_buffer_desc *sbd, *dbd;
5354 sbd = &spr->rx_jmb[si].std;
5355 dbd = &dpr->rx_jmb[di].std;
5356 dbd->addr_hi = sbd->addr_hi;
5357 dbd->addr_lo = sbd->addr_lo;
5360 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5361 tp->rx_jmb_ring_mask;
5362 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5363 tp->rx_jmb_ring_mask;
5366 return err;
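/* Worked example of the copy-count math above (illustrative, assuming
 * rx_std_ring_mask == 511, i.e. a 512-entry ring): with
 * spr->rx_std_cons_idx == 508 and src_prod_idx == 4, the producer has
 * wrapped, so cpycnt = 512 - 508 = 4 entries are copied up to the ring
 * boundary; the next loop iteration starts at cons_idx == 0 and copies
 * the remaining 4 entries.
 */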
5369 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5371 struct tg3 *tp = tnapi->tp;
5373 /* run TX completion thread */
5374 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5375 tg3_tx(tnapi);
5376 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5377 return work_done;
5380 /* run RX thread, within the bounds set by NAPI.
5381 * All RX "locking" is done by ensuring outside
5382 * code synchronizes with tg3->napi.poll() */
5384 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5385 work_done += tg3_rx(tnapi, budget - work_done);
5387 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5388 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5389 int i, err = 0;
5390 u32 std_prod_idx = dpr->rx_std_prod_idx;
5391 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5393 for (i = 1; i < tp->irq_cnt; i++)
5394 err |= tg3_rx_prodring_xfer(tp, dpr,
5395 &tp->napi[i].prodring);
5397 wmb();
5399 if (std_prod_idx != dpr->rx_std_prod_idx)
5400 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5401 dpr->rx_std_prod_idx);
5403 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5404 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5405 dpr->rx_jmb_prod_idx);
5407 mmiowb();
5409 if (err)
5410 tw32_f(HOSTCC_MODE, tp->coal_now);
5413 return work_done;
5416 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5418 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5419 struct tg3 *tp = tnapi->tp;
5420 int work_done = 0;
5421 struct tg3_hw_status *sblk = tnapi->hw_status;
5423 while (1) {
5424 work_done = tg3_poll_work(tnapi, work_done, budget);
5426 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5427 goto tx_recovery;
5429 if (unlikely(work_done >= budget))
5430 break;
5432 /* tp->last_tag is used in tg3_int_reenable() below
5433 * to tell the hw how much work has been processed,
5434 * so we must read it before checking for more work. */
5436 tnapi->last_tag = sblk->status_tag;
5437 tnapi->last_irq_tag = tnapi->last_tag;
5438 rmb();
5440 /* check for RX/TX work to do */
5441 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5442 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5443 napi_complete(napi);
5444 /* Reenable interrupts. */
5445 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5446 mmiowb();
5447 break;
5451 return work_done;
5453 tx_recovery:
5454 /* work_done is guaranteed to be less than budget. */
5455 napi_complete(napi);
5456 schedule_work(&tp->reset_task);
5457 return work_done;
5460 static void tg3_process_error(struct tg3 *tp)
5462 u32 val;
5463 bool real_error = false;
5465 if (tg3_flag(tp, ERROR_PROCESSED))
5466 return;
5468 /* Check Flow Attention register */
5469 val = tr32(HOSTCC_FLOW_ATTN);
5470 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5471 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5472 real_error = true;
5475 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5476 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5477 real_error = true;
5480 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5481 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5482 real_error = true;
5485 if (!real_error)
5486 return;
5488 tg3_dump_state(tp);
5490 tg3_flag_set(tp, ERROR_PROCESSED);
5491 schedule_work(&tp->reset_task);
5494 static int tg3_poll(struct napi_struct *napi, int budget)
5496 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5497 struct tg3 *tp = tnapi->tp;
5498 int work_done = 0;
5499 struct tg3_hw_status *sblk = tnapi->hw_status;
5501 while (1) {
5502 if (sblk->status & SD_STATUS_ERROR)
5503 tg3_process_error(tp);
5505 tg3_poll_link(tp);
5507 work_done = tg3_poll_work(tnapi, work_done, budget);
5509 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5510 goto tx_recovery;
5512 if (unlikely(work_done >= budget))
5513 break;
5515 if (tg3_flag(tp, TAGGED_STATUS)) {
5516 /* tp->last_tag is used in tg3_int_reenable() below
5517 * to tell the hw how much work has been processed,
5518 * so we must read it before checking for more work. */
5520 tnapi->last_tag = sblk->status_tag;
5521 tnapi->last_irq_tag = tnapi->last_tag;
5522 rmb();
5523 } else
5524 sblk->status &= ~SD_STATUS_UPDATED;
5526 if (likely(!tg3_has_work(tnapi))) {
5527 napi_complete(napi);
5528 tg3_int_reenable(tnapi);
5529 break;
5533 return work_done;
5535 tx_recovery:
5536 /* work_done is guaranteed to be less than budget. */
5537 napi_complete(napi);
5538 schedule_work(&tp->reset_task);
5539 return work_done;
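/* Editorial note on the NAPI contract assumed by both poll loops above:
 * returning work_done < budget after napi_complete() tells the core
 * that polling is finished and interrupts were re-enabled; returning
 * work_done == budget keeps the context on the poll list. Hence
 * tg3_int_reenable() and the tagged-status mailbox write only run on
 * the napi_complete() paths.
 */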
5542 static void tg3_napi_disable(struct tg3 *tp)
5544 int i;
5546 for (i = tp->irq_cnt - 1; i >= 0; i--)
5547 napi_disable(&tp->napi[i].napi);
5550 static void tg3_napi_enable(struct tg3 *tp)
5552 int i;
5554 for (i = 0; i < tp->irq_cnt; i++)
5555 napi_enable(&tp->napi[i].napi);
5558 static void tg3_napi_init(struct tg3 *tp)
5560 int i;
5562 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5563 for (i = 1; i < tp->irq_cnt; i++)
5564 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5567 static void tg3_napi_fini(struct tg3 *tp)
5569 int i;
5571 for (i = 0; i < tp->irq_cnt; i++)
5572 netif_napi_del(&tp->napi[i].napi);
5575 static inline void tg3_netif_stop(struct tg3 *tp)
5577 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5578 tg3_napi_disable(tp);
5579 netif_tx_disable(tp->dev);
5582 static inline void tg3_netif_start(struct tg3 *tp)
5584 /* NOTE: unconditional netif_tx_wake_all_queues is only
5585 * appropriate so long as all callers are assured to
5586 * have free tx slots (such as after tg3_init_hw) */
5588 netif_tx_wake_all_queues(tp->dev);
5590 tg3_napi_enable(tp);
5591 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5592 tg3_enable_ints(tp);
5595 static void tg3_irq_quiesce(struct tg3 *tp)
5597 int i;
5599 BUG_ON(tp->irq_sync);
5601 tp->irq_sync = 1;
5602 smp_mb();
5604 for (i = 0; i < tp->irq_cnt; i++)
5605 synchronize_irq(tp->napi[i].irq_vec);
5608 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5609 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5610 * with as well. Most of the time, this is not necessary except when
5611 * shutting down the device. */
5613 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5615 spin_lock_bh(&tp->lock);
5616 if (irq_sync)
5617 tg3_irq_quiesce(tp);
5620 static inline void tg3_full_unlock(struct tg3 *tp)
5622 spin_unlock_bh(&tp->lock);
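/* Typical usage sketch of the locking helpers above (illustrative):
 *
 *	tg3_full_lock(tp, 1);	quiesce IRQ handlers too, e.g. for reset
 *	... reconfigure or halt the hardware ...
 *	tg3_full_unlock(tp);
 *
 * Passing irq_sync == 0 takes only tp->lock, the common case (see
 * tg3_reset_task() below for both forms).
 */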
5625 /* One-shot MSI handler - Chip automatically disables interrupt
5626 * after sending MSI so driver doesn't have to do it. */
5628 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5630 struct tg3_napi *tnapi = dev_id;
5631 struct tg3 *tp = tnapi->tp;
5633 prefetch(tnapi->hw_status);
5634 if (tnapi->rx_rcb)
5635 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5637 if (likely(!tg3_irq_sync(tp)))
5638 napi_schedule(&tnapi->napi);
5640 return IRQ_HANDLED;
5643 /* MSI ISR - No need to check for interrupt sharing and no need to
5644 * flush status block and interrupt mailbox. PCI ordering rules
5645 * guarantee that MSI will arrive after the status block. */
5647 static irqreturn_t tg3_msi(int irq, void *dev_id)
5649 struct tg3_napi *tnapi = dev_id;
5650 struct tg3 *tp = tnapi->tp;
5652 prefetch(tnapi->hw_status);
5653 if (tnapi->rx_rcb)
5654 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5656 /* Writing any value to intr-mbox-0 clears PCI INTA# and
5657 * chip-internal interrupt pending events.
5658 * Writing non-zero to intr-mbox-0 additionally tells the
5659 * NIC to stop sending us irqs, engaging "in-intr-handler"
5660 * event coalescing. */
5662 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5663 if (likely(!tg3_irq_sync(tp)))
5664 napi_schedule(&tnapi->napi);
5666 return IRQ_RETVAL(1);
5669 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5671 struct tg3_napi *tnapi = dev_id;
5672 struct tg3 *tp = tnapi->tp;
5673 struct tg3_hw_status *sblk = tnapi->hw_status;
5674 unsigned int handled = 1;
5676 /* In INTx mode, it is possible for the interrupt to arrive at
5677 * the CPU before the status block posted prior to the interrupt.
5678 * Reading the PCI State register will confirm whether the
5679 * interrupt is ours and will flush the status block. */
5681 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5682 if (tg3_flag(tp, CHIP_RESETTING) ||
5683 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5684 handled = 0;
5685 goto out;
5690 /* Writing any value to intr-mbox-0 clears PCI INTA# and
5691 * chip-internal interrupt pending events.
5692 * Writing non-zero to intr-mbox-0 additionally tells the
5693 * NIC to stop sending us irqs, engaging "in-intr-handler"
5694 * event coalescing.
5696 * Flush the mailbox to de-assert the IRQ immediately to prevent
5697 * spurious interrupts. The flush impacts performance but
5698 * excessive spurious interrupts can be worse in some cases. */
5700 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5701 if (tg3_irq_sync(tp))
5702 goto out;
5703 sblk->status &= ~SD_STATUS_UPDATED;
5704 if (likely(tg3_has_work(tnapi))) {
5705 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5706 napi_schedule(&tnapi->napi);
5707 } else {
5708 /* No work, shared interrupt perhaps? re-enable
5709 * interrupts, and flush that PCI write */
5711 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5712 0x00000000);
5714 out:
5715 return IRQ_RETVAL(handled);
5718 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5720 struct tg3_napi *tnapi = dev_id;
5721 struct tg3 *tp = tnapi->tp;
5722 struct tg3_hw_status *sblk = tnapi->hw_status;
5723 unsigned int handled = 1;
5725 /* In INTx mode, it is possible for the interrupt to arrive at
5726 * the CPU before the status block posted prior to the interrupt.
5727 * Reading the PCI State register will confirm whether the
5728 * interrupt is ours and will flush the status block. */
5730 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5731 if (tg3_flag(tp, CHIP_RESETTING) ||
5732 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5733 handled = 0;
5734 goto out;
5739 /* Writing any value to intr-mbox-0 clears PCI INTA# and
5740 * chip-internal interrupt pending events.
5741 * Writing non-zero to intr-mbox-0 additionally tells the
5742 * NIC to stop sending us irqs, engaging "in-intr-handler"
5743 * event coalescing.
5745 * Flush the mailbox to de-assert the IRQ immediately to prevent
5746 * spurious interrupts. The flush impacts performance but
5747 * excessive spurious interrupts can be worse in some cases. */
5749 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5752 /* In a shared interrupt configuration, sometimes other devices'
5753 * interrupts will scream. We record the current status tag here
5754 * so that the above check can report that the screaming interrupts
5755 * are unhandled. Eventually they will be silenced. */
5757 tnapi->last_irq_tag = sblk->status_tag;
5759 if (tg3_irq_sync(tp))
5760 goto out;
5762 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5764 napi_schedule(&tnapi->napi);
5766 out:
5767 return IRQ_RETVAL(handled);
5770 /* ISR for interrupt test */
5771 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5773 struct tg3_napi *tnapi = dev_id;
5774 struct tg3 *tp = tnapi->tp;
5775 struct tg3_hw_status *sblk = tnapi->hw_status;
5777 if ((sblk->status & SD_STATUS_UPDATED) ||
5778 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5779 tg3_disable_ints(tp);
5780 return IRQ_RETVAL(1);
5782 return IRQ_RETVAL(0);
5785 static int tg3_init_hw(struct tg3 *, int);
5786 static int tg3_halt(struct tg3 *, int, int);
5788 /* Restart hardware after configuration changes, self-test, etc.
5789 * Invoked with tp->lock held. */
5791 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5792 __releases(tp->lock)
5793 __acquires(tp->lock)
5795 int err;
5797 err = tg3_init_hw(tp, reset_phy);
5798 if (err) {
5799 netdev_err(tp->dev,
5800 "Failed to re-initialize device, aborting\n");
5801 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5802 tg3_full_unlock(tp);
5803 del_timer_sync(&tp->timer);
5804 tp->irq_sync = 0;
5805 tg3_napi_enable(tp);
5806 dev_close(tp->dev);
5807 tg3_full_lock(tp, 0);
5809 return err;
5812 #ifdef CONFIG_NET_POLL_CONTROLLER
5813 static void tg3_poll_controller(struct net_device *dev)
5815 int i;
5816 struct tg3 *tp = netdev_priv(dev);
5818 for (i = 0; i < tp->irq_cnt; i++)
5819 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5821 #endif
5823 static void tg3_reset_task(struct work_struct *work)
5825 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5826 int err;
5827 unsigned int restart_timer;
5829 tg3_full_lock(tp, 0);
5831 if (!netif_running(tp->dev)) {
5832 tg3_full_unlock(tp);
5833 return;
5836 tg3_full_unlock(tp);
5838 tg3_phy_stop(tp);
5840 tg3_netif_stop(tp);
5842 tg3_full_lock(tp, 1);
5844 restart_timer = tg3_flag(tp, RESTART_TIMER);
5845 tg3_flag_clear(tp, RESTART_TIMER);
5847 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5848 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5849 tp->write32_rx_mbox = tg3_write_flush_reg32;
5850 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5851 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5854 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5855 err = tg3_init_hw(tp, 1);
5856 if (err)
5857 goto out;
5859 tg3_netif_start(tp);
5861 if (restart_timer)
5862 mod_timer(&tp->timer, jiffies + 1);
5864 out:
5865 tg3_full_unlock(tp);
5867 if (!err)
5868 tg3_phy_start(tp);
5871 static void tg3_tx_timeout(struct net_device *dev)
5873 struct tg3 *tp = netdev_priv(dev);
5875 if (netif_msg_tx_err(tp)) {
5876 netdev_err(dev, "transmit timed out, resetting\n");
5877 tg3_dump_state(tp);
5880 schedule_work(&tp->reset_task);
5883 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5884 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5886 u32 base = (u32) mapping & 0xffffffff;
5888 return (base > 0xffffdcc0) && (base + len + 8 < base);
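/* Worked example (illustrative): mapping == 0x1fffffff00 gives
 * base == 0xffffff00 > 0xffffdcc0, and with len == 0x200 the 32-bit
 * sum base + len + 8 wraps to 0x108 < base, so the buffer straddles a
 * 4GB boundary and the test returns true. The extra 8 bytes are
 * presumably slack for reads just past the end of the buffer.
 */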
5891 /* Test for DMA addresses > 40-bit */
5892 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5893 int len)
5895 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5896 if (tg3_flag(tp, 40BIT_DMA_BUG))
5897 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5898 return 0;
5899 #else
5900 return 0;
5901 #endif
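/* Illustrative example for configurations where the test is compiled
 * in: with the 40BIT_DMA_BUG flag set, mapping == 0xffffffff00 and
 * len == 0x200 give mapping + len == 0x10000000100, which exceeds
 * DMA_BIT_MASK(40) == 0xffffffffff, so an overflow is reported.
 */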
5904 static inline void tg3_tx_set_bd(struct tg3_napi *tnapi, u32 entry,
5905 dma_addr_t mapping, u32 len, u32 flags,
5906 u32 mss, u32 vlan)
5908 struct tg3_tx_buffer_desc *txbd = &tnapi->tx_ring[entry];
5910 txbd->addr_hi = ((u64) mapping >> 32);
5911 txbd->addr_lo = ((u64) mapping & 0xffffffff);
5912 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
5913 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
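/* Worked example of the descriptor packing above (illustrative values;
 * the exact shift constants live in tg3.h): len == 64,
 * flags == TXD_FLAG_END, mss == 0, vlan == 5 would yield
 *
 *	txbd->len_flags = (64 << TXD_LEN_SHIFT) | TXD_FLAG_END;
 *	txbd->vlan_tag  = (0 << TXD_MSS_SHIFT) | (5 << TXD_VLAN_TAG_SHIFT);
 *
 * packing the DMA length alongside the flags and the 802.1Q tag
 * alongside the MSS in the two 32-bit descriptor words.
 */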
5916 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
5918 int i;
5919 struct sk_buff *skb;
5920 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
5922 skb = txb->skb;
5923 txb->skb = NULL;
5925 pci_unmap_single(tnapi->tp->pdev,
5926 dma_unmap_addr(txb, mapping),
5927 skb_headlen(skb),
5928 PCI_DMA_TODEVICE);
5929 for (i = 0; i < last; i++) {
5930 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5932 entry = NEXT_TX(entry);
5933 txb = &tnapi->tx_buffers[entry];
5935 pci_unmap_page(tnapi->tp->pdev,
5936 dma_unmap_addr(txb, mapping),
5937 frag->size, PCI_DMA_TODEVICE);
5941 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5942 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5943 struct sk_buff *skb,
5944 u32 base_flags, u32 mss, u32 vlan)
5946 struct tg3 *tp = tnapi->tp;
5947 struct sk_buff *new_skb;
5948 dma_addr_t new_addr = 0;
5949 u32 entry = tnapi->tx_prod;
5950 int ret = 0;
5952 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5953 new_skb = skb_copy(skb, GFP_ATOMIC);
5954 else {
5955 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5957 new_skb = skb_copy_expand(skb,
5958 skb_headroom(skb) + more_headroom,
5959 skb_tailroom(skb), GFP_ATOMIC);
5962 if (!new_skb) {
5963 ret = -1;
5964 } else {
5965 /* New SKB is guaranteed to be linear. */
5966 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5967 PCI_DMA_TODEVICE);
5968 /* Make sure the mapping succeeded */
5969 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5970 ret = -1;
5971 dev_kfree_skb(new_skb);
5973 /* Make sure new skb does not cross any 4G boundaries.
5974 * Drop the packet if it does. */
5976 } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
5977 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5978 PCI_DMA_TODEVICE);
5979 ret = -1;
5980 dev_kfree_skb(new_skb);
5981 } else {
5982 base_flags |= TXD_FLAG_END;
5984 tnapi->tx_buffers[entry].skb = new_skb;
5985 dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5986 mapping, new_addr);
5988 tg3_tx_set_bd(tnapi, entry, new_addr, new_skb->len,
5989 base_flags, mss, vlan);
5993 dev_kfree_skb(skb);
5995 return ret;
5998 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6000 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6001 * TSO header is greater than 80 bytes. */
6003 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6005 struct sk_buff *segs, *nskb;
6006 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6008 /* Estimate the number of fragments in the worst case */
6009 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6010 netif_stop_queue(tp->dev);
6012 /* netif_tx_stop_queue() must be done before checking
6013 * tx index in tg3_tx_avail() below, because in
6014 * tg3_tx(), we update tx index before checking for
6015 * netif_tx_queue_stopped(). */
6017 smp_mb();
6018 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6019 return NETDEV_TX_BUSY;
6021 netif_wake_queue(tp->dev);
6024 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6025 if (IS_ERR(segs))
6026 goto tg3_tso_bug_end;
6028 do {
6029 nskb = segs;
6030 segs = segs->next;
6031 nskb->next = NULL;
6032 tg3_start_xmit(nskb, tp->dev);
6033 } while (segs);
6035 tg3_tso_bug_end:
6036 dev_kfree_skb(skb);
6038 return NETDEV_TX_OK;
6041 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6042 * support TG3_FLAG_HW_TSO_1 or firmware TSO only. */
6044 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6046 struct tg3 *tp = netdev_priv(dev);
6047 u32 len, entry, base_flags, mss, vlan = 0;
6048 int i = -1, would_hit_hwbug;
6049 dma_addr_t mapping;
6050 struct tg3_napi *tnapi;
6051 struct netdev_queue *txq;
6052 unsigned int last;
6054 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6055 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6056 if (tg3_flag(tp, ENABLE_TSS))
6057 tnapi++;
6059 /* We are running in BH disabled context with netif_tx_lock
6060 * and TX reclaim runs via tp->napi.poll inside of a software
6061 * interrupt. Furthermore, IRQ processing runs lockless so we have
6062 * no IRQ context deadlocks to worry about either. Rejoice! */
6064 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
6065 if (!netif_tx_queue_stopped(txq)) {
6066 netif_tx_stop_queue(txq);
6068 /* This is a hard error, log it. */
6069 netdev_err(dev,
6070 "BUG! Tx Ring full when queue awake!\n");
6072 return NETDEV_TX_BUSY;
6075 entry = tnapi->tx_prod;
6076 base_flags = 0;
6077 if (skb->ip_summed == CHECKSUM_PARTIAL)
6078 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6080 mss = skb_shinfo(skb)->gso_size;
6081 if (mss) {
6082 struct iphdr *iph;
6083 u32 tcp_opt_len, hdr_len;
6085 if (skb_header_cloned(skb) &&
6086 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6087 dev_kfree_skb(skb);
6088 goto out_unlock;
6091 iph = ip_hdr(skb);
6092 tcp_opt_len = tcp_optlen(skb);
6094 if (skb_is_gso_v6(skb)) {
6095 hdr_len = skb_headlen(skb) - ETH_HLEN;
6096 } else {
6097 u32 ip_tcp_len;
6099 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6100 hdr_len = ip_tcp_len + tcp_opt_len;
6102 iph->check = 0;
6103 iph->tot_len = htons(mss + hdr_len);
6106 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6107 tg3_flag(tp, TSO_BUG))
6108 return tg3_tso_bug(tp, skb);
6110 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6111 TXD_FLAG_CPU_POST_DMA);
6113 if (tg3_flag(tp, HW_TSO_1) ||
6114 tg3_flag(tp, HW_TSO_2) ||
6115 tg3_flag(tp, HW_TSO_3)) {
6116 tcp_hdr(skb)->check = 0;
6117 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6118 } else
6119 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6120 iph->daddr, 0,
6121 IPPROTO_TCP,
6124 if (tg3_flag(tp, HW_TSO_3)) {
6125 mss |= (hdr_len & 0xc) << 12;
6126 if (hdr_len & 0x10)
6127 base_flags |= 0x00000010;
6128 base_flags |= (hdr_len & 0x3e0) << 5;
6129 } else if (tg3_flag(tp, HW_TSO_2))
6130 mss |= hdr_len << 9;
6131 else if (tg3_flag(tp, HW_TSO_1) ||
6132 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6133 if (tcp_opt_len || iph->ihl > 5) {
6134 int tsflags;
6136 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6137 mss |= (tsflags << 11);
6139 } else {
6140 if (tcp_opt_len || iph->ihl > 5) {
6141 int tsflags;
6143 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6144 base_flags |= tsflags << 12;
6149 #ifdef BCM_KERNEL_SUPPORTS_8021Q
6150 if (vlan_tx_tag_present(skb)) {
6151 base_flags |= TXD_FLAG_VLAN;
6152 vlan = vlan_tx_tag_get(skb);
6154 #endif
6156 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6157 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6158 base_flags |= TXD_FLAG_JMB_PKT;
6160 len = skb_headlen(skb);
6162 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6163 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6164 dev_kfree_skb(skb);
6165 goto out_unlock;
6168 tnapi->tx_buffers[entry].skb = skb;
6169 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6171 would_hit_hwbug = 0;
6173 if (tg3_4g_overflow_test(mapping, len))
6174 would_hit_hwbug = 1;
6176 if (tg3_40bit_overflow_test(tp, mapping, len))
6177 would_hit_hwbug = 1;
6179 if (tg3_flag(tp, 5701_DMA_BUG))
6180 would_hit_hwbug = 1;
6182 tg3_tx_set_bd(tnapi, entry, mapping, len, base_flags |
6183 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6184 mss, vlan);
6186 entry = NEXT_TX(entry);
6188 /* Now loop through additional data fragments, and queue them. */
6189 if (skb_shinfo(skb)->nr_frags > 0) {
6190 u32 tmp_mss = mss;
6192 if (!tg3_flag(tp, HW_TSO_1) &&
6193 !tg3_flag(tp, HW_TSO_2) &&
6194 !tg3_flag(tp, HW_TSO_3))
6195 tmp_mss = 0;
6197 last = skb_shinfo(skb)->nr_frags - 1;
6198 for (i = 0; i <= last; i++) {
6199 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6201 len = frag->size;
6202 mapping = pci_map_page(tp->pdev,
6203 frag->page,
6204 frag->page_offset,
6205 len, PCI_DMA_TODEVICE);
6207 tnapi->tx_buffers[entry].skb = NULL;
6208 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6209 mapping);
6210 if (pci_dma_mapping_error(tp->pdev, mapping))
6211 goto dma_error;
6213 if (tg3_flag(tp, SHORT_DMA_BUG) &&
6214 len <= 8)
6215 would_hit_hwbug = 1;
6217 if (tg3_4g_overflow_test(mapping, len))
6218 would_hit_hwbug = 1;
6220 if (tg3_40bit_overflow_test(tp, mapping, len))
6221 would_hit_hwbug = 1;
6223 tg3_tx_set_bd(tnapi, entry, mapping, len, base_flags |
6224 ((i == last) ? TXD_FLAG_END : 0),
6225 tmp_mss, vlan);
6227 entry = NEXT_TX(entry);
6231 if (would_hit_hwbug) {
6232 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6234 /* If the workaround fails due to memory/mapping
6235 * failure, silently drop this packet.
6236 */
6237 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags,
6238 mss, vlan))
6239 goto out_unlock;
6241 entry = NEXT_TX(tnapi->tx_prod);
6244 skb_tx_timestamp(skb);
6246 /* Packets are ready, update Tx producer idx local and on card. */
6247 tw32_tx_mbox(tnapi->prodmbox, entry);
6249 tnapi->tx_prod = entry;
6250 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6251 netif_tx_stop_queue(txq);
6253 /* netif_tx_stop_queue() must be done before checking
6254 * tx index in tg3_tx_avail() below, because in
6255 * tg3_tx(), we update tx index before checking for
6256 * netif_tx_queue_stopped().
6257 */
6258 smp_mb();
6259 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6260 netif_tx_wake_queue(txq);
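		/* The smp_mb() pairs with the consumer side in tg3_tx():
		 * here the queue is marked stopped before tx_avail is
		 * re-read; there tx_cons is advanced before the stopped
		 * flag is tested.  Whichever side runs second sees the
		 * other's update, so a stopped queue with free
		 * descriptors always gets woken.
		 */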
6263 out_unlock:
6264 mmiowb();
6266 return NETDEV_TX_OK;
6268 dma_error:
6269 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6270 dev_kfree_skb(skb);
6271 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6272 return NETDEV_TX_OK;
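/* tg3_4g_overflow_test() used above guards a real DMA erratum: some
 * tigon3 parts mishandle a buffer whose byte range straddles a 4 GB
 * boundary.  The essence of such a check, as a sketch (hypothetical
 * helper; the driver's own test also allows for extra slop bytes):
 */
static inline bool example_crosses_4g(dma_addr_t base, unsigned int len)
{
	/* The range straddles a 4 GB boundary iff the upper 32 address
	 * bits of its first and last byte differ.
	 */
	return upper_32_bits(base) != upper_32_bits(base + len - 1);
}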
6275 static void tg3_set_loopback(struct net_device *dev, u32 features)
6277 struct tg3 *tp = netdev_priv(dev);
6279 if (features & NETIF_F_LOOPBACK) {
6280 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6281 return;
6283 /*
6284 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6285 * loopback mode if Half-Duplex mode was negotiated earlier.
6286 */
6287 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6289 /* Enable internal MAC loopback mode */
6290 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6291 spin_lock_bh(&tp->lock);
6292 tw32(MAC_MODE, tp->mac_mode);
6293 netif_carrier_on(tp->dev);
6294 spin_unlock_bh(&tp->lock);
6295 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6296 } else {
6297 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6298 return;
6300 /* Disable internal MAC loopback mode */
6301 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6302 spin_lock_bh(&tp->lock);
6303 tw32(MAC_MODE, tp->mac_mode);
6304 /* Force link status check */
6305 tg3_setup_phy(tp, 1);
6306 spin_unlock_bh(&tp->lock);
6307 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6311 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6313 struct tg3 *tp = netdev_priv(dev);
6315 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6316 features &= ~NETIF_F_ALL_TSO;
6318 return features;
6321 static int tg3_set_features(struct net_device *dev, u32 features)
6323 u32 changed = dev->features ^ features;
6325 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6326 tg3_set_loopback(dev, features);
6328 return 0;
6331 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6332 int new_mtu)
6334 dev->mtu = new_mtu;
6336 if (new_mtu > ETH_DATA_LEN) {
6337 if (tg3_flag(tp, 5780_CLASS)) {
6338 netdev_update_features(dev);
6339 tg3_flag_clear(tp, TSO_CAPABLE);
6340 } else {
6341 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6343 } else {
6344 if (tg3_flag(tp, 5780_CLASS)) {
6345 tg3_flag_set(tp, TSO_CAPABLE);
6346 netdev_update_features(dev);
6348 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6352 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6354 struct tg3 *tp = netdev_priv(dev);
6355 int err;
6357 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6358 return -EINVAL;
6360 if (!netif_running(dev)) {
6361 /* We'll just catch it later when the
6362 * device is up'd.
6363 */
6364 tg3_set_mtu(dev, tp, new_mtu);
6365 return 0;
6368 tg3_phy_stop(tp);
6370 tg3_netif_stop(tp);
6372 tg3_full_lock(tp, 1);
6374 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6376 tg3_set_mtu(dev, tp, new_mtu);
6378 err = tg3_restart_hw(tp, 0);
6380 if (!err)
6381 tg3_netif_start(tp);
6383 tg3_full_unlock(tp);
6385 if (!err)
6386 tg3_phy_start(tp);
6388 return err;
6391 static void tg3_rx_prodring_free(struct tg3 *tp,
6392 struct tg3_rx_prodring_set *tpr)
6394 int i;
6396 if (tpr != &tp->napi[0].prodring) {
6397 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6398 i = (i + 1) & tp->rx_std_ring_mask)
6399 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6400 tp->rx_pkt_map_sz);
6402 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6403 for (i = tpr->rx_jmb_cons_idx;
6404 i != tpr->rx_jmb_prod_idx;
6405 i = (i + 1) & tp->rx_jmb_ring_mask) {
6406 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6407 TG3_RX_JMB_MAP_SZ);
6411 return;
6414 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6415 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6416 tp->rx_pkt_map_sz);
6418 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6419 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6420 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6421 TG3_RX_JMB_MAP_SZ);
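/* Both loops above walk a power-of-two ring with "i = (i + 1) & mask",
 * which wraps without a divide.  The idiom in isolation (hypothetical
 * ring type, for illustration only):
 */
struct example_ring {
	unsigned int cons, prod;
	unsigned int mask;	/* ring size - 1, size a power of two */
};

static unsigned int example_ring_pending(const struct example_ring *r)
{
	unsigned int i, n = 0;

	/* Count entries between consumer and producer, wrapping via
	 * the mask instead of a modulo operation.
	 */
	for (i = r->cons; i != r->prod; i = (i + 1) & r->mask)
		n++;
	return n;
}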
6425 /* Initialize rx rings for packet processing.
6427 * The chip has been shut down and the driver detached from
6428 * the networking, so no interrupts or new tx packets will
6429 * end up in the driver. tp->{tx,}lock are held and thus
6430 * we may not sleep.
6431 */
6432 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6433 struct tg3_rx_prodring_set *tpr)
6435 u32 i, rx_pkt_dma_sz;
6437 tpr->rx_std_cons_idx = 0;
6438 tpr->rx_std_prod_idx = 0;
6439 tpr->rx_jmb_cons_idx = 0;
6440 tpr->rx_jmb_prod_idx = 0;
6442 if (tpr != &tp->napi[0].prodring) {
6443 memset(&tpr->rx_std_buffers[0], 0,
6444 TG3_RX_STD_BUFF_RING_SIZE(tp));
6445 if (tpr->rx_jmb_buffers)
6446 memset(&tpr->rx_jmb_buffers[0], 0,
6447 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6448 goto done;
6451 /* Zero out all descriptors. */
6452 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6454 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6455 if (tg3_flag(tp, 5780_CLASS) &&
6456 tp->dev->mtu > ETH_DATA_LEN)
6457 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6458 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6460 /* Initialize invariants of the rings, we only set this
6461 * stuff once. This works because the card does not
6462 * write into the rx buffer posting rings.
6463 */
6464 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6465 struct tg3_rx_buffer_desc *rxd;
6467 rxd = &tpr->rx_std[i];
6468 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6469 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6470 rxd->opaque = (RXD_OPAQUE_RING_STD |
6471 (i << RXD_OPAQUE_INDEX_SHIFT));
6474 /* Now allocate fresh SKBs for each rx ring. */
6475 for (i = 0; i < tp->rx_pending; i++) {
6476 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6477 netdev_warn(tp->dev,
6478 "Using a smaller RX standard ring. Only "
6479 "%d out of %d buffers were allocated "
6480 "successfully\n", i, tp->rx_pending);
6481 if (i == 0)
6482 goto initfail;
6483 tp->rx_pending = i;
6484 break;
6488 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6489 goto done;
6491 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6493 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6494 goto done;
6496 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6497 struct tg3_rx_buffer_desc *rxd;
6499 rxd = &tpr->rx_jmb[i].std;
6500 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6501 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6502 RXD_FLAG_JUMBO;
6503 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6504 (i << RXD_OPAQUE_INDEX_SHIFT));
6507 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6508 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6509 netdev_warn(tp->dev,
6510 "Using a smaller RX jumbo ring. Only %d "
6511 "out of %d buffers were allocated "
6512 "successfully\n", i, tp->rx_jumbo_pending);
6513 if (i == 0)
6514 goto initfail;
6515 tp->rx_jumbo_pending = i;
6516 break;
6520 done:
6521 return 0;
6523 initfail:
6524 tg3_rx_prodring_free(tp, tpr);
6525 return -ENOMEM;
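/* Note the graceful degradation above: a buffer allocation failure is
 * fatal only when not even one buffer could be posted.  Otherwise the
 * pending count is trimmed to what actually succeeded and the ring is
 * simply run shallower than requested.
 */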
6528 static void tg3_rx_prodring_fini(struct tg3 *tp,
6529 struct tg3_rx_prodring_set *tpr)
6531 kfree(tpr->rx_std_buffers);
6532 tpr->rx_std_buffers = NULL;
6533 kfree(tpr->rx_jmb_buffers);
6534 tpr->rx_jmb_buffers = NULL;
6535 if (tpr->rx_std) {
6536 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6537 tpr->rx_std, tpr->rx_std_mapping);
6538 tpr->rx_std = NULL;
6540 if (tpr->rx_jmb) {
6541 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6542 tpr->rx_jmb, tpr->rx_jmb_mapping);
6543 tpr->rx_jmb = NULL;
6547 static int tg3_rx_prodring_init(struct tg3 *tp,
6548 struct tg3_rx_prodring_set *tpr)
6550 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6551 GFP_KERNEL);
6552 if (!tpr->rx_std_buffers)
6553 return -ENOMEM;
6555 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6556 TG3_RX_STD_RING_BYTES(tp),
6557 &tpr->rx_std_mapping,
6558 GFP_KERNEL);
6559 if (!tpr->rx_std)
6560 goto err_out;
6562 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6563 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6564 GFP_KERNEL);
6565 if (!tpr->rx_jmb_buffers)
6566 goto err_out;
6568 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6569 TG3_RX_JMB_RING_BYTES(tp),
6570 &tpr->rx_jmb_mapping,
6571 GFP_KERNEL);
6572 if (!tpr->rx_jmb)
6573 goto err_out;
6576 return 0;
6578 err_out:
6579 tg3_rx_prodring_fini(tp, tpr);
6580 return -ENOMEM;
6583 /* Free up pending packets in all rx/tx rings.
6585 * The chip has been shut down and the driver detached from
6586 * the networking, so no interrupts or new tx packets will
6587 * end up in the driver. tp->{tx,}lock is not held and we are not
6588 * in an interrupt context and thus may sleep.
6589 */
6590 static void tg3_free_rings(struct tg3 *tp)
6592 int i, j;
6594 for (j = 0; j < tp->irq_cnt; j++) {
6595 struct tg3_napi *tnapi = &tp->napi[j];
6597 tg3_rx_prodring_free(tp, &tnapi->prodring);
6599 if (!tnapi->tx_buffers)
6600 continue;
6602 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
6603 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
6605 if (!skb)
6606 continue;
6608 tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
6610 dev_kfree_skb_any(skb);
6615 /* Initialize tx/rx rings for packet processing.
6617 * The chip has been shut down and the driver detached from
6618 * the networking, so no interrupts or new tx packets will
6619 * end up in the driver. tp->{tx,}lock are held and thus
6620 * we may not sleep.
6621 */
6622 static int tg3_init_rings(struct tg3 *tp)
6624 int i;
6626 /* Free up all the SKBs. */
6627 tg3_free_rings(tp);
6629 for (i = 0; i < tp->irq_cnt; i++) {
6630 struct tg3_napi *tnapi = &tp->napi[i];
6632 tnapi->last_tag = 0;
6633 tnapi->last_irq_tag = 0;
6634 tnapi->hw_status->status = 0;
6635 tnapi->hw_status->status_tag = 0;
6636 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6638 tnapi->tx_prod = 0;
6639 tnapi->tx_cons = 0;
6640 if (tnapi->tx_ring)
6641 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6643 tnapi->rx_rcb_ptr = 0;
6644 if (tnapi->rx_rcb)
6645 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6647 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6648 tg3_free_rings(tp);
6649 return -ENOMEM;
6653 return 0;
6656 /*
6657 * Must not be invoked with interrupt sources disabled and
6658 * the hardware shut down.
6659 */
6660 static void tg3_free_consistent(struct tg3 *tp)
6662 int i;
6664 for (i = 0; i < tp->irq_cnt; i++) {
6665 struct tg3_napi *tnapi = &tp->napi[i];
6667 if (tnapi->tx_ring) {
6668 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6669 tnapi->tx_ring, tnapi->tx_desc_mapping);
6670 tnapi->tx_ring = NULL;
6673 kfree(tnapi->tx_buffers);
6674 tnapi->tx_buffers = NULL;
6676 if (tnapi->rx_rcb) {
6677 dma_free_coherent(&tp->pdev->dev,
6678 TG3_RX_RCB_RING_BYTES(tp),
6679 tnapi->rx_rcb,
6680 tnapi->rx_rcb_mapping);
6681 tnapi->rx_rcb = NULL;
6684 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6686 if (tnapi->hw_status) {
6687 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6688 tnapi->hw_status,
6689 tnapi->status_mapping);
6690 tnapi->hw_status = NULL;
6694 if (tp->hw_stats) {
6695 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6696 tp->hw_stats, tp->stats_mapping);
6697 tp->hw_stats = NULL;
6701 /*
6702 * Must not be invoked with interrupt sources disabled and
6703 * the hardware shut down. Can sleep.
6704 */
6705 static int tg3_alloc_consistent(struct tg3 *tp)
6707 int i;
6709 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6710 sizeof(struct tg3_hw_stats),
6711 &tp->stats_mapping,
6712 GFP_KERNEL);
6713 if (!tp->hw_stats)
6714 goto err_out;
6716 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6718 for (i = 0; i < tp->irq_cnt; i++) {
6719 struct tg3_napi *tnapi = &tp->napi[i];
6720 struct tg3_hw_status *sblk;
6722 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6723 TG3_HW_STATUS_SIZE,
6724 &tnapi->status_mapping,
6725 GFP_KERNEL);
6726 if (!tnapi->hw_status)
6727 goto err_out;
6729 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6730 sblk = tnapi->hw_status;
6732 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6733 goto err_out;
6735 /* If multivector TSS is enabled, vector 0 does not handle
6736 * tx interrupts. Don't allocate any resources for it.
6737 */
6738 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6739 (i && tg3_flag(tp, ENABLE_TSS))) {
6740 tnapi->tx_buffers = kzalloc(
6741 sizeof(struct tg3_tx_ring_info) *
6742 TG3_TX_RING_SIZE, GFP_KERNEL);
6743 if (!tnapi->tx_buffers)
6744 goto err_out;
6746 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6747 TG3_TX_RING_BYTES,
6748 &tnapi->tx_desc_mapping,
6749 GFP_KERNEL);
6750 if (!tnapi->tx_ring)
6751 goto err_out;
6754 /*
6755 * When RSS is enabled, the status block format changes
6756 * slightly. The "rx_jumbo_consumer", "reserved",
6757 * and "rx_mini_consumer" members get mapped to the
6758 * other three rx return ring producer indexes.
6759 */
6760 switch (i) {
6761 default:
6762 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6763 break;
6764 case 2:
6765 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6766 break;
6767 case 3:
6768 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6769 break;
6770 case 4:
6771 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6772 break;
6775 /*
6776 * If multivector RSS is enabled, vector 0 does not handle
6777 * rx or tx interrupts. Don't allocate any resources for it.
6778 */
6779 if (!i && tg3_flag(tp, ENABLE_RSS))
6780 continue;
6782 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6783 TG3_RX_RCB_RING_BYTES(tp),
6784 &tnapi->rx_rcb_mapping,
6785 GFP_KERNEL);
6786 if (!tnapi->rx_rcb)
6787 goto err_out;
6789 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6792 return 0;
6794 err_out:
6795 tg3_free_consistent(tp);
6796 return -ENOMEM;
6799 #define MAX_WAIT_CNT 1000
6801 /* To stop a block, clear the enable bit and poll till it
6802 * clears. tp->lock is held.
6803 */
6804 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6806 unsigned int i;
6807 u32 val;
6809 if (tg3_flag(tp, 5705_PLUS)) {
6810 switch (ofs) {
6811 case RCVLSC_MODE:
6812 case DMAC_MODE:
6813 case MBFREE_MODE:
6814 case BUFMGR_MODE:
6815 case MEMARB_MODE:
6816 /* We can't enable/disable these bits of the
6817 * 5705/5750, just say success.
6818 */
6819 return 0;
6821 default:
6822 break;
6826 val = tr32(ofs);
6827 val &= ~enable_bit;
6828 tw32_f(ofs, val);
6830 for (i = 0; i < MAX_WAIT_CNT; i++) {
6831 udelay(100);
6832 val = tr32(ofs);
6833 if ((val & enable_bit) == 0)
6834 break;
6837 if (i == MAX_WAIT_CNT && !silent) {
6838 dev_err(&tp->pdev->dev,
6839 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6840 ofs, enable_bit);
6841 return -ENODEV;
6844 return 0;
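/* tg3_stop_block() is an instance of the generic "clear the enable
 * bit, then poll until the hardware acknowledges" idiom.  Stripped to
 * its core as a sketch (plain readl/writel instead of the driver's
 * tr32/tw32 accessors):
 */
static int example_stop_block(void __iomem *reg, u32 enable_bit)
{
	unsigned int i;

	writel(readl(reg) & ~enable_bit, reg);

	for (i = 0; i < 1000; i++) {
		udelay(100);	/* 1000 polls x 100us = 100ms budget */
		if (!(readl(reg) & enable_bit))
			return 0;
	}
	return -ENODEV;		/* the block never stopped */
}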
6847 /* tp->lock is held. */
6848 static int tg3_abort_hw(struct tg3 *tp, int silent)
6850 int i, err;
6852 tg3_disable_ints(tp);
6854 tp->rx_mode &= ~RX_MODE_ENABLE;
6855 tw32_f(MAC_RX_MODE, tp->rx_mode);
6856 udelay(10);
6858 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6859 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6860 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6861 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6862 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6863 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6865 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6866 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6867 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6868 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6869 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6870 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6871 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6873 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6874 tw32_f(MAC_MODE, tp->mac_mode);
6875 udelay(40);
6877 tp->tx_mode &= ~TX_MODE_ENABLE;
6878 tw32_f(MAC_TX_MODE, tp->tx_mode);
6880 for (i = 0; i < MAX_WAIT_CNT; i++) {
6881 udelay(100);
6882 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6883 break;
6885 if (i >= MAX_WAIT_CNT) {
6886 dev_err(&tp->pdev->dev,
6887 "%s timed out, TX_MODE_ENABLE will not clear "
6888 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6889 err |= -ENODEV;
6892 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6893 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6894 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6896 tw32(FTQ_RESET, 0xffffffff);
6897 tw32(FTQ_RESET, 0x00000000);
6899 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6900 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6902 for (i = 0; i < tp->irq_cnt; i++) {
6903 struct tg3_napi *tnapi = &tp->napi[i];
6904 if (tnapi->hw_status)
6905 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6907 if (tp->hw_stats)
6908 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6910 return err;
6913 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6915 int i;
6916 u32 apedata;
6918 /* NCSI does not support APE events */
6919 if (tg3_flag(tp, APE_HAS_NCSI))
6920 return;
6922 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6923 if (apedata != APE_SEG_SIG_MAGIC)
6924 return;
6926 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6927 if (!(apedata & APE_FW_STATUS_READY))
6928 return;
6930 /* Wait for up to 1 millisecond for APE to service previous event. */
6931 for (i = 0; i < 10; i++) {
6932 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6933 return;
6935 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6937 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6938 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6939 event | APE_EVENT_STATUS_EVENT_PENDING);
6941 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6943 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6944 break;
6946 udelay(100);
6949 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6950 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6953 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6955 u32 event;
6956 u32 apedata;
6958 if (!tg3_flag(tp, ENABLE_APE))
6959 return;
6961 switch (kind) {
6962 case RESET_KIND_INIT:
6963 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6964 APE_HOST_SEG_SIG_MAGIC);
6965 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6966 APE_HOST_SEG_LEN_MAGIC);
6967 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6968 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6969 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6970 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6971 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6972 APE_HOST_BEHAV_NO_PHYLOCK);
6973 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6974 TG3_APE_HOST_DRVR_STATE_START);
6976 event = APE_EVENT_STATUS_STATE_START;
6977 break;
6978 case RESET_KIND_SHUTDOWN:
6979 /* With the interface we are currently using,
6980 * APE does not track driver state. Wiping
6981 * out the HOST SEGMENT SIGNATURE forces
6982 * the APE to assume OS absent status.
6983 */
6984 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6986 if (device_may_wakeup(&tp->pdev->dev) &&
6987 tg3_flag(tp, WOL_ENABLE)) {
6988 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6989 TG3_APE_HOST_WOL_SPEED_AUTO);
6990 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6991 } else
6992 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6994 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6996 event = APE_EVENT_STATUS_STATE_UNLOAD;
6997 break;
6998 case RESET_KIND_SUSPEND:
6999 event = APE_EVENT_STATUS_STATE_SUSPEND;
7000 break;
7001 default:
7002 return;
7005 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7007 tg3_ape_send_event(tp, event);
7010 /* tp->lock is held. */
7011 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7013 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7014 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7016 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7017 switch (kind) {
7018 case RESET_KIND_INIT:
7019 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7020 DRV_STATE_START);
7021 break;
7023 case RESET_KIND_SHUTDOWN:
7024 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7025 DRV_STATE_UNLOAD);
7026 break;
7028 case RESET_KIND_SUSPEND:
7029 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7030 DRV_STATE_SUSPEND);
7031 break;
7033 default:
7034 break;
7038 if (kind == RESET_KIND_INIT ||
7039 kind == RESET_KIND_SUSPEND)
7040 tg3_ape_driver_state_change(tp, kind);
7043 /* tp->lock is held. */
7044 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7046 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7047 switch (kind) {
7048 case RESET_KIND_INIT:
7049 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7050 DRV_STATE_START_DONE);
7051 break;
7053 case RESET_KIND_SHUTDOWN:
7054 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7055 DRV_STATE_UNLOAD_DONE);
7056 break;
7058 default:
7059 break;
7063 if (kind == RESET_KIND_SHUTDOWN)
7064 tg3_ape_driver_state_change(tp, kind);
7067 /* tp->lock is held. */
7068 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7070 if (tg3_flag(tp, ENABLE_ASF)) {
7071 switch (kind) {
7072 case RESET_KIND_INIT:
7073 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7074 DRV_STATE_START);
7075 break;
7077 case RESET_KIND_SHUTDOWN:
7078 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7079 DRV_STATE_UNLOAD);
7080 break;
7082 case RESET_KIND_SUSPEND:
7083 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7084 DRV_STATE_SUSPEND);
7085 break;
7087 default:
7088 break;
7093 static int tg3_poll_fw(struct tg3 *tp)
7095 int i;
7096 u32 val;
7098 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7099 /* Wait up to 20ms for init done. */
7100 for (i = 0; i < 200; i++) {
7101 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7102 return 0;
7103 udelay(100);
7105 return -ENODEV;
7108 /* Wait for firmware initialization to complete. */
7109 for (i = 0; i < 100000; i++) {
7110 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7111 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7112 break;
7113 udelay(10);
7116 /* Chip might not be fitted with firmware. Some Sun onboard
7117 * parts are configured like that. So don't signal the timeout
7118 * of the above loop as an error, but do report the lack of
7119 * running firmware once.
7120 */
7121 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7122 tg3_flag_set(tp, NO_FWARE_REPORTED);
7124 netdev_info(tp->dev, "No firmware running\n");
7127 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7128 /* The 57765 A0 needs a little more
7129 * time to do some important work.
7130 */
7131 mdelay(10);
7134 return 0;
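/* The mailbox handshake polled above has two halves:
 * tg3_write_sig_pre_reset() stores NIC_SRAM_FIRMWARE_MBOX_MAGIC1 in
 * SRAM before the reset, and the bootcode writes back the one's
 * complement of that magic once its init completes, which is why the
 * loop compares against ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1.
 */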
7137 /* Save PCI command register before chip reset */
7138 static void tg3_save_pci_state(struct tg3 *tp)
7140 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7143 /* Restore PCI state after chip reset */
7144 static void tg3_restore_pci_state(struct tg3 *tp)
7146 u32 val;
7148 /* Re-enable indirect register accesses. */
7149 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7150 tp->misc_host_ctrl);
7152 /* Set MAX PCI retry to zero. */
7153 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7154 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7155 tg3_flag(tp, PCIX_MODE))
7156 val |= PCISTATE_RETRY_SAME_DMA;
7157 /* Allow reads and writes to the APE register and memory space. */
7158 if (tg3_flag(tp, ENABLE_APE))
7159 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7160 PCISTATE_ALLOW_APE_SHMEM_WR |
7161 PCISTATE_ALLOW_APE_PSPACE_WR;
7162 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7164 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7166 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7167 if (tg3_flag(tp, PCI_EXPRESS))
7168 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7169 else {
7170 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7171 tp->pci_cacheline_sz);
7172 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7173 tp->pci_lat_timer);
7177 /* Make sure PCI-X relaxed ordering bit is clear. */
7178 if (tg3_flag(tp, PCIX_MODE)) {
7179 u16 pcix_cmd;
7181 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7182 &pcix_cmd);
7183 pcix_cmd &= ~PCI_X_CMD_ERO;
7184 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7185 pcix_cmd);
7188 if (tg3_flag(tp, 5780_CLASS)) {
7190 /* Chip reset on 5780 will reset MSI enable bit,
7191 * so we need to restore it.
7192 */
7193 if (tg3_flag(tp, USING_MSI)) {
7194 u16 ctrl;
7196 pci_read_config_word(tp->pdev,
7197 tp->msi_cap + PCI_MSI_FLAGS,
7198 &ctrl);
7199 pci_write_config_word(tp->pdev,
7200 tp->msi_cap + PCI_MSI_FLAGS,
7201 ctrl | PCI_MSI_FLAGS_ENABLE);
7202 val = tr32(MSGINT_MODE);
7203 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7208 static void tg3_stop_fw(struct tg3 *);
7210 /* tp->lock is held. */
7211 static int tg3_chip_reset(struct tg3 *tp)
7213 u32 val;
7214 void (*write_op)(struct tg3 *, u32, u32);
7215 int i, err;
7217 tg3_nvram_lock(tp);
7219 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7221 /* No matching tg3_nvram_unlock() after this because
7222 * chip reset below will undo the nvram lock.
7223 */
7224 tp->nvram_lock_cnt = 0;
7226 /* GRC_MISC_CFG core clock reset will clear the memory
7227 * enable bit in PCI register 4 and the MSI enable bit
7228 * on some chips, so we save relevant registers here.
7229 */
7230 tg3_save_pci_state(tp);
7232 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7233 tg3_flag(tp, 5755_PLUS))
7234 tw32(GRC_FASTBOOT_PC, 0);
7236 /*
7237 * We must avoid the readl() that normally takes place.
7238 * It locks machines, causes machine checks, and other
7239 * fun things. So, temporarily disable the 5701
7240 * hardware workaround, while we do the reset.
7241 */
7242 write_op = tp->write32;
7243 if (write_op == tg3_write_flush_reg32)
7244 tp->write32 = tg3_write32;
7246 /* Prevent the irq handler from reading or writing PCI registers
7247 * during chip reset when the memory enable bit in the PCI command
7248 * register may be cleared. The chip does not generate interrupt
7249 * at this time, but the irq handler may still be called due to irq
7250 * sharing or irqpoll.
7251 */
7252 tg3_flag_set(tp, CHIP_RESETTING);
7253 for (i = 0; i < tp->irq_cnt; i++) {
7254 struct tg3_napi *tnapi = &tp->napi[i];
7255 if (tnapi->hw_status) {
7256 tnapi->hw_status->status = 0;
7257 tnapi->hw_status->status_tag = 0;
7259 tnapi->last_tag = 0;
7260 tnapi->last_irq_tag = 0;
7262 smp_mb();
7264 for (i = 0; i < tp->irq_cnt; i++)
7265 synchronize_irq(tp->napi[i].irq_vec);
7267 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7268 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7269 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7272 /* do the reset */
7273 val = GRC_MISC_CFG_CORECLK_RESET;
7275 if (tg3_flag(tp, PCI_EXPRESS)) {
7276 /* Force PCIe 1.0a mode */
7277 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7278 !tg3_flag(tp, 57765_PLUS) &&
7279 tr32(TG3_PCIE_PHY_TSTCTL) ==
7280 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7281 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7283 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7284 tw32(GRC_MISC_CFG, (1 << 29));
7285 val |= (1 << 29);
7289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7290 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7291 tw32(GRC_VCPU_EXT_CTRL,
7292 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7295 /* Manage gphy power for all CPMU absent PCIe devices. */
7296 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7297 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7299 tw32(GRC_MISC_CFG, val);
7301 /* restore 5701 hardware bug workaround write method */
7302 tp->write32 = write_op;
7304 /* Unfortunately, we have to delay before the PCI read back.
7305 * Some 575X chips even will not respond to a PCI cfg access
7306 * when the reset command is given to the chip.
7308 * How do these hardware designers expect things to work
7309 * properly if the PCI write is posted for a long period
7310 * of time? It is always necessary to have some method by
7311 * which a register read back can occur to push the write
7312 * out which does the reset.
7314 * For most tg3 variants the trick below was working.
7315 * Ho hum...
7316 */
7317 udelay(120);
7319 /* Flush PCI posted writes. The normal MMIO registers
7320 * are inaccessible at this time so this is the only
7321 * way to make this reliably (actually, this is no longer
7322 * the case, see above). I tried to use indirect
7323 * register read/write but this upset some 5701 variants.
7324 */
7325 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7327 udelay(120);
7329 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7330 u16 val16;
7332 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7333 int i;
7334 u32 cfg_val;
7336 /* Wait for link training to complete. */
7337 for (i = 0; i < 5000; i++)
7338 udelay(100);
7340 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7341 pci_write_config_dword(tp->pdev, 0xc4,
7342 cfg_val | (1 << 15));
7345 /* Clear the "no snoop" and "relaxed ordering" bits. */
7346 pci_read_config_word(tp->pdev,
7347 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7348 &val16);
7349 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7350 PCI_EXP_DEVCTL_NOSNOOP_EN);
7351 /*
7352 * Older PCIe devices only support the 128 byte
7353 * MPS setting. Enforce the restriction.
7354 */
7355 if (!tg3_flag(tp, CPMU_PRESENT))
7356 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7357 pci_write_config_word(tp->pdev,
7358 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7359 val16);
7361 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7363 /* Clear error status */
7364 pci_write_config_word(tp->pdev,
7365 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7366 PCI_EXP_DEVSTA_CED |
7367 PCI_EXP_DEVSTA_NFED |
7368 PCI_EXP_DEVSTA_FED |
7369 PCI_EXP_DEVSTA_URD);
7372 tg3_restore_pci_state(tp);
7374 tg3_flag_clear(tp, CHIP_RESETTING);
7375 tg3_flag_clear(tp, ERROR_PROCESSED);
7377 val = 0;
7378 if (tg3_flag(tp, 5780_CLASS))
7379 val = tr32(MEMARB_MODE);
7380 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7382 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7383 tg3_stop_fw(tp);
7384 tw32(0x5000, 0x400);
7387 tw32(GRC_MODE, tp->grc_mode);
7389 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7390 val = tr32(0xc4);
7392 tw32(0xc4, val | (1 << 15));
7395 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7396 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7397 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7398 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7399 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7400 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7403 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7404 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7405 val = tp->mac_mode;
7406 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7407 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7408 val = tp->mac_mode;
7409 } else
7410 val = 0;
7412 tw32_f(MAC_MODE, val);
7413 udelay(40);
7415 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7417 err = tg3_poll_fw(tp);
7418 if (err)
7419 return err;
7421 tg3_mdio_start(tp);
7423 if (tg3_flag(tp, PCI_EXPRESS) &&
7424 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7425 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7426 !tg3_flag(tp, 57765_PLUS)) {
7427 val = tr32(0x7c00);
7429 tw32(0x7c00, val | (1 << 25));
7432 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7433 val = tr32(TG3_CPMU_CLCK_ORIDE);
7434 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7437 /* Reprobe ASF enable state. */
7438 tg3_flag_clear(tp, ENABLE_ASF);
7439 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7440 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7441 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7442 u32 nic_cfg;
7444 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7445 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7446 tg3_flag_set(tp, ENABLE_ASF);
7447 tp->last_event_jiffies = jiffies;
7448 if (tg3_flag(tp, 5750_PLUS))
7449 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7453 return 0;
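/* Several steps above depend on the standard "flush a posted write by
 * reading something back" trick; during the reset window, when MMIO is
 * unusable, the read-back is done through PCI config space instead.
 * The register-space form of the idiom, as a sketch (this is in
 * essence what the driver's tw32_f() accessor provides):
 */
static inline void example_write_flush(void __iomem *reg, u32 val)
{
	writel(val, reg);
	(void)readl(reg);	/* read back pushes the posted write out */
}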
7456 /* tp->lock is held. */
7457 static void tg3_stop_fw(struct tg3 *tp)
7459 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7460 /* Wait for RX cpu to ACK the previous event. */
7461 tg3_wait_for_event_ack(tp);
7463 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7465 tg3_generate_fw_event(tp);
7467 /* Wait for RX cpu to ACK this event. */
7468 tg3_wait_for_event_ack(tp);
7472 /* tp->lock is held. */
7473 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7475 int err;
7477 tg3_stop_fw(tp);
7479 tg3_write_sig_pre_reset(tp, kind);
7481 tg3_abort_hw(tp, silent);
7482 err = tg3_chip_reset(tp);
7484 __tg3_set_mac_addr(tp, 0);
7486 tg3_write_sig_legacy(tp, kind);
7487 tg3_write_sig_post_reset(tp, kind);
7489 if (err)
7490 return err;
7492 return 0;
7495 #define RX_CPU_SCRATCH_BASE 0x30000
7496 #define RX_CPU_SCRATCH_SIZE 0x04000
7497 #define TX_CPU_SCRATCH_BASE 0x34000
7498 #define TX_CPU_SCRATCH_SIZE 0x04000
7500 /* tp->lock is held. */
7501 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7503 int i;
7505 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7508 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7510 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7511 return 0;
7513 if (offset == RX_CPU_BASE) {
7514 for (i = 0; i < 10000; i++) {
7515 tw32(offset + CPU_STATE, 0xffffffff);
7516 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7517 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7518 break;
7521 tw32(offset + CPU_STATE, 0xffffffff);
7522 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7523 udelay(10);
7524 } else {
7525 for (i = 0; i < 10000; i++) {
7526 tw32(offset + CPU_STATE, 0xffffffff);
7527 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7528 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7529 break;
7533 if (i >= 10000) {
7534 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7535 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7536 return -ENODEV;
7539 /* Clear firmware's nvram arbitration. */
7540 if (tg3_flag(tp, NVRAM))
7541 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7542 return 0;
7545 struct fw_info {
7546 unsigned int fw_base;
7547 unsigned int fw_len;
7548 const __be32 *fw_data;
7551 /* tp->lock is held. */
7552 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7553 int cpu_scratch_size, struct fw_info *info)
7555 int err, lock_err, i;
7556 void (*write_op)(struct tg3 *, u32, u32);
7558 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7559 netdev_err(tp->dev,
7560 "%s: Trying to load TX cpu firmware which is 5705\n",
7561 __func__);
7562 return -EINVAL;
7565 if (tg3_flag(tp, 5705_PLUS))
7566 write_op = tg3_write_mem;
7567 else
7568 write_op = tg3_write_indirect_reg32;
7570 /* It is possible that bootcode is still loading at this point.
7571 * Get the nvram lock first before halting the cpu.
7572 */
7573 lock_err = tg3_nvram_lock(tp);
7574 err = tg3_halt_cpu(tp, cpu_base);
7575 if (!lock_err)
7576 tg3_nvram_unlock(tp);
7577 if (err)
7578 goto out;
7580 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7581 write_op(tp, cpu_scratch_base + i, 0);
7582 tw32(cpu_base + CPU_STATE, 0xffffffff);
7583 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7584 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7585 write_op(tp, (cpu_scratch_base +
7586 (info->fw_base & 0xffff) +
7587 (i * sizeof(u32))),
7588 be32_to_cpu(info->fw_data[i]));
7590 err = 0;
7592 out:
7593 return err;
7596 /* tp->lock is held. */
7597 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7599 struct fw_info info;
7600 const __be32 *fw_data;
7601 int err, i;
7603 fw_data = (void *)tp->fw->data;
7605 /* Firmware blob starts with version numbers, followed by
7606 start address and length. We are setting complete length.
7607 length = end_address_of_bss - start_address_of_text.
7608 Remainder is the blob to be loaded contiguously
7609 from start address. */
7611 info.fw_base = be32_to_cpu(fw_data[1]);
7612 info.fw_len = tp->fw->size - 12;
7613 info.fw_data = &fw_data[3];
7615 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7616 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7617 &info);
7618 if (err)
7619 return err;
7621 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7622 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7623 &info);
7624 if (err)
7625 return err;
7627 /* Now startup only the RX cpu. */
7628 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7629 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7631 for (i = 0; i < 5; i++) {
7632 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7633 break;
7634 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7635 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7636 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7637 udelay(1000);
7639 if (i >= 5) {
7640 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7641 "should be %08x\n", __func__,
7642 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7643 return -ENODEV;
7645 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7646 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7648 return 0;
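/* The blob layout described in the comment above amounts to three
 * big-endian header words in front of the payload.  As an illustrative
 * overlay only (the driver itself just indexes fw_data[] directly):
 */
struct example_tg3_fw_hdr {
	__be32 version;		/* fw_data[0] */
	__be32 base_addr;	/* fw_data[1]: load/start address */
	__be32 len;		/* fw_data[2]: end of bss - start of text */
	__be32 text[];		/* fw_data[3]...: loaded contiguously */
};
/* fw_len = tp->fw->size - 12 skips exactly this 12-byte header. */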
7651 /* tp->lock is held. */
7652 static int tg3_load_tso_firmware(struct tg3 *tp)
7654 struct fw_info info;
7655 const __be32 *fw_data;
7656 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7657 int err, i;
7659 if (tg3_flag(tp, HW_TSO_1) ||
7660 tg3_flag(tp, HW_TSO_2) ||
7661 tg3_flag(tp, HW_TSO_3))
7662 return 0;
7664 fw_data = (void *)tp->fw->data;
7666 /* Firmware blob starts with version numbers, followed by
7667 start address and length. We are setting complete length.
7668 length = end_address_of_bss - start_address_of_text.
7669 Remainder is the blob to be loaded contiguously
7670 from start address. */
7672 info.fw_base = be32_to_cpu(fw_data[1]);
7673 cpu_scratch_size = tp->fw_len;
7674 info.fw_len = tp->fw->size - 12;
7675 info.fw_data = &fw_data[3];
7677 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7678 cpu_base = RX_CPU_BASE;
7679 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7680 } else {
7681 cpu_base = TX_CPU_BASE;
7682 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7683 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7686 err = tg3_load_firmware_cpu(tp, cpu_base,
7687 cpu_scratch_base, cpu_scratch_size,
7688 &info);
7689 if (err)
7690 return err;
7692 /* Now startup the cpu. */
7693 tw32(cpu_base + CPU_STATE, 0xffffffff);
7694 tw32_f(cpu_base + CPU_PC, info.fw_base);
7696 for (i = 0; i < 5; i++) {
7697 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7698 break;
7699 tw32(cpu_base + CPU_STATE, 0xffffffff);
7700 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7701 tw32_f(cpu_base + CPU_PC, info.fw_base);
7702 udelay(1000);
7704 if (i >= 5) {
7705 netdev_err(tp->dev,
7706 "%s fails to set CPU PC, is %08x should be %08x\n",
7707 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7708 return -ENODEV;
7710 tw32(cpu_base + CPU_STATE, 0xffffffff);
7711 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7712 return 0;
7716 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7718 struct tg3 *tp = netdev_priv(dev);
7719 struct sockaddr *addr = p;
7720 int err = 0, skip_mac_1 = 0;
7722 if (!is_valid_ether_addr(addr->sa_data))
7723 return -EINVAL;
7725 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7727 if (!netif_running(dev))
7728 return 0;
7730 if (tg3_flag(tp, ENABLE_ASF)) {
7731 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7733 addr0_high = tr32(MAC_ADDR_0_HIGH);
7734 addr0_low = tr32(MAC_ADDR_0_LOW);
7735 addr1_high = tr32(MAC_ADDR_1_HIGH);
7736 addr1_low = tr32(MAC_ADDR_1_LOW);
7738 /* Skip MAC addr 1 if ASF is using it. */
7739 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7740 !(addr1_high == 0 && addr1_low == 0))
7741 skip_mac_1 = 1;
7743 spin_lock_bh(&tp->lock);
7744 __tg3_set_mac_addr(tp, skip_mac_1);
7745 spin_unlock_bh(&tp->lock);
7747 return err;
7750 /* tp->lock is held. */
7751 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7752 dma_addr_t mapping, u32 maxlen_flags,
7753 u32 nic_addr)
7755 tg3_write_mem(tp,
7756 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7757 ((u64) mapping >> 32));
7758 tg3_write_mem(tp,
7759 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7760 ((u64) mapping & 0xffffffff));
7761 tg3_write_mem(tp,
7762 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7763 maxlen_flags);
7765 if (!tg3_flag(tp, 5705_PLUS))
7766 tg3_write_mem(tp,
7767 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7768 nic_addr);
7771 static void __tg3_set_rx_mode(struct net_device *);
7772 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7774 int i;
7776 if (!tg3_flag(tp, ENABLE_TSS)) {
7777 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7778 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7779 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7780 } else {
7781 tw32(HOSTCC_TXCOL_TICKS, 0);
7782 tw32(HOSTCC_TXMAX_FRAMES, 0);
7783 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7786 if (!tg3_flag(tp, ENABLE_RSS)) {
7787 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7788 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7789 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7790 } else {
7791 tw32(HOSTCC_RXCOL_TICKS, 0);
7792 tw32(HOSTCC_RXMAX_FRAMES, 0);
7793 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7796 if (!tg3_flag(tp, 5705_PLUS)) {
7797 u32 val = ec->stats_block_coalesce_usecs;
7799 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7800 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7802 if (!netif_carrier_ok(tp->dev))
7803 val = 0;
7805 tw32(HOSTCC_STAT_COAL_TICKS, val);
7808 for (i = 0; i < tp->irq_cnt - 1; i++) {
7809 u32 reg;
7811 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7812 tw32(reg, ec->rx_coalesce_usecs);
7813 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7814 tw32(reg, ec->rx_max_coalesced_frames);
7815 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7816 tw32(reg, ec->rx_max_coalesced_frames_irq);
7818 if (tg3_flag(tp, ENABLE_TSS)) {
7819 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7820 tw32(reg, ec->tx_coalesce_usecs);
7821 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7822 tw32(reg, ec->tx_max_coalesced_frames);
7823 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7824 tw32(reg, ec->tx_max_coalesced_frames_irq);
7828 for (; i < tp->irq_max - 1; i++) {
7829 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7830 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7831 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7833 if (tg3_flag(tp, ENABLE_TSS)) {
7834 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7835 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7836 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
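/* The per-vector coalescing registers written above are laid out as
 * one 0x18-byte block per MSI-X vector, so "BASE_VEC1 + i * 0x18"
 * addresses vector i+1's copy of each knob.  The addressing scheme as
 * a sketch (hypothetical helper):
 */
static inline u32 example_vec_reg(u32 base_vec1, unsigned int i)
{
	return base_vec1 + i * 0x18;	/* i == 0 selects hw vector 1 */
}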
7841 /* tp->lock is held. */
7842 static void tg3_rings_reset(struct tg3 *tp)
7844 int i;
7845 u32 stblk, txrcb, rxrcb, limit;
7846 struct tg3_napi *tnapi = &tp->napi[0];
7848 /* Disable all transmit rings but the first. */
7849 if (!tg3_flag(tp, 5705_PLUS))
7850 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7851 else if (tg3_flag(tp, 5717_PLUS))
7852 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7853 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7854 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7855 else
7856 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7858 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7859 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7860 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7861 BDINFO_FLAGS_DISABLED);
7864 /* Disable all receive return rings but the first. */
7865 if (tg3_flag(tp, 5717_PLUS))
7866 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7867 else if (!tg3_flag(tp, 5705_PLUS))
7868 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7869 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7870 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7871 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7872 else
7873 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7875 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7876 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7877 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7878 BDINFO_FLAGS_DISABLED);
7880 /* Disable interrupts */
7881 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7882 tp->napi[0].chk_msi_cnt = 0;
7883 tp->napi[0].last_rx_cons = 0;
7884 tp->napi[0].last_tx_cons = 0;
7886 /* Zero mailbox registers. */
7887 if (tg3_flag(tp, SUPPORT_MSIX)) {
7888 for (i = 1; i < tp->irq_max; i++) {
7889 tp->napi[i].tx_prod = 0;
7890 tp->napi[i].tx_cons = 0;
7891 if (tg3_flag(tp, ENABLE_TSS))
7892 tw32_mailbox(tp->napi[i].prodmbox, 0);
7893 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7894 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7895 tp->napi[i].chk_msi_cnt = 0;
7896 tp->napi[i].last_rx_cons = 0;
7897 tp->napi[i].last_tx_cons = 0;
7899 if (!tg3_flag(tp, ENABLE_TSS))
7900 tw32_mailbox(tp->napi[0].prodmbox, 0);
7901 } else {
7902 tp->napi[0].tx_prod = 0;
7903 tp->napi[0].tx_cons = 0;
7904 tw32_mailbox(tp->napi[0].prodmbox, 0);
7905 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7908 /* Make sure the NIC-based send BD rings are disabled. */
7909 if (!tg3_flag(tp, 5705_PLUS)) {
7910 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7911 for (i = 0; i < 16; i++)
7912 tw32_tx_mbox(mbox + i * 8, 0);
7915 txrcb = NIC_SRAM_SEND_RCB;
7916 rxrcb = NIC_SRAM_RCV_RET_RCB;
7918 /* Clear status block in ram. */
7919 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7921 /* Set status block DMA address */
7922 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7923 ((u64) tnapi->status_mapping >> 32));
7924 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7925 ((u64) tnapi->status_mapping & 0xffffffff));
7927 if (tnapi->tx_ring) {
7928 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7929 (TG3_TX_RING_SIZE <<
7930 BDINFO_FLAGS_MAXLEN_SHIFT),
7931 NIC_SRAM_TX_BUFFER_DESC);
7932 txrcb += TG3_BDINFO_SIZE;
7935 if (tnapi->rx_rcb) {
7936 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7937 (tp->rx_ret_ring_mask + 1) <<
7938 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7939 rxrcb += TG3_BDINFO_SIZE;
7942 stblk = HOSTCC_STATBLCK_RING1;
7944 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7945 u64 mapping = (u64)tnapi->status_mapping;
7946 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7947 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7949 /* Clear status block in ram. */
7950 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7952 if (tnapi->tx_ring) {
7953 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7954 (TG3_TX_RING_SIZE <<
7955 BDINFO_FLAGS_MAXLEN_SHIFT),
7956 NIC_SRAM_TX_BUFFER_DESC);
7957 txrcb += TG3_BDINFO_SIZE;
7960 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7961 ((tp->rx_ret_ring_mask + 1) <<
7962 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7964 stblk += 8;
7965 rxrcb += TG3_BDINFO_SIZE;
7969 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7971 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7973 if (!tg3_flag(tp, 5750_PLUS) ||
7974 tg3_flag(tp, 5780_CLASS) ||
7975 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7976 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7977 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7978 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7979 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7980 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7981 else
7982 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7984 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7985 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7987 val = min(nic_rep_thresh, host_rep_thresh);
7988 tw32(RCVBDI_STD_THRESH, val);
7990 if (tg3_flag(tp, 57765_PLUS))
7991 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7993 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7994 return;
7996 if (!tg3_flag(tp, 5705_PLUS))
7997 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7998 else
7999 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8001 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8003 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8004 tw32(RCVBDI_JUMBO_THRESH, val);
8006 if (tg3_flag(tp, 57765_PLUS))
8007 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
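/* Condensed, the threshold math above is:
 *
 *	std threshold   = min3(bdcache_maxcnt / 2, rx_std_max_post,
 *			       max(rx_pending / 8, 1))
 *	jumbo threshold = min(bdcache_maxcnt / 2,
 *			      max(rx_jumbo_pending / 8, 1))
 *
 * i.e. replenish when a ring drops to roughly 1/8 of its configured
 * depth, but never demand more than half of the NIC-side BD cache.
 */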
8010 /* tp->lock is held. */
8011 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8013 u32 val, rdmac_mode;
8014 int i, err, limit;
8015 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8017 tg3_disable_ints(tp);
8019 tg3_stop_fw(tp);
8021 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8023 if (tg3_flag(tp, INIT_COMPLETE))
8024 tg3_abort_hw(tp, 1);
8026 /* Enable MAC control of LPI */
8027 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8028 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8029 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8030 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8032 tw32_f(TG3_CPMU_EEE_CTRL,
8033 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8035 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8036 TG3_CPMU_EEEMD_LPI_IN_TX |
8037 TG3_CPMU_EEEMD_LPI_IN_RX |
8038 TG3_CPMU_EEEMD_EEE_ENABLE;
8040 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8041 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8043 if (tg3_flag(tp, ENABLE_APE))
8044 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8046 tw32_f(TG3_CPMU_EEE_MODE, val);
8048 tw32_f(TG3_CPMU_EEE_DBTMR1,
8049 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8050 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8052 tw32_f(TG3_CPMU_EEE_DBTMR2,
8053 TG3_CPMU_DBTMR2_APE_TX_2047US |
8054 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8057 if (reset_phy)
8058 tg3_phy_reset(tp);
8060 err = tg3_chip_reset(tp);
8061 if (err)
8062 return err;
8064 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8066 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8067 val = tr32(TG3_CPMU_CTRL);
8068 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8069 tw32(TG3_CPMU_CTRL, val);
8071 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8072 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8073 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8074 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8076 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8077 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8078 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8079 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8081 val = tr32(TG3_CPMU_HST_ACC);
8082 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8083 val |= CPMU_HST_ACC_MACCLK_6_25;
8084 tw32(TG3_CPMU_HST_ACC, val);
8087 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8088 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8089 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8090 PCIE_PWR_MGMT_L1_THRESH_4MS;
8091 tw32(PCIE_PWR_MGMT_THRESH, val);
8093 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8094 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8096 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8098 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8099 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8102 if (tg3_flag(tp, L1PLLPD_EN)) {
8103 u32 grc_mode = tr32(GRC_MODE);
8105 /* Access the lower 1K of PL PCIE block registers. */
8106 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8107 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8109 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8110 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8111 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8113 tw32(GRC_MODE, grc_mode);
8116 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8117 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8118 u32 grc_mode = tr32(GRC_MODE);
8120 /* Access the lower 1K of PL PCIE block registers. */
8121 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8122 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8124 val = tr32(TG3_PCIE_TLDLPL_PORT +
8125 TG3_PCIE_PL_LO_PHYCTL5);
8126 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8127 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8129 tw32(GRC_MODE, grc_mode);
8132 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8133 u32 grc_mode = tr32(GRC_MODE);
8135 /* Access the lower 1K of DL PCIE block registers. */
8136 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8137 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8139 val = tr32(TG3_PCIE_TLDLPL_PORT +
8140 TG3_PCIE_DL_LO_FTSMAX);
8141 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8142 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8143 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8145 tw32(GRC_MODE, grc_mode);
8148 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8149 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8150 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8151 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8154 /* This works around an issue with Athlon chipsets on
8155 * B3 tigon3 silicon. This bit has no effect on any
8156 * other revision. But do not set this on PCI Express
8157 * chips and don't even touch the clocks if the CPMU is present.
8158 */
8159 if (!tg3_flag(tp, CPMU_PRESENT)) {
8160 if (!tg3_flag(tp, PCI_EXPRESS))
8161 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8162 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8165 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8166 tg3_flag(tp, PCIX_MODE)) {
8167 val = tr32(TG3PCI_PCISTATE);
8168 val |= PCISTATE_RETRY_SAME_DMA;
8169 tw32(TG3PCI_PCISTATE, val);
8172 if (tg3_flag(tp, ENABLE_APE)) {
8173 /* Allow reads and writes to the
8174 * APE register and memory space.
8175 */
8176 val = tr32(TG3PCI_PCISTATE);
8177 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8178 PCISTATE_ALLOW_APE_SHMEM_WR |
8179 PCISTATE_ALLOW_APE_PSPACE_WR;
8180 tw32(TG3PCI_PCISTATE, val);
8183 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8184 /* Enable some hw fixes. */
8185 val = tr32(TG3PCI_MSI_DATA);
8186 val |= (1 << 26) | (1 << 28) | (1 << 29);
8187 tw32(TG3PCI_MSI_DATA, val);
8190 /* Descriptor ring init may make accesses to the
8191 * NIC SRAM area to setup the TX descriptors, so we
8192 * can only do this after the hardware has been
8193 * successfully reset.
8194 */
8195 err = tg3_init_rings(tp);
8196 if (err)
8197 return err;
8199 if (tg3_flag(tp, 57765_PLUS)) {
8200 val = tr32(TG3PCI_DMA_RW_CTRL) &
8201 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8202 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8203 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8204 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8205 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8206 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8207 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8208 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8209 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8210 /* This value is determined during the probe time DMA
8211 * engine test, tg3_test_dma.
8212 */
8213 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8216 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8217 GRC_MODE_4X_NIC_SEND_RINGS |
8218 GRC_MODE_NO_TX_PHDR_CSUM |
8219 GRC_MODE_NO_RX_PHDR_CSUM);
8220 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8222 /* Pseudo-header checksum is done by hardware logic and not
8223 * the offload processors, so make the chip do the pseudo-
8224 * header checksums on receive. For transmit it is more
8225 * convenient to do the pseudo-header checksum in software
8226 * as Linux does that on transmit for us in all cases.
8227 */
8228 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8230 tw32(GRC_MODE,
8231 tp->grc_mode |
8232 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8234 /* Setup the timer prescaler register. Clock is always 66MHz. */
8235 val = tr32(GRC_MISC_CFG);
8236 val &= ~0xff;
8237 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8238 tw32(GRC_MISC_CFG, val);
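/* A prescaler value of 65 divides the 66 MHz core clock by (65 + 1),
 * presumably giving the GRC timer block its 1 MHz timebase.
 */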
8240 /* Initialize MBUF/DESC pool. */
8241 if (tg3_flag(tp, 5750_PLUS)) {
8242 /* Do nothing. */
8243 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8244 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8245 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8246 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8247 else
8248 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8249 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8250 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8251 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8252 int fw_len;
8254 fw_len = tp->fw_len;
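/* Round fw_len up to the next 128-byte boundary using the usual
 * (x + align - 1) & ~(align - 1) idiom (e.g. 0x1234 -> 0x1280), so
 * the mbuf pool programmed below starts at an aligned offset just
 * past the firmware image.
 */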
8255 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8256 tw32(BUFMGR_MB_POOL_ADDR,
8257 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8258 tw32(BUFMGR_MB_POOL_SIZE,
8259 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8262 if (tp->dev->mtu <= ETH_DATA_LEN) {
8263 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8264 tp->bufmgr_config.mbuf_read_dma_low_water);
8265 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8266 tp->bufmgr_config.mbuf_mac_rx_low_water);
8267 tw32(BUFMGR_MB_HIGH_WATER,
8268 tp->bufmgr_config.mbuf_high_water);
8269 } else {
8270 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8271 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8272 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8273 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8274 tw32(BUFMGR_MB_HIGH_WATER,
8275 tp->bufmgr_config.mbuf_high_water_jumbo);
8277 tw32(BUFMGR_DMA_LOW_WATER,
8278 tp->bufmgr_config.dma_low_water);
8279 tw32(BUFMGR_DMA_HIGH_WATER,
8280 tp->bufmgr_config.dma_high_water);
8282 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8284 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8285 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8286 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8287 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8288 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8289 tw32(BUFMGR_MODE, val);
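/* Bounded poll: wait up to 2000 * 10 us (~20 ms) for the enable bit
 * to read back before declaring the buffer manager dead.
 */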
8290 for (i = 0; i < 2000; i++) {
8291 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8292 break;
8293 udelay(10);
8295 if (i >= 2000) {
8296 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8297 return -ENODEV;
8300 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8301 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8303 tg3_setup_rxbd_thresholds(tp);
8305 /* Initialize TG3_BDINFO's at:
8306 * RCVDBDI_STD_BD: standard eth size rx ring
8307 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8308 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8310 * like so:
8311 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8312 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8313 * ring attribute flags
8314 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8316 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8317 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8319 * The size of each ring is fixed in the firmware, but the location is
8320 * configurable.
8321 */
8322 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8323 ((u64) tpr->rx_std_mapping >> 32));
8324 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8325 ((u64) tpr->rx_std_mapping & 0xffffffff));
8326 if (!tg3_flag(tp, 5717_PLUS))
8327 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8328 NIC_SRAM_RX_BUFFER_DESC);
8330 /* Disable the mini ring */
8331 if (!tg3_flag(tp, 5705_PLUS))
8332 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8333 BDINFO_FLAGS_DISABLED);
8335 /* Program the jumbo buffer descriptor ring control
8336 * blocks on those devices that have them.
8337 */
8338 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8339 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8341 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8342 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8343 ((u64) tpr->rx_jmb_mapping >> 32));
8344 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8345 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8346 val = TG3_RX_JMB_RING_SIZE(tp) <<
8347 BDINFO_FLAGS_MAXLEN_SHIFT;
8348 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8349 val | BDINFO_FLAGS_USE_EXT_RECV);
8350 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8351 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8352 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8353 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8354 } else {
8355 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8356 BDINFO_FLAGS_DISABLED);
8359 if (tg3_flag(tp, 57765_PLUS)) {
8360 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8361 val = TG3_RX_STD_MAX_SIZE_5700;
8362 else
8363 val = TG3_RX_STD_MAX_SIZE_5717;
8364 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8365 val |= (TG3_RX_STD_DMA_SZ << 2);
8366 } else
8367 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8368 } else
8369 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8371 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8373 tpr->rx_std_prod_idx = tp->rx_pending;
8374 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8376 tpr->rx_jmb_prod_idx =
8377 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8378 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8380 tg3_rings_reset(tp);
8382 /* Initialize MAC address and backoff seed. */
8383 __tg3_set_mac_addr(tp, 0);
8385 /* MTU + ethernet header + FCS + optional VLAN tag */
8386 tw32(MAC_RX_MTU_SIZE,
8387 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
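/* For the default 1500-byte MTU this programs
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522.
 */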
8389 /* The slot time is changed by tg3_setup_phy if we
8390 * run at gigabit with half duplex.
8391 */
8392 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8393 (6 << TX_LENGTHS_IPG_SHIFT) |
8394 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8396 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8397 val |= tr32(MAC_TX_LENGTHS) &
8398 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8399 TX_LENGTHS_CNT_DWN_VAL_MSK);
8401 tw32(MAC_TX_LENGTHS, val);
8403 /* Receive rules. */
8404 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8405 tw32(RCVLPC_CONFIG, 0x0181);
8407 /* Calculate RDMAC_MODE setting early; we need it to determine
8408 * the RCVLPC_STATE_ENABLE mask.
8409 */
8410 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8411 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8412 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8413 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8414 RDMAC_MODE_LNGREAD_ENAB);
8416 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8417 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8419 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8420 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8421 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8422 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8423 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8424 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8426 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8427 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8428 if (tg3_flag(tp, TSO_CAPABLE) &&
8429 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8430 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8431 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8432 !tg3_flag(tp, IS_5788)) {
8433 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8437 if (tg3_flag(tp, PCI_EXPRESS))
8438 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8440 if (tg3_flag(tp, HW_TSO_1) ||
8441 tg3_flag(tp, HW_TSO_2) ||
8442 tg3_flag(tp, HW_TSO_3))
8443 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8445 if (tg3_flag(tp, 57765_PLUS) ||
8446 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8447 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8448 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8450 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8451 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8453 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8454 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8455 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8456 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8457 tg3_flag(tp, 57765_PLUS)) {
8458 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8459 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8460 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8461 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8462 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8463 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8464 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8465 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8466 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8468 tw32(TG3_RDMA_RSRVCTRL_REG,
8469 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8472 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8473 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8474 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8475 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8476 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8477 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8480 /* Receive/send statistics. */
8481 if (tg3_flag(tp, 5750_PLUS)) {
8482 val = tr32(RCVLPC_STATS_ENABLE);
8483 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8484 tw32(RCVLPC_STATS_ENABLE, val);
8485 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8486 tg3_flag(tp, TSO_CAPABLE)) {
8487 val = tr32(RCVLPC_STATS_ENABLE);
8488 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8489 tw32(RCVLPC_STATS_ENABLE, val);
8490 } else {
8491 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8493 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8494 tw32(SNDDATAI_STATSENAB, 0xffffff);
8495 tw32(SNDDATAI_STATSCTRL,
8496 (SNDDATAI_SCTRL_ENABLE |
8497 SNDDATAI_SCTRL_FASTUPD));
8499 /* Setup host coalescing engine. */
8500 tw32(HOSTCC_MODE, 0);
8501 for (i = 0; i < 2000; i++) {
8502 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8503 break;
8504 udelay(10);
8507 __tg3_set_coalesce(tp, &tp->coal);
8509 if (!tg3_flag(tp, 5705_PLUS)) {
8510 /* Status/statistics block address. See tg3_timer,
8511 * the tg3_periodic_fetch_stats call there, and
8512 * tg3_get_stats to see how this works for 5705/5750 chips.
8513 */
8514 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8515 ((u64) tp->stats_mapping >> 32));
8516 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8517 ((u64) tp->stats_mapping & 0xffffffff));
8518 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8520 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8522 /* Clear statistics and status block memory areas */
8523 for (i = NIC_SRAM_STATS_BLK;
8524 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8525 i += sizeof(u32)) {
8526 tg3_write_mem(tp, i, 0);
8527 udelay(40);
8531 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8533 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8534 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8535 if (!tg3_flag(tp, 5705_PLUS))
8536 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8538 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8539 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8540 /* reset to prevent losing 1st rx packet intermittently */
8541 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8542 udelay(10);
8545 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8546 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8547 MAC_MODE_FHDE_ENABLE;
8548 if (tg3_flag(tp, ENABLE_APE))
8549 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8550 if (!tg3_flag(tp, 5705_PLUS) &&
8551 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8552 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8553 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8554 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8555 udelay(40);
8557 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8558 * If TG3_FLAG_IS_NIC is zero, we should read the
8559 * register to preserve the GPIO settings for LOMs. The GPIOs,
8560 * whether used as inputs or outputs, are set by boot code after
8561 * reset.
8562 */
8563 if (!tg3_flag(tp, IS_NIC)) {
8564 u32 gpio_mask;
8566 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8567 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8568 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8570 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8571 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8572 GRC_LCLCTRL_GPIO_OUTPUT3;
8574 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8575 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8577 tp->grc_local_ctrl &= ~gpio_mask;
8578 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8580 /* GPIO1 must be driven high for eeprom write protect */
8581 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8582 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8583 GRC_LCLCTRL_GPIO_OUTPUT1);
8585 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8586 udelay(100);
8588 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8589 val = tr32(MSGINT_MODE);
8590 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8591 tw32(MSGINT_MODE, val);
8594 if (!tg3_flag(tp, 5705_PLUS)) {
8595 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8596 udelay(40);
8599 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8600 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8601 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8602 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8603 WDMAC_MODE_LNGREAD_ENAB);
8605 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8606 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8607 if (tg3_flag(tp, TSO_CAPABLE) &&
8608 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8609 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8610 /* nothing */
8611 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8612 !tg3_flag(tp, IS_5788)) {
8613 val |= WDMAC_MODE_RX_ACCEL;
8617 /* Enable host coalescing bug fix */
8618 if (tg3_flag(tp, 5755_PLUS))
8619 val |= WDMAC_MODE_STATUS_TAG_FIX;
8621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8622 val |= WDMAC_MODE_BURST_ALL_DATA;
8624 tw32_f(WDMAC_MODE, val);
8625 udelay(40);
8627 if (tg3_flag(tp, PCIX_MODE)) {
8628 u16 pcix_cmd;
8630 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8631 &pcix_cmd);
8632 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8633 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8634 pcix_cmd |= PCI_X_CMD_READ_2K;
8635 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8636 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8637 pcix_cmd |= PCI_X_CMD_READ_2K;
8639 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8640 pcix_cmd);
8643 tw32_f(RDMAC_MODE, rdmac_mode);
8644 udelay(40);
8646 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8647 if (!tg3_flag(tp, 5705_PLUS))
8648 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8650 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8651 tw32(SNDDATAC_MODE,
8652 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8653 else
8654 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8656 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8657 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8658 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8659 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8660 val |= RCVDBDI_MODE_LRG_RING_SZ;
8661 tw32(RCVDBDI_MODE, val);
8662 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8663 if (tg3_flag(tp, HW_TSO_1) ||
8664 tg3_flag(tp, HW_TSO_2) ||
8665 tg3_flag(tp, HW_TSO_3))
8666 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8667 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8668 if (tg3_flag(tp, ENABLE_TSS))
8669 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8670 tw32(SNDBDI_MODE, val);
8671 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8673 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8674 err = tg3_load_5701_a0_firmware_fix(tp);
8675 if (err)
8676 return err;
8679 if (tg3_flag(tp, TSO_CAPABLE)) {
8680 err = tg3_load_tso_firmware(tp);
8681 if (err)
8682 return err;
8685 tp->tx_mode = TX_MODE_ENABLE;
8687 if (tg3_flag(tp, 5755_PLUS) ||
8688 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8689 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8691 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8692 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8693 tp->tx_mode &= ~val;
8694 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8697 tw32_f(MAC_TX_MODE, tp->tx_mode);
8698 udelay(100);
8700 if (tg3_flag(tp, ENABLE_RSS)) {
8701 int i = 0;
8702 u32 reg = MAC_RSS_INDIR_TBL_0;
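/* The indirection table is TG3_RSS_INDIR_TBL_SIZE 4-bit entries
 * packed eight per 32-bit register. With a single rx ring
 * (irq_cnt == 2) every entry is zeroed; otherwise the entries cycle
 * round-robin over the rx rings (irq_cnt - 1 of them, since vector 0
 * services link and other non-rx events).
 */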
8704 if (tp->irq_cnt == 2) {
8705 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8706 tw32(reg, 0x0);
8707 reg += 4;
8709 } else {
8710 u32 val;
8712 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8713 val = i % (tp->irq_cnt - 1);
8714 i++;
8715 for (; i % 8; i++) {
8716 val <<= 4;
8717 val |= (i % (tp->irq_cnt - 1));
8719 tw32(reg, val);
8720 reg += 4;
8724 /* Setup the "secret" hash key. */
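/* Ten 32-bit writes load a fixed 40-byte hash key, presumably for
 * the Toeplitz-style hash selected by the RX_MODE_RSS_*_HASH_EN bits
 * programmed below; a fixed key keeps the flow-to-ring mapping
 * deterministic across boots.
 */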
8725 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8726 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8727 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8728 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8729 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8730 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8731 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8732 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8733 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8734 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8737 tp->rx_mode = RX_MODE_ENABLE;
8738 if (tg3_flag(tp, 5755_PLUS))
8739 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8741 if (tg3_flag(tp, ENABLE_RSS))
8742 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8743 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8744 RX_MODE_RSS_IPV6_HASH_EN |
8745 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8746 RX_MODE_RSS_IPV4_HASH_EN |
8747 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8749 tw32_f(MAC_RX_MODE, tp->rx_mode);
8750 udelay(10);
8752 tw32(MAC_LED_CTRL, tp->led_ctrl);
8754 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8755 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8756 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8757 udelay(10);
8759 tw32_f(MAC_RX_MODE, tp->rx_mode);
8760 udelay(10);
8762 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8763 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8764 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8765 /* Set drive transmission level to 1.2V */
8766 /* only if the signal pre-emphasis bit is not set */
8767 val = tr32(MAC_SERDES_CFG);
8768 val &= 0xfffff000;
8769 val |= 0x880;
8770 tw32(MAC_SERDES_CFG, val);
8772 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8773 tw32(MAC_SERDES_CFG, 0x616000);
8776 /* Prevent chip from dropping frames when flow control
8777 * is enabled.
8778 */
8779 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8780 val = 1;
8781 else
8782 val = 2;
8783 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8785 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8786 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8787 /* Use hardware link auto-negotiation */
8788 tg3_flag_set(tp, HW_AUTONEG);
8791 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8792 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8793 u32 tmp;
8795 tmp = tr32(SERDES_RX_CTRL);
8796 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8797 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8798 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8799 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8802 if (!tg3_flag(tp, USE_PHYLIB)) {
8803 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8804 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8805 tp->link_config.speed = tp->link_config.orig_speed;
8806 tp->link_config.duplex = tp->link_config.orig_duplex;
8807 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8810 err = tg3_setup_phy(tp, 0);
8811 if (err)
8812 return err;
8814 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8815 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8816 u32 tmp;
8818 /* Clear CRC stats. */
8819 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8820 tg3_writephy(tp, MII_TG3_TEST1,
8821 tmp | MII_TG3_TEST1_CRC_EN);
8822 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8827 __tg3_set_rx_mode(tp->dev);
8829 /* Initialize receive rules. */
8830 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8831 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8832 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8833 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8835 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8836 limit = 8;
8837 else
8838 limit = 16;
8839 if (tg3_flag(tp, ENABLE_ASF))
8840 limit -= 4;
8841 switch (limit) {
8842 case 16:
8843 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8844 case 15:
8845 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8846 case 14:
8847 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8848 case 13:
8849 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8850 case 12:
8851 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8852 case 11:
8853 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8854 case 10:
8855 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8856 case 9:
8857 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8858 case 8:
8859 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8860 case 7:
8861 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8862 case 6:
8863 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8864 case 5:
8865 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8866 case 4:
8867 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8868 case 3:
8869 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8870 case 2:
8871 case 1:
8873 default:
8874 break;
8877 if (tg3_flag(tp, ENABLE_APE))
8878 /* Write our heartbeat update interval to APE. */
8879 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8880 APE_HOST_HEARTBEAT_INT_DISABLE);
8882 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8884 return 0;
8887 /* Called at device open time to get the chip ready for
8888 * packet processing. Invoked with tp->lock held.
8889 */
8890 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8892 tg3_switch_clocks(tp);
8894 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8896 return tg3_reset_hw(tp, reset_phy);
8899 #define TG3_STAT_ADD32(PSTAT, REG) \
8900 do { u32 __val = tr32(REG); \
8901 (PSTAT)->low += __val; \
8902 if ((PSTAT)->low < __val) \
8903 (PSTAT)->high += 1; \
8904 } while (0)
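/* After (PSTAT)->low += __val, low < __val exactly when the 32-bit
 * add wrapped, so the carry is propagated into ->high. This builds a
 * 64-bit software counter from periodic 32-bit register samples.
 */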
8906 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8908 struct tg3_hw_stats *sp = tp->hw_stats;
8910 if (!netif_carrier_ok(tp->dev))
8911 return;
8913 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8914 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8915 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8916 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8917 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8918 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8919 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8920 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8921 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8922 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8923 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8924 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8925 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8927 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8928 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8929 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8930 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8931 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8932 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8933 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8934 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8935 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8936 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8937 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8938 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8939 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8940 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8942 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8943 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8944 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8945 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8946 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8947 } else {
8948 u32 val = tr32(HOSTCC_FLOW_ATTN);
8949 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8950 if (val) {
8951 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8952 sp->rx_discards.low += val;
8953 if (sp->rx_discards.low < val)
8954 sp->rx_discards.high += 1;
8956 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8958 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8961 static void tg3_chk_missed_msi(struct tg3 *tp)
8963 u32 i;
8965 for (i = 0; i < tp->irq_cnt; i++) {
8966 struct tg3_napi *tnapi = &tp->napi[i];
8968 if (tg3_has_work(tnapi)) {
8969 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
8970 tnapi->last_tx_cons == tnapi->tx_cons) {
8971 if (tnapi->chk_msi_cnt < 1) {
8972 tnapi->chk_msi_cnt++;
8973 return;
8975 tw32_mailbox(tnapi->int_mbox,
8976 tnapi->last_tag << 24);
8979 tnapi->chk_msi_cnt = 0;
8980 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
8981 tnapi->last_tx_cons = tnapi->tx_cons;
8985 static void tg3_timer(unsigned long __opaque)
8987 struct tg3 *tp = (struct tg3 *) __opaque;
8989 if (tp->irq_sync)
8990 goto restart_timer;
8992 spin_lock(&tp->lock);
8994 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8995 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8996 tg3_chk_missed_msi(tp);
8998 if (!tg3_flag(tp, TAGGED_STATUS)) {
8999 /* All of this garbage is because when using non-tagged
9000 * IRQ status the mailbox/status_block protocol the chip
9001 * uses with the cpu is race prone.
9002 */
9003 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9004 tw32(GRC_LOCAL_CTRL,
9005 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9006 } else {
9007 tw32(HOSTCC_MODE, tp->coalesce_mode |
9008 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9011 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9012 tg3_flag_set(tp, RESTART_TIMER);
9013 spin_unlock(&tp->lock);
9014 schedule_work(&tp->reset_task);
9015 return;
9019 /* This part only runs once per second. */
9020 if (!--tp->timer_counter) {
9021 if (tg3_flag(tp, 5705_PLUS))
9022 tg3_periodic_fetch_stats(tp);
9024 if (tp->setlpicnt && !--tp->setlpicnt)
9025 tg3_phy_eee_enable(tp);
9027 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9028 u32 mac_stat;
9029 int phy_event;
9031 mac_stat = tr32(MAC_STATUS);
9033 phy_event = 0;
9034 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9035 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9036 phy_event = 1;
9037 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9038 phy_event = 1;
9040 if (phy_event)
9041 tg3_setup_phy(tp, 0);
9042 } else if (tg3_flag(tp, POLL_SERDES)) {
9043 u32 mac_stat = tr32(MAC_STATUS);
9044 int need_setup = 0;
9046 if (netif_carrier_ok(tp->dev) &&
9047 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9048 need_setup = 1;
9050 if (!netif_carrier_ok(tp->dev) &&
9051 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9052 MAC_STATUS_SIGNAL_DET))) {
9053 need_setup = 1;
9055 if (need_setup) {
9056 if (!tp->serdes_counter) {
9057 tw32_f(MAC_MODE,
9058 (tp->mac_mode &
9059 ~MAC_MODE_PORT_MODE_MASK));
9060 udelay(40);
9061 tw32_f(MAC_MODE, tp->mac_mode);
9062 udelay(40);
9064 tg3_setup_phy(tp, 0);
9066 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9067 tg3_flag(tp, 5780_CLASS)) {
9068 tg3_serdes_parallel_detect(tp);
9071 tp->timer_counter = tp->timer_multiplier;
9074 /* Heartbeat is only sent once every 2 seconds.
9075 *
9076 * The heartbeat is to tell the ASF firmware that the host
9077 * driver is still alive. In the event that the OS crashes,
9078 * ASF needs to reset the hardware to free up the FIFO space
9079 * that may be filled with rx packets destined for the host.
9080 * If the FIFO is full, ASF will no longer function properly.
9081 *
9082 * Unintended resets have been reported on real time kernels
9083 * where the timer doesn't run on time. Netpoll will also have
9084 * the same problem.
9085 *
9086 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9087 * to check the ring condition when the heartbeat is expiring
9088 * before doing the reset. This will prevent most unintended
9089 * resets.
9090 */
9091 if (!--tp->asf_counter) {
9092 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9093 tg3_wait_for_event_ack(tp);
9095 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9096 FWCMD_NICDRV_ALIVE3);
9097 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9098 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9099 TG3_FW_UPDATE_TIMEOUT_SEC);
9101 tg3_generate_fw_event(tp);
9103 tp->asf_counter = tp->asf_multiplier;
9106 spin_unlock(&tp->lock);
9108 restart_timer:
9109 tp->timer.expires = jiffies + tp->timer_offset;
9110 add_timer(&tp->timer);
9113 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9115 irq_handler_t fn;
9116 unsigned long flags;
9117 char *name;
9118 struct tg3_napi *tnapi = &tp->napi[irq_num];
9120 if (tp->irq_cnt == 1)
9121 name = tp->dev->name;
9122 else {
9123 name = &tnapi->irq_lbl[0];
9124 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9125 name[IFNAMSIZ-1] = 0;
9128 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9129 fn = tg3_msi;
9130 if (tg3_flag(tp, 1SHOT_MSI))
9131 fn = tg3_msi_1shot;
9132 flags = 0;
9133 } else {
9134 fn = tg3_interrupt;
9135 if (tg3_flag(tp, TAGGED_STATUS))
9136 fn = tg3_interrupt_tagged;
9137 flags = IRQF_SHARED;
9140 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9143 static int tg3_test_interrupt(struct tg3 *tp)
9145 struct tg3_napi *tnapi = &tp->napi[0];
9146 struct net_device *dev = tp->dev;
9147 int err, i, intr_ok = 0;
9148 u32 val;
9150 if (!netif_running(dev))
9151 return -ENODEV;
9153 tg3_disable_ints(tp);
9155 free_irq(tnapi->irq_vec, tnapi);
9157 /*
9158 * Turn off MSI one shot mode. Otherwise this test has no
9159 * observable way to know whether the interrupt was delivered.
9160 */
9161 if (tg3_flag(tp, 57765_PLUS)) {
9162 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9163 tw32(MSGINT_MODE, val);
9166 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9167 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9168 if (err)
9169 return err;
9171 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9172 tg3_enable_ints(tp);
9174 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9175 tnapi->coal_now);
9177 for (i = 0; i < 5; i++) {
9178 u32 int_mbox, misc_host_ctrl;
9180 int_mbox = tr32_mailbox(tnapi->int_mbox);
9181 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9183 if ((int_mbox != 0) ||
9184 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9185 intr_ok = 1;
9186 break;
9189 if (tg3_flag(tp, 57765_PLUS) &&
9190 tnapi->hw_status->status_tag != tnapi->last_tag)
9191 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9193 msleep(10);
9196 tg3_disable_ints(tp);
9198 free_irq(tnapi->irq_vec, tnapi);
9200 err = tg3_request_irq(tp, 0);
9202 if (err)
9203 return err;
9205 if (intr_ok) {
9206 /* Reenable MSI one shot mode. */
9207 if (tg3_flag(tp, 57765_PLUS)) {
9208 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9209 tw32(MSGINT_MODE, val);
9211 return 0;
9214 return -EIO;
9217 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9218 * successfully restored.
9219 */
9220 static int tg3_test_msi(struct tg3 *tp)
9222 int err;
9223 u16 pci_cmd;
9225 if (!tg3_flag(tp, USING_MSI))
9226 return 0;
9228 /* Turn off SERR reporting in case MSI terminates with Master
9229 * Abort.
9230 */
9231 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9232 pci_write_config_word(tp->pdev, PCI_COMMAND,
9233 pci_cmd & ~PCI_COMMAND_SERR);
9235 err = tg3_test_interrupt(tp);
9237 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9239 if (!err)
9240 return 0;
9242 /* other failures */
9243 if (err != -EIO)
9244 return err;
9246 /* MSI test failed, go back to INTx mode */
9247 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9248 "to INTx mode. Please report this failure to the PCI "
9249 "maintainer and include system chipset information\n");
9251 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9253 pci_disable_msi(tp->pdev);
9255 tg3_flag_clear(tp, USING_MSI);
9256 tp->napi[0].irq_vec = tp->pdev->irq;
9258 err = tg3_request_irq(tp, 0);
9259 if (err)
9260 return err;
9262 /* Need to reset the chip because the MSI cycle may have terminated
9263 * with Master Abort.
9264 */
9265 tg3_full_lock(tp, 1);
9267 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9268 err = tg3_init_hw(tp, 1);
9270 tg3_full_unlock(tp);
9272 if (err)
9273 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9275 return err;
9278 static int tg3_request_firmware(struct tg3 *tp)
9280 const __be32 *fw_data;
9282 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9283 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9284 tp->fw_needed);
9285 return -ENOENT;
9288 fw_data = (void *)tp->fw->data;
9290 /* Firmware blob starts with version numbers, followed by
9291 * start address and _full_ length including BSS sections
9292 * (which must be longer than the actual data, of course).
9293 */
9295 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9296 if (tp->fw_len < (tp->fw->size - 12)) {
9297 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9298 tp->fw_len, tp->fw_needed);
9299 release_firmware(tp->fw);
9300 tp->fw = NULL;
9301 return -EINVAL;
9304 /* We no longer need firmware; we have it. */
9305 tp->fw_needed = NULL;
9306 return 0;
9309 static bool tg3_enable_msix(struct tg3 *tp)
9311 int i, rc, cpus = num_online_cpus();
9312 struct msix_entry msix_ent[tp->irq_max];
9314 if (cpus == 1)
9315 /* Just fallback to the simpler MSI mode. */
9316 return false;
9318 /*
9319 * We want as many rx rings enabled as there are cpus.
9320 * The first MSIX vector only deals with link interrupts, etc.,
9321 * so we add one to the number of vectors we are requesting.
9322 */
9323 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9325 for (i = 0; i < tp->irq_max; i++) {
9326 msix_ent[i].entry = i;
9327 msix_ent[i].vector = 0;
9330 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9331 if (rc < 0) {
9332 return false;
9333 } else if (rc != 0) {
9334 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9335 return false;
9336 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9337 tp->irq_cnt, rc);
9338 tp->irq_cnt = rc;
9341 for (i = 0; i < tp->irq_max; i++)
9342 tp->napi[i].irq_vec = msix_ent[i].vector;
9344 netif_set_real_num_tx_queues(tp->dev, 1);
9345 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9346 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9347 pci_disable_msix(tp->pdev);
9348 return false;
9351 if (tp->irq_cnt > 1) {
9352 tg3_flag_set(tp, ENABLE_RSS);
9354 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9355 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9356 tg3_flag_set(tp, ENABLE_TSS);
9357 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9361 return true;
9364 static void tg3_ints_init(struct tg3 *tp)
9366 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9367 !tg3_flag(tp, TAGGED_STATUS)) {
9368 /* All MSI supporting chips should support tagged
9369 * status. Assert that this is the case.
9370 */
9371 netdev_warn(tp->dev,
9372 "MSI without TAGGED_STATUS? Not using MSI\n");
9373 goto defcfg;
9376 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9377 tg3_flag_set(tp, USING_MSIX);
9378 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9379 tg3_flag_set(tp, USING_MSI);
9381 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9382 u32 msi_mode = tr32(MSGINT_MODE);
9383 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9384 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9385 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9387 defcfg:
9388 if (!tg3_flag(tp, USING_MSIX)) {
9389 tp->irq_cnt = 1;
9390 tp->napi[0].irq_vec = tp->pdev->irq;
9391 netif_set_real_num_tx_queues(tp->dev, 1);
9392 netif_set_real_num_rx_queues(tp->dev, 1);
9396 static void tg3_ints_fini(struct tg3 *tp)
9398 if (tg3_flag(tp, USING_MSIX))
9399 pci_disable_msix(tp->pdev);
9400 else if (tg3_flag(tp, USING_MSI))
9401 pci_disable_msi(tp->pdev);
9402 tg3_flag_clear(tp, USING_MSI);
9403 tg3_flag_clear(tp, USING_MSIX);
9404 tg3_flag_clear(tp, ENABLE_RSS);
9405 tg3_flag_clear(tp, ENABLE_TSS);
9408 static int tg3_open(struct net_device *dev)
9410 struct tg3 *tp = netdev_priv(dev);
9411 int i, err;
9413 if (tp->fw_needed) {
9414 err = tg3_request_firmware(tp);
9415 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9416 if (err)
9417 return err;
9418 } else if (err) {
9419 netdev_warn(tp->dev, "TSO capability disabled\n");
9420 tg3_flag_clear(tp, TSO_CAPABLE);
9421 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9422 netdev_notice(tp->dev, "TSO capability restored\n");
9423 tg3_flag_set(tp, TSO_CAPABLE);
9427 netif_carrier_off(tp->dev);
9429 err = tg3_power_up(tp);
9430 if (err)
9431 return err;
9433 tg3_full_lock(tp, 0);
9435 tg3_disable_ints(tp);
9436 tg3_flag_clear(tp, INIT_COMPLETE);
9438 tg3_full_unlock(tp);
9440 /*
9441 * Setup interrupts first so we know how
9442 * many NAPI resources to allocate
9443 */
9444 tg3_ints_init(tp);
9446 /* The placement of this call is tied
9447 * to the setup and use of Host TX descriptors.
9448 */
9449 err = tg3_alloc_consistent(tp);
9450 if (err)
9451 goto err_out1;
9453 tg3_napi_init(tp);
9455 tg3_napi_enable(tp);
9457 for (i = 0; i < tp->irq_cnt; i++) {
9458 struct tg3_napi *tnapi = &tp->napi[i];
9459 err = tg3_request_irq(tp, i);
9460 if (err) {
9461 for (i--; i >= 0; i--)
9462 free_irq(tp->napi[i].irq_vec, &tp->napi[i]);
9463 break;
9467 if (err)
9468 goto err_out2;
9470 tg3_full_lock(tp, 0);
9472 err = tg3_init_hw(tp, 1);
9473 if (err) {
9474 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9475 tg3_free_rings(tp);
9476 } else {
9477 if (tg3_flag(tp, TAGGED_STATUS) &&
9478 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9479 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9480 tp->timer_offset = HZ;
9481 else
9482 tp->timer_offset = HZ / 10;
9484 BUG_ON(tp->timer_offset > HZ);
9485 tp->timer_counter = tp->timer_multiplier =
9486 (HZ / tp->timer_offset);
9487 tp->asf_counter = tp->asf_multiplier =
9488 ((HZ / tp->timer_offset) * 2);
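/* With timer_offset = HZ / 10 the timer fires every 100 ms, so
 * timer_counter counts 10 ticks per 1-second stats pass and
 * asf_counter 20 ticks per 2-second ASF heartbeat (see tg3_timer).
 */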
9490 init_timer(&tp->timer);
9491 tp->timer.expires = jiffies + tp->timer_offset;
9492 tp->timer.data = (unsigned long) tp;
9493 tp->timer.function = tg3_timer;
9496 tg3_full_unlock(tp);
9498 if (err)
9499 goto err_out3;
9501 if (tg3_flag(tp, USING_MSI)) {
9502 err = tg3_test_msi(tp);
9504 if (err) {
9505 tg3_full_lock(tp, 0);
9506 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9507 tg3_free_rings(tp);
9508 tg3_full_unlock(tp);
9510 goto err_out2;
9513 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9514 u32 val = tr32(PCIE_TRANSACTION_CFG);
9516 tw32(PCIE_TRANSACTION_CFG,
9517 val | PCIE_TRANS_CFG_1SHOT_MSI);
9521 tg3_phy_start(tp);
9523 tg3_full_lock(tp, 0);
9525 add_timer(&tp->timer);
9526 tg3_flag_set(tp, INIT_COMPLETE);
9527 tg3_enable_ints(tp);
9529 tg3_full_unlock(tp);
9531 netif_tx_start_all_queues(dev);
9533 /*
9534 * Reset loopback feature if it was turned on while the device was down
9535 * to make sure that it's installed properly now.
9536 */
9537 if (dev->features & NETIF_F_LOOPBACK)
9538 tg3_set_loopback(dev, dev->features);
9540 return 0;
9542 err_out3:
9543 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9544 struct tg3_napi *tnapi = &tp->napi[i];
9545 free_irq(tnapi->irq_vec, tnapi);
9548 err_out2:
9549 tg3_napi_disable(tp);
9550 tg3_napi_fini(tp);
9551 tg3_free_consistent(tp);
9553 err_out1:
9554 tg3_ints_fini(tp);
9555 tg3_frob_aux_power(tp, false);
9556 pci_set_power_state(tp->pdev, PCI_D3hot);
9557 return err;
9560 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9561 struct rtnl_link_stats64 *);
9562 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9564 static int tg3_close(struct net_device *dev)
9566 int i;
9567 struct tg3 *tp = netdev_priv(dev);
9569 tg3_napi_disable(tp);
9570 cancel_work_sync(&tp->reset_task);
9572 netif_tx_stop_all_queues(dev);
9574 del_timer_sync(&tp->timer);
9576 tg3_phy_stop(tp);
9578 tg3_full_lock(tp, 1);
9580 tg3_disable_ints(tp);
9582 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9583 tg3_free_rings(tp);
9584 tg3_flag_clear(tp, INIT_COMPLETE);
9586 tg3_full_unlock(tp);
9588 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9589 struct tg3_napi *tnapi = &tp->napi[i];
9590 free_irq(tnapi->irq_vec, tnapi);
9593 tg3_ints_fini(tp);
9595 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9597 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9598 sizeof(tp->estats_prev));
9600 tg3_napi_fini(tp);
9602 tg3_free_consistent(tp);
9604 tg3_power_down(tp);
9606 netif_carrier_off(tp->dev);
9608 return 0;
9611 static inline u64 get_stat64(tg3_stat64_t *val)
9613 return ((u64)val->high << 32) | ((u64)val->low);
9616 static u64 calc_crc_errors(struct tg3 *tp)
9618 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9620 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9621 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9622 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9623 u32 val;
9625 spin_lock_bh(&tp->lock);
9626 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9627 tg3_writephy(tp, MII_TG3_TEST1,
9628 val | MII_TG3_TEST1_CRC_EN);
9629 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9630 } else
9631 val = 0;
9632 spin_unlock_bh(&tp->lock);
9634 tp->phy_crc_errors += val;
9636 return tp->phy_crc_errors;
9639 return get_stat64(&hw_stats->rx_fcs_errors);
9642 #define ESTAT_ADD(member) \
9643 estats->member = old_estats->member + \
9644 get_stat64(&hw_stats->member)
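/* Each ethtool stat is the live hardware counter added to the
 * snapshot saved in tp->estats_prev at the last close (see
 * tg3_close), so totals accumulate across open/close cycles.
 */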
9646 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9648 struct tg3_ethtool_stats *estats = &tp->estats;
9649 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9650 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9652 if (!hw_stats)
9653 return old_estats;
9655 ESTAT_ADD(rx_octets);
9656 ESTAT_ADD(rx_fragments);
9657 ESTAT_ADD(rx_ucast_packets);
9658 ESTAT_ADD(rx_mcast_packets);
9659 ESTAT_ADD(rx_bcast_packets);
9660 ESTAT_ADD(rx_fcs_errors);
9661 ESTAT_ADD(rx_align_errors);
9662 ESTAT_ADD(rx_xon_pause_rcvd);
9663 ESTAT_ADD(rx_xoff_pause_rcvd);
9664 ESTAT_ADD(rx_mac_ctrl_rcvd);
9665 ESTAT_ADD(rx_xoff_entered);
9666 ESTAT_ADD(rx_frame_too_long_errors);
9667 ESTAT_ADD(rx_jabbers);
9668 ESTAT_ADD(rx_undersize_packets);
9669 ESTAT_ADD(rx_in_length_errors);
9670 ESTAT_ADD(rx_out_length_errors);
9671 ESTAT_ADD(rx_64_or_less_octet_packets);
9672 ESTAT_ADD(rx_65_to_127_octet_packets);
9673 ESTAT_ADD(rx_128_to_255_octet_packets);
9674 ESTAT_ADD(rx_256_to_511_octet_packets);
9675 ESTAT_ADD(rx_512_to_1023_octet_packets);
9676 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9677 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9678 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9679 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9680 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9682 ESTAT_ADD(tx_octets);
9683 ESTAT_ADD(tx_collisions);
9684 ESTAT_ADD(tx_xon_sent);
9685 ESTAT_ADD(tx_xoff_sent);
9686 ESTAT_ADD(tx_flow_control);
9687 ESTAT_ADD(tx_mac_errors);
9688 ESTAT_ADD(tx_single_collisions);
9689 ESTAT_ADD(tx_mult_collisions);
9690 ESTAT_ADD(tx_deferred);
9691 ESTAT_ADD(tx_excessive_collisions);
9692 ESTAT_ADD(tx_late_collisions);
9693 ESTAT_ADD(tx_collide_2times);
9694 ESTAT_ADD(tx_collide_3times);
9695 ESTAT_ADD(tx_collide_4times);
9696 ESTAT_ADD(tx_collide_5times);
9697 ESTAT_ADD(tx_collide_6times);
9698 ESTAT_ADD(tx_collide_7times);
9699 ESTAT_ADD(tx_collide_8times);
9700 ESTAT_ADD(tx_collide_9times);
9701 ESTAT_ADD(tx_collide_10times);
9702 ESTAT_ADD(tx_collide_11times);
9703 ESTAT_ADD(tx_collide_12times);
9704 ESTAT_ADD(tx_collide_13times);
9705 ESTAT_ADD(tx_collide_14times);
9706 ESTAT_ADD(tx_collide_15times);
9707 ESTAT_ADD(tx_ucast_packets);
9708 ESTAT_ADD(tx_mcast_packets);
9709 ESTAT_ADD(tx_bcast_packets);
9710 ESTAT_ADD(tx_carrier_sense_errors);
9711 ESTAT_ADD(tx_discards);
9712 ESTAT_ADD(tx_errors);
9714 ESTAT_ADD(dma_writeq_full);
9715 ESTAT_ADD(dma_write_prioq_full);
9716 ESTAT_ADD(rxbds_empty);
9717 ESTAT_ADD(rx_discards);
9718 ESTAT_ADD(rx_errors);
9719 ESTAT_ADD(rx_threshold_hit);
9721 ESTAT_ADD(dma_readq_full);
9722 ESTAT_ADD(dma_read_prioq_full);
9723 ESTAT_ADD(tx_comp_queue_full);
9725 ESTAT_ADD(ring_set_send_prod_index);
9726 ESTAT_ADD(ring_status_update);
9727 ESTAT_ADD(nic_irqs);
9728 ESTAT_ADD(nic_avoided_irqs);
9729 ESTAT_ADD(nic_tx_threshold_hit);
9731 ESTAT_ADD(mbuf_lwm_thresh_hit);
9733 return estats;
9736 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9737 struct rtnl_link_stats64 *stats)
9739 struct tg3 *tp = netdev_priv(dev);
9740 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9741 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9743 if (!hw_stats)
9744 return old_stats;
9746 stats->rx_packets = old_stats->rx_packets +
9747 get_stat64(&hw_stats->rx_ucast_packets) +
9748 get_stat64(&hw_stats->rx_mcast_packets) +
9749 get_stat64(&hw_stats->rx_bcast_packets);
9751 stats->tx_packets = old_stats->tx_packets +
9752 get_stat64(&hw_stats->tx_ucast_packets) +
9753 get_stat64(&hw_stats->tx_mcast_packets) +
9754 get_stat64(&hw_stats->tx_bcast_packets);
9756 stats->rx_bytes = old_stats->rx_bytes +
9757 get_stat64(&hw_stats->rx_octets);
9758 stats->tx_bytes = old_stats->tx_bytes +
9759 get_stat64(&hw_stats->tx_octets);
9761 stats->rx_errors = old_stats->rx_errors +
9762 get_stat64(&hw_stats->rx_errors);
9763 stats->tx_errors = old_stats->tx_errors +
9764 get_stat64(&hw_stats->tx_errors) +
9765 get_stat64(&hw_stats->tx_mac_errors) +
9766 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9767 get_stat64(&hw_stats->tx_discards);
9769 stats->multicast = old_stats->multicast +
9770 get_stat64(&hw_stats->rx_mcast_packets);
9771 stats->collisions = old_stats->collisions +
9772 get_stat64(&hw_stats->tx_collisions);
9774 stats->rx_length_errors = old_stats->rx_length_errors +
9775 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9776 get_stat64(&hw_stats->rx_undersize_packets);
9778 stats->rx_over_errors = old_stats->rx_over_errors +
9779 get_stat64(&hw_stats->rxbds_empty);
9780 stats->rx_frame_errors = old_stats->rx_frame_errors +
9781 get_stat64(&hw_stats->rx_align_errors);
9782 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9783 get_stat64(&hw_stats->tx_discards);
9784 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9785 get_stat64(&hw_stats->tx_carrier_sense_errors);
9787 stats->rx_crc_errors = old_stats->rx_crc_errors +
9788 calc_crc_errors(tp);
9790 stats->rx_missed_errors = old_stats->rx_missed_errors +
9791 get_stat64(&hw_stats->rx_discards);
9793 stats->rx_dropped = tp->rx_dropped;
9795 return stats;
9798 static inline u32 calc_crc(unsigned char *buf, int len)
9800 u32 reg;
9801 u32 tmp;
9802 int j, k;
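/* Bit-serial CRC-32 using the reflected Ethernet polynomial
 * 0xedb88320, returning the complemented remainder; presumably the
 * same CRC the MAC applies when hashing incoming multicast
 * addresses against the filter registers.
 */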
9804 reg = 0xffffffff;
9806 for (j = 0; j < len; j++) {
9807 reg ^= buf[j];
9809 for (k = 0; k < 8; k++) {
9810 tmp = reg & 0x01;
9812 reg >>= 1;
9814 if (tmp)
9815 reg ^= 0xedb88320;
9819 return ~reg;
9822 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9824 /* accept or reject all multicast frames */
9825 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9826 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9827 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9828 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9831 static void __tg3_set_rx_mode(struct net_device *dev)
9833 struct tg3 *tp = netdev_priv(dev);
9834 u32 rx_mode;
9836 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9837 RX_MODE_KEEP_VLAN_TAG);
9839 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9840 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9841 * flag clear.
9842 */
9843 if (!tg3_flag(tp, ENABLE_ASF))
9844 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9845 #endif
9847 if (dev->flags & IFF_PROMISC) {
9848 /* Promiscuous mode. */
9849 rx_mode |= RX_MODE_PROMISC;
9850 } else if (dev->flags & IFF_ALLMULTI) {
9851 /* Accept all multicast. */
9852 tg3_set_multi(tp, 1);
9853 } else if (netdev_mc_empty(dev)) {
9854 /* Reject all multicast. */
9855 tg3_set_multi(tp, 0);
9856 } else {
9857 /* Accept one or more multicast(s). */
9858 struct netdev_hw_addr *ha;
9859 u32 mc_filter[4] = { 0, };
9860 u32 regidx;
9861 u32 bit;
9862 u32 crc;
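/* ~crc & 0x7f selects one of 128 hash-filter bits: bits 6:5
 * (regidx) pick one of the four 32-bit MAC_HASH_REG registers and
 * bits 4:0 pick the bit within it.
 */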
9864 netdev_for_each_mc_addr(ha, dev) {
9865 crc = calc_crc(ha->addr, ETH_ALEN);
9866 bit = ~crc & 0x7f;
9867 regidx = (bit & 0x60) >> 5;
9868 bit &= 0x1f;
9869 mc_filter[regidx] |= (1 << bit);
9872 tw32(MAC_HASH_REG_0, mc_filter[0]);
9873 tw32(MAC_HASH_REG_1, mc_filter[1]);
9874 tw32(MAC_HASH_REG_2, mc_filter[2]);
9875 tw32(MAC_HASH_REG_3, mc_filter[3]);
9878 if (rx_mode != tp->rx_mode) {
9879 tp->rx_mode = rx_mode;
9880 tw32_f(MAC_RX_MODE, rx_mode);
9881 udelay(10);
9885 static void tg3_set_rx_mode(struct net_device *dev)
9887 struct tg3 *tp = netdev_priv(dev);
9889 if (!netif_running(dev))
9890 return;
9892 tg3_full_lock(tp, 0);
9893 __tg3_set_rx_mode(dev);
9894 tg3_full_unlock(tp);
9897 static int tg3_get_regs_len(struct net_device *dev)
9899 return TG3_REG_BLK_SIZE;
9902 static void tg3_get_regs(struct net_device *dev,
9903 struct ethtool_regs *regs, void *_p)
9905 struct tg3 *tp = netdev_priv(dev);
9907 regs->version = 0;
9909 memset(_p, 0, TG3_REG_BLK_SIZE);
9911 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9912 return;
9914 tg3_full_lock(tp, 0);
9916 tg3_dump_legacy_regs(tp, (u32 *)_p);
9918 tg3_full_unlock(tp);
9921 static int tg3_get_eeprom_len(struct net_device *dev)
9923 struct tg3 *tp = netdev_priv(dev);
9925 return tp->nvram_size;
9928 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9930 struct tg3 *tp = netdev_priv(dev);
9931 int ret;
9932 u8 *pd;
9933 u32 i, offset, len, b_offset, b_count;
9934 __be32 val;
9936 if (tg3_flag(tp, NO_NVRAM))
9937 return -EINVAL;
9939 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9940 return -EAGAIN;
9942 offset = eeprom->offset;
9943 len = eeprom->len;
9944 eeprom->len = 0;
9946 eeprom->magic = TG3_EEPROM_MAGIC;
9948 if (offset & 3) {
9949 /* adjustments to start on required 4 byte boundary */
9950 b_offset = offset & 3;
9951 b_count = 4 - b_offset;
9952 if (b_count > len) {
9953 /* i.e. offset=1 len=2 */
9954 b_count = len;
9956 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9957 if (ret)
9958 return ret;
9959 memcpy(data, ((char *)&val) + b_offset, b_count);
9960 len -= b_count;
9961 offset += b_count;
9962 eeprom->len += b_count;
9965 /* read bytes up to the last 4 byte boundary */
9966 pd = &data[eeprom->len];
9967 for (i = 0; i < (len - (len & 3)); i += 4) {
9968 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9969 if (ret) {
9970 eeprom->len += i;
9971 return ret;
9973 memcpy(pd + i, &val, 4);
9975 eeprom->len += i;
9977 if (len & 3) {
9978 /* read last bytes not ending on 4 byte boundary */
9979 pd = &data[eeprom->len];
9980 b_count = len & 3;
9981 b_offset = offset + len - b_count;
9982 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9983 if (ret)
9984 return ret;
9985 memcpy(pd, &val, b_count);
9986 eeprom->len += b_count;
9988 return 0;
9991 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9993 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9995 struct tg3 *tp = netdev_priv(dev);
9996 int ret;
9997 u32 offset, len, b_offset, odd_len;
9998 u8 *buf;
9999 __be32 start, end;
10001 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10002 return -EAGAIN;
10004 if (tg3_flag(tp, NO_NVRAM) ||
10005 eeprom->magic != TG3_EEPROM_MAGIC)
10006 return -EINVAL;
10008 offset = eeprom->offset;
10009 len = eeprom->len;
10011 if ((b_offset = (offset & 3))) {
10012 /* adjustments to start on required 4 byte boundary */
10013 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10014 if (ret)
10015 return ret;
10016 len += b_offset;
10017 offset &= ~3;
10018 if (len < 4)
10019 len = 4;
10022 odd_len = 0;
10023 if (len & 3) {
10024 /* adjustments to end on required 4 byte boundary */
10025 odd_len = 1;
10026 len = (len + 3) & ~3;
10027 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10028 if (ret)
10029 return ret;
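/* Unaligned writes become read-modify-write: the 32-bit words
 * straddling either end were fetched above ('start'/'end'), the
 * caller's bytes are spliced between them in a bounce buffer below,
 * and whole words are written back.
 */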
10032 buf = data;
10033 if (b_offset || odd_len) {
10034 buf = kmalloc(len, GFP_KERNEL);
10035 if (!buf)
10036 return -ENOMEM;
10037 if (b_offset)
10038 memcpy(buf, &start, 4);
10039 if (odd_len)
10040 memcpy(buf+len-4, &end, 4);
10041 memcpy(buf + b_offset, data, eeprom->len);
10044 ret = tg3_nvram_write_block(tp, offset, len, buf);
10046 if (buf != data)
10047 kfree(buf);
10049 return ret;
10052 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10054 struct tg3 *tp = netdev_priv(dev);
10056 if (tg3_flag(tp, USE_PHYLIB)) {
10057 struct phy_device *phydev;
10058 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10059 return -EAGAIN;
10060 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10061 return phy_ethtool_gset(phydev, cmd);
10064 cmd->supported = (SUPPORTED_Autoneg);
10066 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10067 cmd->supported |= (SUPPORTED_1000baseT_Half |
10068 SUPPORTED_1000baseT_Full);
10070 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10071 cmd->supported |= (SUPPORTED_100baseT_Half |
10072 SUPPORTED_100baseT_Full |
10073 SUPPORTED_10baseT_Half |
10074 SUPPORTED_10baseT_Full |
10075 SUPPORTED_TP);
10076 cmd->port = PORT_TP;
10077 } else {
10078 cmd->supported |= SUPPORTED_FIBRE;
10079 cmd->port = PORT_FIBRE;
10082 cmd->advertising = tp->link_config.advertising;
10083 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10084 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10085 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10086 cmd->advertising |= ADVERTISED_Pause;
10087 } else {
10088 cmd->advertising |= ADVERTISED_Pause |
10089 ADVERTISED_Asym_Pause;
10091 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10092 cmd->advertising |= ADVERTISED_Asym_Pause;
10095 if (netif_running(dev)) {
10096 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10097 cmd->duplex = tp->link_config.active_duplex;
10098 } else {
10099 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10100 cmd->duplex = DUPLEX_INVALID;
10102 cmd->phy_address = tp->phy_addr;
10103 cmd->transceiver = XCVR_INTERNAL;
10104 cmd->autoneg = tp->link_config.autoneg;
10105 cmd->maxtxpkt = 0;
10106 cmd->maxrxpkt = 0;
10107 return 0;
10110 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10112 struct tg3 *tp = netdev_priv(dev);
10113 u32 speed = ethtool_cmd_speed(cmd);
10115 if (tg3_flag(tp, USE_PHYLIB)) {
10116 struct phy_device *phydev;
10117 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10118 return -EAGAIN;
10119 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10120 return phy_ethtool_sset(phydev, cmd);
10123 if (cmd->autoneg != AUTONEG_ENABLE &&
10124 cmd->autoneg != AUTONEG_DISABLE)
10125 return -EINVAL;
10127 if (cmd->autoneg == AUTONEG_DISABLE &&
10128 cmd->duplex != DUPLEX_FULL &&
10129 cmd->duplex != DUPLEX_HALF)
10130 return -EINVAL;
10132 if (cmd->autoneg == AUTONEG_ENABLE) {
10133 u32 mask = ADVERTISED_Autoneg |
10134 ADVERTISED_Pause |
10135 ADVERTISED_Asym_Pause;
10137 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10138 mask |= ADVERTISED_1000baseT_Half |
10139 ADVERTISED_1000baseT_Full;
10141 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10142 mask |= ADVERTISED_100baseT_Half |
10143 ADVERTISED_100baseT_Full |
10144 ADVERTISED_10baseT_Half |
10145 ADVERTISED_10baseT_Full |
10146 ADVERTISED_TP;
10147 else
10148 mask |= ADVERTISED_FIBRE;
10150 if (cmd->advertising & ~mask)
10151 return -EINVAL;
10153 mask &= (ADVERTISED_1000baseT_Half |
10154 ADVERTISED_1000baseT_Full |
10155 ADVERTISED_100baseT_Half |
10156 ADVERTISED_100baseT_Full |
10157 ADVERTISED_10baseT_Half |
10158 ADVERTISED_10baseT_Full);
10160 cmd->advertising &= mask;
10161 } else {
10162 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10163 if (speed != SPEED_1000)
10164 return -EINVAL;
10166 if (cmd->duplex != DUPLEX_FULL)
10167 return -EINVAL;
10168 } else {
10169 if (speed != SPEED_100 &&
10170 speed != SPEED_10)
10171 return -EINVAL;
10175 tg3_full_lock(tp, 0);
10177 tp->link_config.autoneg = cmd->autoneg;
10178 if (cmd->autoneg == AUTONEG_ENABLE) {
10179 tp->link_config.advertising = (cmd->advertising |
10180 ADVERTISED_Autoneg);
10181 tp->link_config.speed = SPEED_INVALID;
10182 tp->link_config.duplex = DUPLEX_INVALID;
10183 } else {
10184 tp->link_config.advertising = 0;
10185 tp->link_config.speed = speed;
10186 tp->link_config.duplex = cmd->duplex;
10189 tp->link_config.orig_speed = tp->link_config.speed;
10190 tp->link_config.orig_duplex = tp->link_config.duplex;
10191 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10193 if (netif_running(dev))
10194 tg3_setup_phy(tp, 1);
10196 tg3_full_unlock(tp);
10198 return 0;
10201 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10203 struct tg3 *tp = netdev_priv(dev);
10205 strcpy(info->driver, DRV_MODULE_NAME);
10206 strcpy(info->version, DRV_MODULE_VERSION);
10207 strcpy(info->fw_version, tp->fw_ver);
10208 strcpy(info->bus_info, pci_name(tp->pdev));
10211 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10213 struct tg3 *tp = netdev_priv(dev);
10215 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10216 wol->supported = WAKE_MAGIC;
10217 else
10218 wol->supported = 0;
10219 wol->wolopts = 0;
10220 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10221 wol->wolopts = WAKE_MAGIC;
10222 memset(&wol->sopass, 0, sizeof(wol->sopass));
10225 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10227 struct tg3 *tp = netdev_priv(dev);
10228 struct device *dp = &tp->pdev->dev;
10230 if (wol->wolopts & ~WAKE_MAGIC)
10231 return -EINVAL;
10232 if ((wol->wolopts & WAKE_MAGIC) &&
10233 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10234 return -EINVAL;
10236 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10238 spin_lock_bh(&tp->lock);
10239 if (device_may_wakeup(dp))
10240 tg3_flag_set(tp, WOL_ENABLE);
10241 else
10242 tg3_flag_clear(tp, WOL_ENABLE);
10243 spin_unlock_bh(&tp->lock);
10245 return 0;
10248 static u32 tg3_get_msglevel(struct net_device *dev)
10250 struct tg3 *tp = netdev_priv(dev);
10251 return tp->msg_enable;
10254 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10256 struct tg3 *tp = netdev_priv(dev);
10257 tp->msg_enable = value;
10260 static int tg3_nway_reset(struct net_device *dev)
10262 struct tg3 *tp = netdev_priv(dev);
10263 int r;
10265 if (!netif_running(dev))
10266 return -EAGAIN;
10268 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10269 return -EINVAL;
10271 if (tg3_flag(tp, USE_PHYLIB)) {
10272 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10273 return -EAGAIN;
10274 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10275 } else {
10276 u32 bmcr;
10278 spin_lock_bh(&tp->lock);
10279 r = -EINVAL;
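/* The first tg3_readphy() result below is discarded; it appears to
 * serve as a dummy read so the checked read that follows returns a
 * fresh BMCR value rather than stale latched state.
 */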
10280 tg3_readphy(tp, MII_BMCR, &bmcr);
10281 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10282 ((bmcr & BMCR_ANENABLE) ||
10283 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10284 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10285 BMCR_ANENABLE);
10286 r = 0;
10287 }
10288 spin_unlock_bh(&tp->lock);
10289 }
10291 return r;
10294 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10296 struct tg3 *tp = netdev_priv(dev);
10298 ering->rx_max_pending = tp->rx_std_ring_mask;
10299 ering->rx_mini_max_pending = 0;
10300 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10301 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10302 else
10303 ering->rx_jumbo_max_pending = 0;
10305 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10307 ering->rx_pending = tp->rx_pending;
10308 ering->rx_mini_pending = 0;
10309 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10310 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10311 else
10312 ering->rx_jumbo_pending = 0;
10314 ering->tx_pending = tp->napi[0].tx_pending;
10317 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10319 struct tg3 *tp = netdev_priv(dev);
10320 int i, irq_sync = 0, err = 0;
10322 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10323 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10324 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10325 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10326 (tg3_flag(tp, TSO_BUG) &&
10327 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10328 return -EINVAL;
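/* tx_pending must exceed MAX_SKB_FRAGS so a maximally-fragmented
 * skb (one descriptor per fragment plus the linear part) always
 * fits in the ring; the TSO_BUG workaround segments packets in the
 * driver and so needs the larger 3x headroom checked above.
 */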
10330 if (netif_running(dev)) {
10331 tg3_phy_stop(tp);
10332 tg3_netif_stop(tp);
10333 irq_sync = 1;
10334 }
10336 tg3_full_lock(tp, irq_sync);
10338 tp->rx_pending = ering->rx_pending;
10340 if (tg3_flag(tp, MAX_RXPEND_64) &&
10341 tp->rx_pending > 63)
10342 tp->rx_pending = 63;
10343 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10345 for (i = 0; i < tp->irq_max; i++)
10346 tp->napi[i].tx_pending = ering->tx_pending;
10348 if (netif_running(dev)) {
10349 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10350 err = tg3_restart_hw(tp, 1);
10351 if (!err)
10352 tg3_netif_start(tp);
10353 }
10355 tg3_full_unlock(tp);
10357 if (irq_sync && !err)
10358 tg3_phy_start(tp);
10360 return err;
10363 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10365 struct tg3 *tp = netdev_priv(dev);
10367 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10369 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10370 epause->rx_pause = 1;
10371 else
10372 epause->rx_pause = 0;
10374 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10375 epause->tx_pause = 1;
10376 else
10377 epause->tx_pause = 0;
10380 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10382 struct tg3 *tp = netdev_priv(dev);
10383 int err = 0;
10385 if (tg3_flag(tp, USE_PHYLIB)) {
10386 u32 newadv;
10387 struct phy_device *phydev;
10389 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10391 if (!(phydev->supported & SUPPORTED_Pause) ||
10392 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10393 (epause->rx_pause != epause->tx_pause)))
10394 return -EINVAL;
10396 tp->link_config.flowctrl = 0;
10397 if (epause->rx_pause) {
10398 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10400 if (epause->tx_pause) {
10401 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10402 newadv = ADVERTISED_Pause;
10403 } else
10404 newadv = ADVERTISED_Pause |
10405 ADVERTISED_Asym_Pause;
10406 } else if (epause->tx_pause) {
10407 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10408 newadv = ADVERTISED_Asym_Pause;
10409 } else
10410 newadv = 0;
10412 if (epause->autoneg)
10413 tg3_flag_set(tp, PAUSE_AUTONEG);
10414 else
10415 tg3_flag_clear(tp, PAUSE_AUTONEG);
10417 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10418 u32 oldadv = phydev->advertising &
10419 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10420 if (oldadv != newadv) {
10421 phydev->advertising &=
10422 ~(ADVERTISED_Pause |
10423 ADVERTISED_Asym_Pause);
10424 phydev->advertising |= newadv;
10425 if (phydev->autoneg) {
10426 /*
10427 * Always renegotiate the link to
10428 * inform our link partner of our
10429 * flow control settings, even if the
10430 * flow control is forced. Let
10431 * tg3_adjust_link() do the final
10432 * flow control setup.
10433 */
10434 return phy_start_aneg(phydev);
10435 }
10436 }
10438 if (!epause->autoneg)
10439 tg3_setup_flow_control(tp, 0, 0);
10440 } else {
10441 tp->link_config.orig_advertising &=
10442 ~(ADVERTISED_Pause |
10443 ADVERTISED_Asym_Pause);
10444 tp->link_config.orig_advertising |= newadv;
10446 } else {
10447 int irq_sync = 0;
10449 if (netif_running(dev)) {
10450 tg3_netif_stop(tp);
10451 irq_sync = 1;
10452 }
10454 tg3_full_lock(tp, irq_sync);
10456 if (epause->autoneg)
10457 tg3_flag_set(tp, PAUSE_AUTONEG);
10458 else
10459 tg3_flag_clear(tp, PAUSE_AUTONEG);
10460 if (epause->rx_pause)
10461 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10462 else
10463 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10464 if (epause->tx_pause)
10465 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10466 else
10467 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10469 if (netif_running(dev)) {
10470 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10471 err = tg3_restart_hw(tp, 1);
10472 if (!err)
10473 tg3_netif_start(tp);
10474 }
10476 tg3_full_unlock(tp);
10479 return err;
10482 static int tg3_get_sset_count(struct net_device *dev, int sset)
10484 switch (sset) {
10485 case ETH_SS_TEST:
10486 return TG3_NUM_TEST;
10487 case ETH_SS_STATS:
10488 return TG3_NUM_STATS;
10489 default:
10490 return -EOPNOTSUPP;
10494 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10496 switch (stringset) {
10497 case ETH_SS_STATS:
10498 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10499 break;
10500 case ETH_SS_TEST:
10501 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10502 break;
10503 default:
10504 WARN_ON(1); /* we need a WARN() */
10505 break;
10509 static int tg3_set_phys_id(struct net_device *dev,
10510 enum ethtool_phys_id_state state)
10512 struct tg3 *tp = netdev_priv(dev);
10514 if (!netif_running(tp->dev))
10515 return -EAGAIN;
10517 switch (state) {
10518 case ETHTOOL_ID_ACTIVE:
10519 return 1; /* cycle on/off once per second */
10521 case ETHTOOL_ID_ON:
10522 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10523 LED_CTRL_1000MBPS_ON |
10524 LED_CTRL_100MBPS_ON |
10525 LED_CTRL_10MBPS_ON |
10526 LED_CTRL_TRAFFIC_OVERRIDE |
10527 LED_CTRL_TRAFFIC_BLINK |
10528 LED_CTRL_TRAFFIC_LED);
10529 break;
10531 case ETHTOOL_ID_OFF:
10532 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10533 LED_CTRL_TRAFFIC_OVERRIDE);
10534 break;
10536 case ETHTOOL_ID_INACTIVE:
10537 tw32(MAC_LED_CTRL, tp->led_ctrl);
10538 break;
10541 return 0;
10544 static void tg3_get_ethtool_stats(struct net_device *dev,
10545 struct ethtool_stats *estats, u64 *tmp_stats)
10547 struct tg3 *tp = netdev_priv(dev);
10548 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10551 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10553 int i;
10554 __be32 *buf;
10555 u32 offset = 0, len = 0;
10556 u32 magic, val;
10558 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10559 return NULL;
10561 if (magic == TG3_EEPROM_MAGIC) {
10562 for (offset = TG3_NVM_DIR_START;
10563 offset < TG3_NVM_DIR_END;
10564 offset += TG3_NVM_DIRENT_SIZE) {
10565 if (tg3_nvram_read(tp, offset, &val))
10566 return NULL;
10568 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10569 TG3_NVM_DIRTYPE_EXTVPD)
10570 break;
10571 }
10573 if (offset != TG3_NVM_DIR_END) {
10574 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10575 if (tg3_nvram_read(tp, offset + 4, &offset))
10576 return NULL;
10578 offset = tg3_nvram_logical_addr(tp, offset);
10579 }
10580 }
10582 if (!offset || !len) {
10583 offset = TG3_NVM_VPD_OFF;
10584 len = TG3_NVM_VPD_LEN;
10585 }
10587 buf = kmalloc(len, GFP_KERNEL);
10588 if (buf == NULL)
10589 return NULL;
10591 if (magic == TG3_EEPROM_MAGIC) {
10592 for (i = 0; i < len; i += 4) {
10593 /* The data is in little-endian format in NVRAM.
10594 * Use the big-endian read routines to preserve
10595 * the byte order as it exists in NVRAM.
10596 */
10597 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10598 goto error;
10600 } else {
10601 u8 *ptr;
10602 ssize_t cnt;
10603 unsigned int pos = 0;
10605 ptr = (u8 *)&buf[0];
10606 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10607 cnt = pci_read_vpd(tp->pdev, pos,
10608 len - pos, ptr);
10609 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10610 cnt = 0;
10611 else if (cnt < 0)
10612 goto error;
10613 }
10614 if (pos != len)
10615 goto error;
10616 }
10618 *vpdlen = len;
10620 return buf;
10622 error:
10623 kfree(buf);
10624 return NULL;
10627 #define NVRAM_TEST_SIZE 0x100
10628 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10629 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10630 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10631 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
10632 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
10633 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
10634 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10635 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10637 static int tg3_test_nvram(struct tg3 *tp)
10639 u32 csum, magic, len;
10640 __be32 *buf;
10641 int i, j, k, err = 0, size;
10643 if (tg3_flag(tp, NO_NVRAM))
10644 return 0;
10646 if (tg3_nvram_read(tp, 0, &magic) != 0)
10647 return -EIO;
10649 if (magic == TG3_EEPROM_MAGIC)
10650 size = NVRAM_TEST_SIZE;
10651 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10652 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10653 TG3_EEPROM_SB_FORMAT_1) {
10654 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10655 case TG3_EEPROM_SB_REVISION_0:
10656 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10657 break;
10658 case TG3_EEPROM_SB_REVISION_2:
10659 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10660 break;
10661 case TG3_EEPROM_SB_REVISION_3:
10662 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10663 break;
10664 case TG3_EEPROM_SB_REVISION_4:
10665 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10666 break;
10667 case TG3_EEPROM_SB_REVISION_5:
10668 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10669 break;
10670 case TG3_EEPROM_SB_REVISION_6:
10671 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10672 break;
10673 default:
10674 return -EIO;
10676 } else
10677 return 0;
10678 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10679 size = NVRAM_SELFBOOT_HW_SIZE;
10680 else
10681 return -EIO;
10683 buf = kmalloc(size, GFP_KERNEL);
10684 if (buf == NULL)
10685 return -ENOMEM;
10687 err = -EIO;
10688 for (i = 0, j = 0; i < size; i += 4, j++) {
10689 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10690 if (err)
10691 break;
10692 }
10693 if (i < size)
10694 goto out;
10696 /* Selfboot format */
10697 magic = be32_to_cpu(buf[0]);
10698 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10699 TG3_EEPROM_MAGIC_FW) {
10700 u8 *buf8 = (u8 *) buf, csum8 = 0;
10702 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10703 TG3_EEPROM_SB_REVISION_2) {
10704 /* For rev 2, the csum doesn't include the MBA. */
10705 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10706 csum8 += buf8[i];
10707 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10708 csum8 += buf8[i];
10709 } else {
10710 for (i = 0; i < size; i++)
10711 csum8 += buf8[i];
10712 }
10714 if (csum8 == 0) {
10715 err = 0;
10716 goto out;
10717 }
10719 err = -EIO;
10720 goto out;
10721 }
10723 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10724 TG3_EEPROM_MAGIC_HW) {
10725 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10726 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10727 u8 *buf8 = (u8 *) buf;
10729 /* Separate the parity bits and the data bytes. */
10730 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10731 if ((i == 0) || (i == 8)) {
10732 int l;
10733 u8 msk;
10735 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10736 parity[k++] = buf8[i] & msk;
10737 i++;
10738 } else if (i == 16) {
10739 int l;
10740 u8 msk;
10742 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10743 parity[k++] = buf8[i] & msk;
10744 i++;
10746 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10747 parity[k++] = buf8[i] & msk;
10748 i++;
10749 }
10750 data[j++] = buf8[i];
10751 }
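/* The check below enforces odd parity: each data byte combined with
 * its parity bit must have an odd number of set bits, so an
 * odd-weight byte must have a clear parity bit and an even-weight
 * byte a set one.
 */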
10753 err = -EIO;
10754 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10755 u8 hw8 = hweight8(data[i]);
10757 if ((hw8 & 0x1) && parity[i])
10758 goto out;
10759 else if (!(hw8 & 0x1) && !parity[i])
10760 goto out;
10762 err = 0;
10763 goto out;
10766 err = -EIO;
10768 /* Bootstrap checksum at offset 0x10 */
10769 csum = calc_crc((unsigned char *) buf, 0x10);
10770 if (csum != le32_to_cpu(buf[0x10/4]))
10771 goto out;
10773 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10774 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10775 if (csum != le32_to_cpu(buf[0xfc/4]))
10776 goto out;
10778 kfree(buf);
10780 buf = tg3_vpd_readblock(tp, &len);
10781 if (!buf)
10782 return -ENOMEM;
10784 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
10785 if (i > 0) {
10786 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10787 if (j < 0)
10788 goto out;
10790 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
10791 goto out;
10793 i += PCI_VPD_LRDT_TAG_SIZE;
10794 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10795 PCI_VPD_RO_KEYWORD_CHKSUM);
10796 if (j > 0) {
10797 u8 csum8 = 0;
10799 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10801 for (i = 0; i <= j; i++)
10802 csum8 += ((u8 *)buf)[i];
10804 if (csum8)
10805 goto out;
10809 err = 0;
10811 out:
10812 kfree(buf);
10813 return err;
10816 #define TG3_SERDES_TIMEOUT_SEC 2
10817 #define TG3_COPPER_TIMEOUT_SEC 6
10819 static int tg3_test_link(struct tg3 *tp)
10821 int i, max;
10823 if (!netif_running(tp->dev))
10824 return -ENODEV;
10826 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10827 max = TG3_SERDES_TIMEOUT_SEC;
10828 else
10829 max = TG3_COPPER_TIMEOUT_SEC;
10831 for (i = 0; i < max; i++) {
10832 if (netif_carrier_ok(tp->dev))
10833 return 0;
10835 if (msleep_interruptible(1000))
10836 break;
10839 return -EIO;
10842 /* Only test the commonly used registers */
10843 static int tg3_test_registers(struct tg3 *tp)
10845 int i, is_5705, is_5750;
10846 u32 offset, read_mask, write_mask, val, save_val, read_val;
10847 static struct {
10848 u16 offset;
10849 u16 flags;
10850 #define TG3_FL_5705 0x1
10851 #define TG3_FL_NOT_5705 0x2
10852 #define TG3_FL_NOT_5788 0x4
10853 #define TG3_FL_NOT_5750 0x8
10854 u32 read_mask;
10855 u32 write_mask;
10856 } reg_tbl[] = {
10857 /* MAC Control Registers */
10858 { MAC_MODE, TG3_FL_NOT_5705,
10859 0x00000000, 0x00ef6f8c },
10860 { MAC_MODE, TG3_FL_5705,
10861 0x00000000, 0x01ef6b8c },
10862 { MAC_STATUS, TG3_FL_NOT_5705,
10863 0x03800107, 0x00000000 },
10864 { MAC_STATUS, TG3_FL_5705,
10865 0x03800100, 0x00000000 },
10866 { MAC_ADDR_0_HIGH, 0x0000,
10867 0x00000000, 0x0000ffff },
10868 { MAC_ADDR_0_LOW, 0x0000,
10869 0x00000000, 0xffffffff },
10870 { MAC_RX_MTU_SIZE, 0x0000,
10871 0x00000000, 0x0000ffff },
10872 { MAC_TX_MODE, 0x0000,
10873 0x00000000, 0x00000070 },
10874 { MAC_TX_LENGTHS, 0x0000,
10875 0x00000000, 0x00003fff },
10876 { MAC_RX_MODE, TG3_FL_NOT_5705,
10877 0x00000000, 0x000007fc },
10878 { MAC_RX_MODE, TG3_FL_5705,
10879 0x00000000, 0x000007dc },
10880 { MAC_HASH_REG_0, 0x0000,
10881 0x00000000, 0xffffffff },
10882 { MAC_HASH_REG_1, 0x0000,
10883 0x00000000, 0xffffffff },
10884 { MAC_HASH_REG_2, 0x0000,
10885 0x00000000, 0xffffffff },
10886 { MAC_HASH_REG_3, 0x0000,
10887 0x00000000, 0xffffffff },
10889 /* Receive Data and Receive BD Initiator Control Registers. */
10890 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10891 0x00000000, 0xffffffff },
10892 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10893 0x00000000, 0xffffffff },
10894 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10895 0x00000000, 0x00000003 },
10896 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10897 0x00000000, 0xffffffff },
10898 { RCVDBDI_STD_BD+0, 0x0000,
10899 0x00000000, 0xffffffff },
10900 { RCVDBDI_STD_BD+4, 0x0000,
10901 0x00000000, 0xffffffff },
10902 { RCVDBDI_STD_BD+8, 0x0000,
10903 0x00000000, 0xffff0002 },
10904 { RCVDBDI_STD_BD+0xc, 0x0000,
10905 0x00000000, 0xffffffff },
10907 /* Receive BD Initiator Control Registers. */
10908 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10909 0x00000000, 0xffffffff },
10910 { RCVBDI_STD_THRESH, TG3_FL_5705,
10911 0x00000000, 0x000003ff },
10912 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10913 0x00000000, 0xffffffff },
10915 /* Host Coalescing Control Registers. */
10916 { HOSTCC_MODE, TG3_FL_NOT_5705,
10917 0x00000000, 0x00000004 },
10918 { HOSTCC_MODE, TG3_FL_5705,
10919 0x00000000, 0x000000f6 },
10920 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10921 0x00000000, 0xffffffff },
10922 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10923 0x00000000, 0x000003ff },
10924 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10925 0x00000000, 0xffffffff },
10926 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10927 0x00000000, 0x000003ff },
10928 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10929 0x00000000, 0xffffffff },
10930 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10931 0x00000000, 0x000000ff },
10932 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10933 0x00000000, 0xffffffff },
10934 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10935 0x00000000, 0x000000ff },
10936 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10937 0x00000000, 0xffffffff },
10938 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10939 0x00000000, 0xffffffff },
10940 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10941 0x00000000, 0xffffffff },
10942 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10943 0x00000000, 0x000000ff },
10944 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10945 0x00000000, 0xffffffff },
10946 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10947 0x00000000, 0x000000ff },
10948 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10949 0x00000000, 0xffffffff },
10950 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10951 0x00000000, 0xffffffff },
10952 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10953 0x00000000, 0xffffffff },
10954 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10955 0x00000000, 0xffffffff },
10956 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10957 0x00000000, 0xffffffff },
10958 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10959 0xffffffff, 0x00000000 },
10960 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10961 0xffffffff, 0x00000000 },
10963 /* Buffer Manager Control Registers. */
10964 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10965 0x00000000, 0x007fff80 },
10966 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10967 0x00000000, 0x007fffff },
10968 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10969 0x00000000, 0x0000003f },
10970 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10971 0x00000000, 0x000001ff },
10972 { BUFMGR_MB_HIGH_WATER, 0x0000,
10973 0x00000000, 0x000001ff },
10974 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10975 0xffffffff, 0x00000000 },
10976 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10977 0xffffffff, 0x00000000 },
10979 /* Mailbox Registers */
10980 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10981 0x00000000, 0x000001ff },
10982 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10983 0x00000000, 0x000001ff },
10984 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10985 0x00000000, 0x000007ff },
10986 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10987 0x00000000, 0x000001ff },
10989 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10990 };
10992 is_5705 = is_5750 = 0;
10993 if (tg3_flag(tp, 5705_PLUS)) {
10994 is_5705 = 1;
10995 if (tg3_flag(tp, 5750_PLUS))
10996 is_5750 = 1;
10999 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11000 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11001 continue;
11003 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11004 continue;
11006 if (tg3_flag(tp, IS_5788) &&
11007 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11008 continue;
11010 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11011 continue;
11013 offset = (u32) reg_tbl[i].offset;
11014 read_mask = reg_tbl[i].read_mask;
11015 write_mask = reg_tbl[i].write_mask;
11017 /* Save the original register content */
11018 save_val = tr32(offset);
11020 /* Determine the read-only value. */
11021 read_val = save_val & read_mask;
11023 /* Write zero to the register, then make sure the read-only bits
11024 * are not changed and the read/write bits are all zeros.
11025 */
11026 tw32(offset, 0);
11028 val = tr32(offset);
11030 /* Test the read-only and read/write bits. */
11031 if (((val & read_mask) != read_val) || (val & write_mask))
11032 goto out;
11034 /* Write ones to all the bits defined by RdMask and WrMask, then
11035 * make sure the read-only bits are not changed and the
11036 * read/write bits are all ones.
11037 */
11038 tw32(offset, read_mask | write_mask);
11040 val = tr32(offset);
11042 /* Test the read-only bits. */
11043 if ((val & read_mask) != read_val)
11044 goto out;
11046 /* Test the read/write bits. */
11047 if ((val & write_mask) != write_mask)
11048 goto out;
11050 tw32(offset, save_val);
11053 return 0;
11055 out:
11056 if (netif_msg_hw(tp))
11057 netdev_err(tp->dev,
11058 "Register test failed at offset %x\n", offset);
11059 tw32(offset, save_val);
11060 return -EIO;
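/* In reg_tbl above, read_mask marks bits that must keep their value
 * across writes (read-only) and write_mask marks bits that must
 * accept both all-zeros and all-ones; bits in neither mask are
 * don't-cares for the test.
 */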
11063 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11065 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11066 int i;
11067 u32 j;
11069 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11070 for (j = 0; j < len; j += 4) {
11071 u32 val;
11073 tg3_write_mem(tp, offset + j, test_pattern[i]);
11074 tg3_read_mem(tp, offset + j, &val);
11075 if (val != test_pattern[i])
11076 return -EIO;
11079 return 0;
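/* The patterns walk all-zeros, all-ones and an alternating value
 * through every 32-bit word of the window, a simple check for stuck
 * bits.
 */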
11082 static int tg3_test_memory(struct tg3 *tp)
11084 static struct mem_entry {
11085 u32 offset;
11086 u32 len;
11087 } mem_tbl_570x[] = {
11088 { 0x00000000, 0x00b50},
11089 { 0x00002000, 0x1c000},
11090 { 0xffffffff, 0x00000}
11091 }, mem_tbl_5705[] = {
11092 { 0x00000100, 0x0000c},
11093 { 0x00000200, 0x00008},
11094 { 0x00004000, 0x00800},
11095 { 0x00006000, 0x01000},
11096 { 0x00008000, 0x02000},
11097 { 0x00010000, 0x0e000},
11098 { 0xffffffff, 0x00000}
11099 }, mem_tbl_5755[] = {
11100 { 0x00000200, 0x00008},
11101 { 0x00004000, 0x00800},
11102 { 0x00006000, 0x00800},
11103 { 0x00008000, 0x02000},
11104 { 0x00010000, 0x0c000},
11105 { 0xffffffff, 0x00000}
11106 }, mem_tbl_5906[] = {
11107 { 0x00000200, 0x00008},
11108 { 0x00004000, 0x00400},
11109 { 0x00006000, 0x00400},
11110 { 0x00008000, 0x01000},
11111 { 0x00010000, 0x01000},
11112 { 0xffffffff, 0x00000}
11113 }, mem_tbl_5717[] = {
11114 { 0x00000200, 0x00008},
11115 { 0x00010000, 0x0a000},
11116 { 0x00020000, 0x13c00},
11117 { 0xffffffff, 0x00000}
11118 }, mem_tbl_57765[] = {
11119 { 0x00000200, 0x00008},
11120 { 0x00004000, 0x00800},
11121 { 0x00006000, 0x09800},
11122 { 0x00010000, 0x0a000},
11123 { 0xffffffff, 0x00000}
11124 };
11125 struct mem_entry *mem_tbl;
11126 int err = 0;
11127 int i;
11129 if (tg3_flag(tp, 5717_PLUS))
11130 mem_tbl = mem_tbl_5717;
11131 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11132 mem_tbl = mem_tbl_57765;
11133 else if (tg3_flag(tp, 5755_PLUS))
11134 mem_tbl = mem_tbl_5755;
11135 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11136 mem_tbl = mem_tbl_5906;
11137 else if (tg3_flag(tp, 5705_PLUS))
11138 mem_tbl = mem_tbl_5705;
11139 else
11140 mem_tbl = mem_tbl_570x;
11142 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11143 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11144 if (err)
11145 break;
11148 return err;
11151 #define TG3_MAC_LOOPBACK 0
11152 #define TG3_PHY_LOOPBACK 1
11153 #define TG3_TSO_LOOPBACK 2
11155 #define TG3_TSO_MSS 500
11157 #define TG3_TSO_IP_HDR_LEN 20
11158 #define TG3_TSO_TCP_HDR_LEN 20
11159 #define TG3_TSO_TCP_OPT_LEN 12
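/* tg3_tso_header below is a canned frame template: a 2-byte Ethernet
 * type (0x0800 = IPv4), a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2,
 * protocol TCP) and a 32-byte TCP header (20 bytes plus 12 bytes of
 * timestamp option), matching the TG3_TSO_*_LEN values defined above.
 */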
11161 static const u8 tg3_tso_header[] = {
11162 0x08, 0x00,
11163 0x45, 0x00, 0x00, 0x00,
11164 0x00, 0x00, 0x40, 0x00,
11165 0x40, 0x06, 0x00, 0x00,
11166 0x0a, 0x00, 0x00, 0x01,
11167 0x0a, 0x00, 0x00, 0x02,
11168 0x0d, 0x00, 0xe0, 0x00,
11169 0x00, 0x00, 0x01, 0x00,
11170 0x00, 0x00, 0x02, 0x00,
11171 0x80, 0x10, 0x10, 0x00,
11172 0x14, 0x09, 0x00, 0x00,
11173 0x01, 0x01, 0x08, 0x0a,
11174 0x11, 0x11, 0x11, 0x11,
11175 0x11, 0x11, 0x11, 0x11,
11176 };
11178 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11180 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11181 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11182 struct sk_buff *skb, *rx_skb;
11183 u8 *tx_data;
11184 dma_addr_t map;
11185 int num_pkts, tx_len, rx_len, i, err;
11186 struct tg3_rx_buffer_desc *desc;
11187 struct tg3_napi *tnapi, *rnapi;
11188 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11190 tnapi = &tp->napi[0];
11191 rnapi = &tp->napi[0];
11192 if (tp->irq_cnt > 1) {
11193 if (tg3_flag(tp, ENABLE_RSS))
11194 rnapi = &tp->napi[1];
11195 if (tg3_flag(tp, ENABLE_TSS))
11196 tnapi = &tp->napi[1];
11198 coal_now = tnapi->coal_now | rnapi->coal_now;
11200 if (loopback_mode == TG3_MAC_LOOPBACK) {
11201 /* HW errata - mac loopback fails in some cases on 5780.
11202 * Normal traffic and PHY loopback are not affected by
11203 * errata. Also, the MAC loopback test is deprecated for
11204 * all newer ASIC revisions.
11205 */
11206 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11207 tg3_flag(tp, CPMU_PRESENT))
11208 return 0;
11210 mac_mode = tp->mac_mode &
11211 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11212 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11213 if (!tg3_flag(tp, 5705_PLUS))
11214 mac_mode |= MAC_MODE_LINK_POLARITY;
11215 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11216 mac_mode |= MAC_MODE_PORT_MODE_MII;
11217 else
11218 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11219 tw32(MAC_MODE, mac_mode);
11220 } else {
11221 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11222 tg3_phy_fet_toggle_apd(tp, false);
11223 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11224 } else
11225 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11227 tg3_phy_toggle_automdix(tp, 0);
11229 tg3_writephy(tp, MII_BMCR, val);
11230 udelay(40);
11232 mac_mode = tp->mac_mode &
11233 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11234 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11235 tg3_writephy(tp, MII_TG3_FET_PTEST,
11236 MII_TG3_FET_PTEST_FRC_TX_LINK |
11237 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11238 /* The write needs to be flushed for the AC131 */
11239 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11240 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11241 mac_mode |= MAC_MODE_PORT_MODE_MII;
11242 } else
11243 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11245 /* reset to prevent losing 1st rx packet intermittently */
11246 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11247 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11248 udelay(10);
11249 tw32_f(MAC_RX_MODE, tp->rx_mode);
11250 }
11251 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11252 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11253 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11254 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11255 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11256 mac_mode |= MAC_MODE_LINK_POLARITY;
11257 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11258 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11259 }
11260 tw32(MAC_MODE, mac_mode);
11262 /* Wait for link */
11263 for (i = 0; i < 100; i++) {
11264 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11265 break;
11266 mdelay(1);
11267 }
11268 }
11270 err = -EIO;
11272 tx_len = pktsz;
11273 skb = netdev_alloc_skb(tp->dev, tx_len);
11274 if (!skb)
11275 return -ENOMEM;
11277 tx_data = skb_put(skb, tx_len);
11278 memcpy(tx_data, tp->dev->dev_addr, 6);
11279 memset(tx_data + 6, 0x0, 8);
11281 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11283 if (loopback_mode == TG3_TSO_LOOPBACK) {
11284 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11286 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11287 TG3_TSO_TCP_OPT_LEN;
11289 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11290 sizeof(tg3_tso_header));
11291 mss = TG3_TSO_MSS;
11293 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11294 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11296 /* Set the total length field in the IP header */
11297 iph->tot_len = htons((u16)(mss + hdr_len));
11299 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11300 TXD_FLAG_CPU_POST_DMA);
11302 if (tg3_flag(tp, HW_TSO_1) ||
11303 tg3_flag(tp, HW_TSO_2) ||
11304 tg3_flag(tp, HW_TSO_3)) {
11305 struct tcphdr *th;
11306 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11307 th = (struct tcphdr *)&tx_data[val];
11308 th->check = 0;
11309 } else
11310 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11312 if (tg3_flag(tp, HW_TSO_3)) {
11313 mss |= (hdr_len & 0xc) << 12;
11314 if (hdr_len & 0x10)
11315 base_flags |= 0x00000010;
11316 base_flags |= (hdr_len & 0x3e0) << 5;
11317 } else if (tg3_flag(tp, HW_TSO_2))
11318 mss |= hdr_len << 9;
11319 else if (tg3_flag(tp, HW_TSO_1) ||
11320 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11321 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11322 } else {
11323 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11326 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11327 } else {
11328 num_pkts = 1;
11329 data_off = ETH_HLEN;
11332 for (i = data_off; i < tx_len; i++)
11333 tx_data[i] = (u8) (i & 0xff);
11335 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11336 if (pci_dma_mapping_error(tp->pdev, map)) {
11337 dev_kfree_skb(skb);
11338 return -EIO;
11339 }
11341 val = tnapi->tx_prod;
11342 tnapi->tx_buffers[val].skb = skb;
11343 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11345 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11346 rnapi->coal_now);
11348 udelay(10);
11350 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11352 tg3_tx_set_bd(tnapi, tnapi->tx_prod, map, tx_len,
11353 base_flags | TXD_FLAG_END, mss, 0);
11355 tnapi->tx_prod++;
11357 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11358 tr32_mailbox(tnapi->prodmbox);
11360 udelay(10);
11362 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11363 for (i = 0; i < 35; i++) {
11364 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11365 coal_now);
11367 udelay(10);
11369 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11370 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11371 if ((tx_idx == tnapi->tx_prod) &&
11372 (rx_idx == (rx_start_idx + num_pkts)))
11373 break;
11374 }
11376 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
11377 dev_kfree_skb(skb);
11379 if (tx_idx != tnapi->tx_prod)
11380 goto out;
11382 if (rx_idx != rx_start_idx + num_pkts)
11383 goto out;
11385 val = data_off;
11386 while (rx_idx != rx_start_idx) {
11387 desc = &rnapi->rx_rcb[rx_start_idx++];
11388 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11389 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11391 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11392 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11393 goto out;
11395 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11396 - ETH_FCS_LEN;
11398 if (loopback_mode != TG3_TSO_LOOPBACK) {
11399 if (rx_len != tx_len)
11400 goto out;
11402 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11403 if (opaque_key != RXD_OPAQUE_RING_STD)
11404 goto out;
11405 } else {
11406 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11407 goto out;
11409 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11410 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11411 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11412 goto out;
11413 }
11415 if (opaque_key == RXD_OPAQUE_RING_STD) {
11416 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11417 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11418 mapping);
11419 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11420 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11421 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11422 mapping);
11423 } else
11424 goto out;
11426 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11427 PCI_DMA_FROMDEVICE);
11429 for (i = data_off; i < rx_len; i++, val++) {
11430 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11431 goto out;
11432 }
11433 }
11435 err = 0;
11437 /* tg3_free_rings will unmap and free the rx_skb */
11438 out:
11439 return err;
11442 #define TG3_STD_LOOPBACK_FAILED 1
11443 #define TG3_JMB_LOOPBACK_FAILED 2
11444 #define TG3_TSO_LOOPBACK_FAILED 4
11446 #define TG3_MAC_LOOPBACK_SHIFT 0
11447 #define TG3_PHY_LOOPBACK_SHIFT 4
11448 #define TG3_LOOPBACK_FAILED 0x00000077
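/* i.e. TG3_LOOPBACK_FAILED ==
 *	((TG3_STD_LOOPBACK_FAILED | TG3_JMB_LOOPBACK_FAILED |
 *	  TG3_TSO_LOOPBACK_FAILED) << TG3_MAC_LOOPBACK_SHIFT) |
 *	((TG3_STD_LOOPBACK_FAILED | TG3_JMB_LOOPBACK_FAILED |
 *	  TG3_TSO_LOOPBACK_FAILED) << TG3_PHY_LOOPBACK_SHIFT),
 * every loopback variant failed in both the MAC and PHY nibbles.
 */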
11450 static int tg3_test_loopback(struct tg3 *tp)
11452 int err = 0;
11453 u32 eee_cap, cpmuctrl = 0;
11455 if (!netif_running(tp->dev))
11456 return TG3_LOOPBACK_FAILED;
11458 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11459 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11461 err = tg3_reset_hw(tp, 1);
11462 if (err) {
11463 err = TG3_LOOPBACK_FAILED;
11464 goto done;
11467 if (tg3_flag(tp, ENABLE_RSS)) {
11468 int i;
11470 /* Reroute all rx packets to the 1st queue */
11471 for (i = MAC_RSS_INDIR_TBL_0;
11472 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11473 tw32(i, 0x0);
11474 }
11476 /* Turn off gphy autopowerdown. */
11477 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11478 tg3_phy_toggle_apd(tp, false);
11480 if (tg3_flag(tp, CPMU_PRESENT)) {
11481 int i;
11482 u32 status;
11484 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11486 /* Wait for up to 40 microseconds to acquire lock. */
11487 for (i = 0; i < 4; i++) {
11488 status = tr32(TG3_CPMU_MUTEX_GNT);
11489 if (status == CPMU_MUTEX_GNT_DRIVER)
11490 break;
11491 udelay(10);
11492 }
11494 if (status != CPMU_MUTEX_GNT_DRIVER) {
11495 err = TG3_LOOPBACK_FAILED;
11496 goto done;
11497 }
11499 /* Turn off link-based power management. */
11500 cpmuctrl = tr32(TG3_CPMU_CTRL);
11501 tw32(TG3_CPMU_CTRL,
11502 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11503 CPMU_CTRL_LINK_AWARE_MODE));
11506 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11507 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11509 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11510 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11511 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11513 if (tg3_flag(tp, CPMU_PRESENT)) {
11514 tw32(TG3_CPMU_CTRL, cpmuctrl);
11516 /* Release the mutex */
11517 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11518 }
11520 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11521 !tg3_flag(tp, USE_PHYLIB)) {
11522 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11523 err |= TG3_STD_LOOPBACK_FAILED <<
11524 TG3_PHY_LOOPBACK_SHIFT;
11525 if (tg3_flag(tp, TSO_CAPABLE) &&
11526 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11527 err |= TG3_TSO_LOOPBACK_FAILED <<
11528 TG3_PHY_LOOPBACK_SHIFT;
11529 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11530 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11531 err |= TG3_JMB_LOOPBACK_FAILED <<
11532 TG3_PHY_LOOPBACK_SHIFT;
11533 }
11535 /* Re-enable gphy autopowerdown. */
11536 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11537 tg3_phy_toggle_apd(tp, true);
11539 done:
11540 tp->phy_flags |= eee_cap;
11542 return err;
11545 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11546 u64 *data)
11548 struct tg3 *tp = netdev_priv(dev);
11550 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11551 tg3_power_up(tp)) {
11552 etest->flags |= ETH_TEST_FL_FAILED;
11553 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11554 return;
11557 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11559 if (tg3_test_nvram(tp) != 0) {
11560 etest->flags |= ETH_TEST_FL_FAILED;
11561 data[0] = 1;
11562 }
11563 if (tg3_test_link(tp) != 0) {
11564 etest->flags |= ETH_TEST_FL_FAILED;
11565 data[1] = 1;
11566 }
11567 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11568 int err, err2 = 0, irq_sync = 0;
11570 if (netif_running(dev)) {
11571 tg3_phy_stop(tp);
11572 tg3_netif_stop(tp);
11573 irq_sync = 1;
11574 }
11576 tg3_full_lock(tp, irq_sync);
11578 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11579 err = tg3_nvram_lock(tp);
11580 tg3_halt_cpu(tp, RX_CPU_BASE);
11581 if (!tg3_flag(tp, 5705_PLUS))
11582 tg3_halt_cpu(tp, TX_CPU_BASE);
11583 if (!err)
11584 tg3_nvram_unlock(tp);
11586 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11587 tg3_phy_reset(tp);
11589 if (tg3_test_registers(tp) != 0) {
11590 etest->flags |= ETH_TEST_FL_FAILED;
11591 data[2] = 1;
11592 }
11593 if (tg3_test_memory(tp) != 0) {
11594 etest->flags |= ETH_TEST_FL_FAILED;
11595 data[3] = 1;
11596 }
11597 if ((data[4] = tg3_test_loopback(tp)) != 0)
11598 etest->flags |= ETH_TEST_FL_FAILED;
11600 tg3_full_unlock(tp);
11602 if (tg3_test_interrupt(tp) != 0) {
11603 etest->flags |= ETH_TEST_FL_FAILED;
11604 data[5] = 1;
11605 }
11607 tg3_full_lock(tp, 0);
11609 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11610 if (netif_running(dev)) {
11611 tg3_flag_set(tp, INIT_COMPLETE);
11612 err2 = tg3_restart_hw(tp, 1);
11613 if (!err2)
11614 tg3_netif_start(tp);
11615 }
11617 tg3_full_unlock(tp);
11619 if (irq_sync && !err2)
11620 tg3_phy_start(tp);
11622 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11623 tg3_power_down(tp);
11627 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11629 struct mii_ioctl_data *data = if_mii(ifr);
11630 struct tg3 *tp = netdev_priv(dev);
11631 int err;
11633 if (tg3_flag(tp, USE_PHYLIB)) {
11634 struct phy_device *phydev;
11635 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11636 return -EAGAIN;
11637 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11638 return phy_mii_ioctl(phydev, ifr, cmd);
11641 switch (cmd) {
11642 case SIOCGMIIPHY:
11643 data->phy_id = tp->phy_addr;
11645 /* fallthru */
11646 case SIOCGMIIREG: {
11647 u32 mii_regval;
11649 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11650 break; /* We have no PHY */
11652 if (!netif_running(dev))
11653 return -EAGAIN;
11655 spin_lock_bh(&tp->lock);
11656 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11657 spin_unlock_bh(&tp->lock);
11659 data->val_out = mii_regval;
11661 return err;
11662 }
11664 case SIOCSMIIREG:
11665 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11666 break; /* We have no PHY */
11668 if (!netif_running(dev))
11669 return -EAGAIN;
11671 spin_lock_bh(&tp->lock);
11672 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11673 spin_unlock_bh(&tp->lock);
11675 return err;
11677 default:
11678 /* do nothing */
11679 break;
11680 }
11681 return -EOPNOTSUPP;
11684 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11686 struct tg3 *tp = netdev_priv(dev);
11688 memcpy(ec, &tp->coal, sizeof(*ec));
11689 return 0;
11692 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11694 struct tg3 *tp = netdev_priv(dev);
11695 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11696 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11698 if (!tg3_flag(tp, 5705_PLUS)) {
11699 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11700 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11701 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11702 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11705 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11706 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11707 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11708 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11709 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11710 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11711 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11712 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11713 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11714 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11715 return -EINVAL;
11717 /* No rx interrupts will be generated if both are zero */
11718 if ((ec->rx_coalesce_usecs == 0) &&
11719 (ec->rx_max_coalesced_frames == 0))
11720 return -EINVAL;
11722 /* No tx interrupts will be generated if both are zero */
11723 if ((ec->tx_coalesce_usecs == 0) &&
11724 (ec->tx_max_coalesced_frames == 0))
11725 return -EINVAL;
11727 /* Only copy relevant parameters, ignore all others. */
11728 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11729 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11730 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11731 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11732 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11733 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11734 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11735 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11736 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11738 if (netif_running(dev)) {
11739 tg3_full_lock(tp, 0);
11740 __tg3_set_coalesce(tp, &tp->coal);
11741 tg3_full_unlock(tp);
11742 }
11743 return 0;
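/* These two handlers back the ethtool coalescing interface (e.g.
 * "ethtool -C ethX rx-usecs N rx-frames M"); per the checks above,
 * each direction must keep at least one nonzero usec or frame
 * threshold or no interrupts would ever be generated.
 */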
11746 static const struct ethtool_ops tg3_ethtool_ops = {
11747 .get_settings = tg3_get_settings,
11748 .set_settings = tg3_set_settings,
11749 .get_drvinfo = tg3_get_drvinfo,
11750 .get_regs_len = tg3_get_regs_len,
11751 .get_regs = tg3_get_regs,
11752 .get_wol = tg3_get_wol,
11753 .set_wol = tg3_set_wol,
11754 .get_msglevel = tg3_get_msglevel,
11755 .set_msglevel = tg3_set_msglevel,
11756 .nway_reset = tg3_nway_reset,
11757 .get_link = ethtool_op_get_link,
11758 .get_eeprom_len = tg3_get_eeprom_len,
11759 .get_eeprom = tg3_get_eeprom,
11760 .set_eeprom = tg3_set_eeprom,
11761 .get_ringparam = tg3_get_ringparam,
11762 .set_ringparam = tg3_set_ringparam,
11763 .get_pauseparam = tg3_get_pauseparam,
11764 .set_pauseparam = tg3_set_pauseparam,
11765 .self_test = tg3_self_test,
11766 .get_strings = tg3_get_strings,
11767 .set_phys_id = tg3_set_phys_id,
11768 .get_ethtool_stats = tg3_get_ethtool_stats,
11769 .get_coalesce = tg3_get_coalesce,
11770 .set_coalesce = tg3_set_coalesce,
11771 .get_sset_count = tg3_get_sset_count,
11772 };
11774 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11776 u32 cursize, val, magic;
11778 tp->nvram_size = EEPROM_CHIP_SIZE;
11780 if (tg3_nvram_read(tp, 0, &magic) != 0)
11781 return;
11783 if ((magic != TG3_EEPROM_MAGIC) &&
11784 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11785 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11786 return;
11788 /*
11789 * Size the chip by reading offsets at increasing powers of two.
11790 * When we encounter our validation signature, we know the addressing
11791 * has wrapped around, and thus have our chip size.
11792 */
11793 cursize = 0x10;
11795 while (cursize < tp->nvram_size) {
11796 if (tg3_nvram_read(tp, cursize, &val) != 0)
11797 return;
11799 if (val == magic)
11800 break;
11802 cursize <<= 1;
11803 }
11805 tp->nvram_size = cursize;
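/* Worked example: on a 512-byte EEPROM the reads at 0x10, 0x20, ...
 * return ordinary data until the address wraps past the end of the
 * part and the magic word at offset 0 reappears, leaving cursize
 * equal to the device size.
 */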
11808 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11810 u32 val;
11812 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11813 return;
11815 /* Selfboot format */
11816 if (val != TG3_EEPROM_MAGIC) {
11817 tg3_get_eeprom_size(tp);
11818 return;
11819 }
11821 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11822 if (val != 0) {
11823 /* This is confusing. We want to operate on the
11824 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11825 * call will read from NVRAM and byteswap the data
11826 * according to the byteswapping settings for all
11827 * other register accesses. This ensures the data we
11828 * want will always reside in the lower 16-bits.
11829 * However, the data in NVRAM is in LE format, which
11830 * means the data from the NVRAM read will always be
11831 * opposite the endianness of the CPU. The 16-bit
11832 * byteswap then brings the data to CPU endianness.
11833 */
11834 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
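/* Worked example: a 512KB part stores 0x0200 (the size in KB) in
 * little-endian order at 0xf2; the register-access byteswap leaves
 * those two bytes reversed in the low half of val, and swab16()
 * recovers 0x0200 before the multiply by 1024.
 */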
11835 return;
11836 }
11837 }
11838 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11841 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11843 u32 nvcfg1;
11845 nvcfg1 = tr32(NVRAM_CFG1);
11846 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11847 tg3_flag_set(tp, FLASH);
11848 } else {
11849 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11850 tw32(NVRAM_CFG1, nvcfg1);
11853 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11854 tg3_flag(tp, 5780_CLASS)) {
11855 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11856 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11857 tp->nvram_jedecnum = JEDEC_ATMEL;
11858 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11859 tg3_flag_set(tp, NVRAM_BUFFERED);
11860 break;
11861 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11862 tp->nvram_jedecnum = JEDEC_ATMEL;
11863 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11864 break;
11865 case FLASH_VENDOR_ATMEL_EEPROM:
11866 tp->nvram_jedecnum = JEDEC_ATMEL;
11867 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11868 tg3_flag_set(tp, NVRAM_BUFFERED);
11869 break;
11870 case FLASH_VENDOR_ST:
11871 tp->nvram_jedecnum = JEDEC_ST;
11872 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11873 tg3_flag_set(tp, NVRAM_BUFFERED);
11874 break;
11875 case FLASH_VENDOR_SAIFUN:
11876 tp->nvram_jedecnum = JEDEC_SAIFUN;
11877 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11878 break;
11879 case FLASH_VENDOR_SST_SMALL:
11880 case FLASH_VENDOR_SST_LARGE:
11881 tp->nvram_jedecnum = JEDEC_SST;
11882 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11883 break;
11884 }
11885 } else {
11886 tp->nvram_jedecnum = JEDEC_ATMEL;
11887 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11888 tg3_flag_set(tp, NVRAM_BUFFERED);
11892 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11894 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11895 case FLASH_5752PAGE_SIZE_256:
11896 tp->nvram_pagesize = 256;
11897 break;
11898 case FLASH_5752PAGE_SIZE_512:
11899 tp->nvram_pagesize = 512;
11900 break;
11901 case FLASH_5752PAGE_SIZE_1K:
11902 tp->nvram_pagesize = 1024;
11903 break;
11904 case FLASH_5752PAGE_SIZE_2K:
11905 tp->nvram_pagesize = 2048;
11906 break;
11907 case FLASH_5752PAGE_SIZE_4K:
11908 tp->nvram_pagesize = 4096;
11909 break;
11910 case FLASH_5752PAGE_SIZE_264:
11911 tp->nvram_pagesize = 264;
11912 break;
11913 case FLASH_5752PAGE_SIZE_528:
11914 tp->nvram_pagesize = 528;
11915 break;
11919 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11921 u32 nvcfg1;
11923 nvcfg1 = tr32(NVRAM_CFG1);
11925 /* NVRAM protection for TPM */
11926 if (nvcfg1 & (1 << 27))
11927 tg3_flag_set(tp, PROTECTED_NVRAM);
11929 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11930 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11931 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11932 tp->nvram_jedecnum = JEDEC_ATMEL;
11933 tg3_flag_set(tp, NVRAM_BUFFERED);
11934 break;
11935 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11936 tp->nvram_jedecnum = JEDEC_ATMEL;
11937 tg3_flag_set(tp, NVRAM_BUFFERED);
11938 tg3_flag_set(tp, FLASH);
11939 break;
11940 case FLASH_5752VENDOR_ST_M45PE10:
11941 case FLASH_5752VENDOR_ST_M45PE20:
11942 case FLASH_5752VENDOR_ST_M45PE40:
11943 tp->nvram_jedecnum = JEDEC_ST;
11944 tg3_flag_set(tp, NVRAM_BUFFERED);
11945 tg3_flag_set(tp, FLASH);
11946 break;
11949 if (tg3_flag(tp, FLASH)) {
11950 tg3_nvram_get_pagesize(tp, nvcfg1);
11951 } else {
11952 /* For eeprom, set pagesize to maximum eeprom size */
11953 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11955 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11956 tw32(NVRAM_CFG1, nvcfg1);
11960 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11962 u32 nvcfg1, protect = 0;
11964 nvcfg1 = tr32(NVRAM_CFG1);
11966 /* NVRAM protection for TPM */
11967 if (nvcfg1 & (1 << 27)) {
11968 tg3_flag_set(tp, PROTECTED_NVRAM);
11969 protect = 1;
11972 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11973 switch (nvcfg1) {
11974 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11975 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11976 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11977 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11978 tp->nvram_jedecnum = JEDEC_ATMEL;
11979 tg3_flag_set(tp, NVRAM_BUFFERED);
11980 tg3_flag_set(tp, FLASH);
11981 tp->nvram_pagesize = 264;
11982 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11983 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11984 tp->nvram_size = (protect ? 0x3e200 :
11985 TG3_NVRAM_SIZE_512KB);
11986 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11987 tp->nvram_size = (protect ? 0x1f200 :
11988 TG3_NVRAM_SIZE_256KB);
11989 else
11990 tp->nvram_size = (protect ? 0x1f200 :
11991 TG3_NVRAM_SIZE_128KB);
11992 break;
11993 case FLASH_5752VENDOR_ST_M45PE10:
11994 case FLASH_5752VENDOR_ST_M45PE20:
11995 case FLASH_5752VENDOR_ST_M45PE40:
11996 tp->nvram_jedecnum = JEDEC_ST;
11997 tg3_flag_set(tp, NVRAM_BUFFERED);
11998 tg3_flag_set(tp, FLASH);
11999 tp->nvram_pagesize = 256;
12000 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12001 tp->nvram_size = (protect ?
12002 TG3_NVRAM_SIZE_64KB :
12003 TG3_NVRAM_SIZE_128KB);
12004 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12005 tp->nvram_size = (protect ?
12006 TG3_NVRAM_SIZE_64KB :
12007 TG3_NVRAM_SIZE_256KB);
12008 else
12009 tp->nvram_size = (protect ?
12010 TG3_NVRAM_SIZE_128KB :
12011 TG3_NVRAM_SIZE_512KB);
12012 break;
12016 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12018 u32 nvcfg1;
12020 nvcfg1 = tr32(NVRAM_CFG1);
12022 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12023 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12024 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12025 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12026 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12027 tp->nvram_jedecnum = JEDEC_ATMEL;
12028 tg3_flag_set(tp, NVRAM_BUFFERED);
12029 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12031 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12032 tw32(NVRAM_CFG1, nvcfg1);
12033 break;
12034 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12035 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12036 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12037 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12038 tp->nvram_jedecnum = JEDEC_ATMEL;
12039 tg3_flag_set(tp, NVRAM_BUFFERED);
12040 tg3_flag_set(tp, FLASH);
12041 tp->nvram_pagesize = 264;
12042 break;
12043 case FLASH_5752VENDOR_ST_M45PE10:
12044 case FLASH_5752VENDOR_ST_M45PE20:
12045 case FLASH_5752VENDOR_ST_M45PE40:
12046 tp->nvram_jedecnum = JEDEC_ST;
12047 tg3_flag_set(tp, NVRAM_BUFFERED);
12048 tg3_flag_set(tp, FLASH);
12049 tp->nvram_pagesize = 256;
12050 break;
12054 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12056 u32 nvcfg1, protect = 0;
12058 nvcfg1 = tr32(NVRAM_CFG1);
12060 /* NVRAM protection for TPM */
12061 if (nvcfg1 & (1 << 27)) {
12062 tg3_flag_set(tp, PROTECTED_NVRAM);
12063 protect = 1;
12066 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12067 switch (nvcfg1) {
12068 case FLASH_5761VENDOR_ATMEL_ADB021D:
12069 case FLASH_5761VENDOR_ATMEL_ADB041D:
12070 case FLASH_5761VENDOR_ATMEL_ADB081D:
12071 case FLASH_5761VENDOR_ATMEL_ADB161D:
12072 case FLASH_5761VENDOR_ATMEL_MDB021D:
12073 case FLASH_5761VENDOR_ATMEL_MDB041D:
12074 case FLASH_5761VENDOR_ATMEL_MDB081D:
12075 case FLASH_5761VENDOR_ATMEL_MDB161D:
12076 tp->nvram_jedecnum = JEDEC_ATMEL;
12077 tg3_flag_set(tp, NVRAM_BUFFERED);
12078 tg3_flag_set(tp, FLASH);
12079 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12080 tp->nvram_pagesize = 256;
12081 break;
12082 case FLASH_5761VENDOR_ST_A_M45PE20:
12083 case FLASH_5761VENDOR_ST_A_M45PE40:
12084 case FLASH_5761VENDOR_ST_A_M45PE80:
12085 case FLASH_5761VENDOR_ST_A_M45PE16:
12086 case FLASH_5761VENDOR_ST_M_M45PE20:
12087 case FLASH_5761VENDOR_ST_M_M45PE40:
12088 case FLASH_5761VENDOR_ST_M_M45PE80:
12089 case FLASH_5761VENDOR_ST_M_M45PE16:
12090 tp->nvram_jedecnum = JEDEC_ST;
12091 tg3_flag_set(tp, NVRAM_BUFFERED);
12092 tg3_flag_set(tp, FLASH);
12093 tp->nvram_pagesize = 256;
12094 break;
12097 if (protect) {
12098 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12099 } else {
12100 switch (nvcfg1) {
12101 case FLASH_5761VENDOR_ATMEL_ADB161D:
12102 case FLASH_5761VENDOR_ATMEL_MDB161D:
12103 case FLASH_5761VENDOR_ST_A_M45PE16:
12104 case FLASH_5761VENDOR_ST_M_M45PE16:
12105 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12106 break;
12107 case FLASH_5761VENDOR_ATMEL_ADB081D:
12108 case FLASH_5761VENDOR_ATMEL_MDB081D:
12109 case FLASH_5761VENDOR_ST_A_M45PE80:
12110 case FLASH_5761VENDOR_ST_M_M45PE80:
12111 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12112 break;
12113 case FLASH_5761VENDOR_ATMEL_ADB041D:
12114 case FLASH_5761VENDOR_ATMEL_MDB041D:
12115 case FLASH_5761VENDOR_ST_A_M45PE40:
12116 case FLASH_5761VENDOR_ST_M_M45PE40:
12117 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12118 break;
12119 case FLASH_5761VENDOR_ATMEL_ADB021D:
12120 case FLASH_5761VENDOR_ATMEL_MDB021D:
12121 case FLASH_5761VENDOR_ST_A_M45PE20:
12122 case FLASH_5761VENDOR_ST_M_M45PE20:
12123 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12124 break;
12129 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12131 tp->nvram_jedecnum = JEDEC_ATMEL;
12132 tg3_flag_set(tp, NVRAM_BUFFERED);
12133 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12136 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12138 u32 nvcfg1;
12140 nvcfg1 = tr32(NVRAM_CFG1);
12142 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12143 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12144 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12145 tp->nvram_jedecnum = JEDEC_ATMEL;
12146 tg3_flag_set(tp, NVRAM_BUFFERED);
12147 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12149 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12150 tw32(NVRAM_CFG1, nvcfg1);
12151 return;
12152 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12153 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12154 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12155 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12156 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12157 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12158 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12159 tp->nvram_jedecnum = JEDEC_ATMEL;
12160 tg3_flag_set(tp, NVRAM_BUFFERED);
12161 tg3_flag_set(tp, FLASH);
12163 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12164 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12165 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12166 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12167 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12168 break;
12169 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12170 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12171 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12172 break;
12173 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12174 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12175 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12176 break;
12178 break;
12179 case FLASH_5752VENDOR_ST_M45PE10:
12180 case FLASH_5752VENDOR_ST_M45PE20:
12181 case FLASH_5752VENDOR_ST_M45PE40:
12182 tp->nvram_jedecnum = JEDEC_ST;
12183 tg3_flag_set(tp, NVRAM_BUFFERED);
12184 tg3_flag_set(tp, FLASH);
12186 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12187 case FLASH_5752VENDOR_ST_M45PE10:
12188 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12189 break;
12190 case FLASH_5752VENDOR_ST_M45PE20:
12191 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12192 break;
12193 case FLASH_5752VENDOR_ST_M45PE40:
12194 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12195 break;
12197 break;
12198 default:
12199 tg3_flag_set(tp, NO_NVRAM);
12200 return;
12203 tg3_nvram_get_pagesize(tp, nvcfg1);
12204 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12205 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
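/* 264- and 528-byte pages are Atmel DataFlash geometries whose
 * addresses need the controller's page translation; power-of-two
 * page sizes use plain linear addressing, hence NO_NVRAM_ADDR_TRANS.
 */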
12209 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12211 u32 nvcfg1;
12213 nvcfg1 = tr32(NVRAM_CFG1);
12215 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12216 case FLASH_5717VENDOR_ATMEL_EEPROM:
12217 case FLASH_5717VENDOR_MICRO_EEPROM:
12218 tp->nvram_jedecnum = JEDEC_ATMEL;
12219 tg3_flag_set(tp, NVRAM_BUFFERED);
12220 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12222 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12223 tw32(NVRAM_CFG1, nvcfg1);
12224 return;
12225 case FLASH_5717VENDOR_ATMEL_MDB011D:
12226 case FLASH_5717VENDOR_ATMEL_ADB011B:
12227 case FLASH_5717VENDOR_ATMEL_ADB011D:
12228 case FLASH_5717VENDOR_ATMEL_MDB021D:
12229 case FLASH_5717VENDOR_ATMEL_ADB021B:
12230 case FLASH_5717VENDOR_ATMEL_ADB021D:
12231 case FLASH_5717VENDOR_ATMEL_45USPT:
12232 tp->nvram_jedecnum = JEDEC_ATMEL;
12233 tg3_flag_set(tp, NVRAM_BUFFERED);
12234 tg3_flag_set(tp, FLASH);
12236 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12237 case FLASH_5717VENDOR_ATMEL_MDB021D:
12238 /* Detect size with tg3_get_nvram_size() */
12239 break;
12240 case FLASH_5717VENDOR_ATMEL_ADB021B:
12241 case FLASH_5717VENDOR_ATMEL_ADB021D:
12242 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12243 break;
12244 default:
12245 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12246 break;
12248 break;
12249 case FLASH_5717VENDOR_ST_M_M25PE10:
12250 case FLASH_5717VENDOR_ST_A_M25PE10:
12251 case FLASH_5717VENDOR_ST_M_M45PE10:
12252 case FLASH_5717VENDOR_ST_A_M45PE10:
12253 case FLASH_5717VENDOR_ST_M_M25PE20:
12254 case FLASH_5717VENDOR_ST_A_M25PE20:
12255 case FLASH_5717VENDOR_ST_M_M45PE20:
12256 case FLASH_5717VENDOR_ST_A_M45PE20:
12257 case FLASH_5717VENDOR_ST_25USPT:
12258 case FLASH_5717VENDOR_ST_45USPT:
12259 tp->nvram_jedecnum = JEDEC_ST;
12260 tg3_flag_set(tp, NVRAM_BUFFERED);
12261 tg3_flag_set(tp, FLASH);
12263 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12264 case FLASH_5717VENDOR_ST_M_M25PE20:
12265 case FLASH_5717VENDOR_ST_M_M45PE20:
12266 /* Detect size with tg3_nvram_get_size() */
12267 break;
12268 case FLASH_5717VENDOR_ST_A_M25PE20:
12269 case FLASH_5717VENDOR_ST_A_M45PE20:
12270 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12271 break;
12272 default:
12273 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12274 break;
12276 break;
12277 default:
12278 tg3_flag_set(tp, NO_NVRAM);
12279 return;
12282 tg3_nvram_get_pagesize(tp, nvcfg1);
12283 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12284 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12287 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12289 u32 nvcfg1, nvmpinstrp;
12291 nvcfg1 = tr32(NVRAM_CFG1);
12292 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12294 switch (nvmpinstrp) {
12295 case FLASH_5720_EEPROM_HD:
12296 case FLASH_5720_EEPROM_LD:
12297 tp->nvram_jedecnum = JEDEC_ATMEL;
12298 tg3_flag_set(tp, NVRAM_BUFFERED);
12300 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12301 tw32(NVRAM_CFG1, nvcfg1);
12302 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12303 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12304 else
12305 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12306 return;
12307 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12308 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12309 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12310 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12311 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12312 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12313 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12314 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12315 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12316 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12317 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12318 case FLASH_5720VENDOR_ATMEL_45USPT:
12319 tp->nvram_jedecnum = JEDEC_ATMEL;
12320 tg3_flag_set(tp, NVRAM_BUFFERED);
12321 tg3_flag_set(tp, FLASH);
12323 switch (nvmpinstrp) {
12324 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12325 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12326 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12327 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12328 break;
12329 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12330 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12331 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12332 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12333 break;
12334 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12335 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12336 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12337 break;
12338 default:
12339 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12340 break;
12342 break;
12343 case FLASH_5720VENDOR_M_ST_M25PE10:
12344 case FLASH_5720VENDOR_M_ST_M45PE10:
12345 case FLASH_5720VENDOR_A_ST_M25PE10:
12346 case FLASH_5720VENDOR_A_ST_M45PE10:
12347 case FLASH_5720VENDOR_M_ST_M25PE20:
12348 case FLASH_5720VENDOR_M_ST_M45PE20:
12349 case FLASH_5720VENDOR_A_ST_M25PE20:
12350 case FLASH_5720VENDOR_A_ST_M45PE20:
12351 case FLASH_5720VENDOR_M_ST_M25PE40:
12352 case FLASH_5720VENDOR_M_ST_M45PE40:
12353 case FLASH_5720VENDOR_A_ST_M25PE40:
12354 case FLASH_5720VENDOR_A_ST_M45PE40:
12355 case FLASH_5720VENDOR_M_ST_M25PE80:
12356 case FLASH_5720VENDOR_M_ST_M45PE80:
12357 case FLASH_5720VENDOR_A_ST_M25PE80:
12358 case FLASH_5720VENDOR_A_ST_M45PE80:
12359 case FLASH_5720VENDOR_ST_25USPT:
12360 case FLASH_5720VENDOR_ST_45USPT:
12361 tp->nvram_jedecnum = JEDEC_ST;
12362 tg3_flag_set(tp, NVRAM_BUFFERED);
12363 tg3_flag_set(tp, FLASH);
12365 switch (nvmpinstrp) {
12366 case FLASH_5720VENDOR_M_ST_M25PE20:
12367 case FLASH_5720VENDOR_M_ST_M45PE20:
12368 case FLASH_5720VENDOR_A_ST_M25PE20:
12369 case FLASH_5720VENDOR_A_ST_M45PE20:
12370 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12371 break;
12372 case FLASH_5720VENDOR_M_ST_M25PE40:
12373 case FLASH_5720VENDOR_M_ST_M45PE40:
12374 case FLASH_5720VENDOR_A_ST_M25PE40:
12375 case FLASH_5720VENDOR_A_ST_M45PE40:
12376 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12377 break;
12378 case FLASH_5720VENDOR_M_ST_M25PE80:
12379 case FLASH_5720VENDOR_M_ST_M45PE80:
12380 case FLASH_5720VENDOR_A_ST_M25PE80:
12381 case FLASH_5720VENDOR_A_ST_M45PE80:
12382 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12383 break;
12384 default:
12385 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12386 break;
12388 break;
12389 default:
12390 tg3_flag_set(tp, NO_NVRAM);
12391 return;
12394 tg3_nvram_get_pagesize(tp, nvcfg1);
12395 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12396 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
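/* Annotation (not in the original source) on the recurring pagesize
 * check above: Atmel AT45DB DataFlash parts use 264- or 528-byte
 * pages, which need the page/offset address translation performed by
 * tg3_nvram_phys_addr(). Any other page size is flat-addressed,
 * hence NO_NVRAM_ADDR_TRANS.
 */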
12399 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12400 static void __devinit tg3_nvram_init(struct tg3 *tp)
12402 tw32_f(GRC_EEPROM_ADDR,
12403 (EEPROM_ADDR_FSM_RESET |
12404 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12405 EEPROM_ADDR_CLKPERD_SHIFT)));
12407 msleep(1);
12409 /* Enable seeprom accesses. */
12410 tw32_f(GRC_LOCAL_CTRL,
12411 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12412 udelay(100);
12414 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12415 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12416 tg3_flag_set(tp, NVRAM);
12418 if (tg3_nvram_lock(tp)) {
12419 netdev_warn(tp->dev,
12420 "Cannot get nvram lock, %s failed\n",
12421 __func__);
12422 return;
12424 tg3_enable_nvram_access(tp);
12426 tp->nvram_size = 0;
12428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12429 tg3_get_5752_nvram_info(tp);
12430 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12431 tg3_get_5755_nvram_info(tp);
12432 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12433 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12434 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12435 tg3_get_5787_nvram_info(tp);
12436 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12437 tg3_get_5761_nvram_info(tp);
12438 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12439 tg3_get_5906_nvram_info(tp);
12440 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12442 tg3_get_57780_nvram_info(tp);
12443 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12444 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12445 tg3_get_5717_nvram_info(tp);
12446 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12447 tg3_get_5720_nvram_info(tp);
12448 else
12449 tg3_get_nvram_info(tp);
12451 if (tp->nvram_size == 0)
12452 tg3_get_nvram_size(tp);
12454 tg3_disable_nvram_access(tp);
12455 tg3_nvram_unlock(tp);
12457 } else {
12458 tg3_flag_clear(tp, NVRAM);
12459 tg3_flag_clear(tp, NVRAM_BUFFERED);
12461 tg3_get_eeprom_size(tp);
12465 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12466 u32 offset, u32 len, u8 *buf)
12468 int i, j, rc = 0;
12469 u32 val;
12471 for (i = 0; i < len; i += 4) {
12472 u32 addr;
12473 __be32 data;
12475 addr = offset + i;
12477 memcpy(&data, buf + i, 4);
12479 /*
12480 * The SEEPROM interface expects the data to always be opposite
12481 * the native endian format. We accomplish this by reversing
12482 * all the operations that would have been performed on the
12483 * data from a call to tg3_nvram_read_be32().
12484 */
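/* Annotation (not in the original source): for a buffer word with
 * bytes 11 22 33 44, be32_to_cpu(data) yields 0x11223344 and
 * swab32() then yields 0x44332211, i.e. the same four bytes
 * interpreted little-endian. So the value handed to GRC_EEPROM_DATA
 * below is always the byte-reverse of the big-endian interpretation,
 * as the comment above requires.
 */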
12485 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12487 val = tr32(GRC_EEPROM_ADDR);
12488 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12490 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12491 EEPROM_ADDR_READ);
12492 tw32(GRC_EEPROM_ADDR, val |
12493 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12494 (addr & EEPROM_ADDR_ADDR_MASK) |
12495 EEPROM_ADDR_START |
12496 EEPROM_ADDR_WRITE);
12498 for (j = 0; j < 1000; j++) {
12499 val = tr32(GRC_EEPROM_ADDR);
12501 if (val & EEPROM_ADDR_COMPLETE)
12502 break;
12503 msleep(1);
12505 if (!(val & EEPROM_ADDR_COMPLETE)) {
12506 rc = -EBUSY;
12507 break;
12511 return rc;
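/* Illustrative sketch (hypothetical helper, not part of the driver):
 * the completion loop above is the common poll-with-timeout idiom,
 * roughly a 1000 ms budget polled at 1 ms granularity. Factored out,
 * it might look like this:
 */
static int tg3_example_poll_eeprom_done(struct tg3 *tp)
{
	int j;

	for (j = 0; j < 1000; j++) {
		/* DONE latches once the SEEPROM cycle finishes */
		if (tr32(GRC_EEPROM_ADDR) & EEPROM_ADDR_COMPLETE)
			return 0;
		msleep(1);	/* sleep rather than busy-wait */
	}
	return -EBUSY;		/* timed out waiting for completion */
}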
12514 /* offset and length are dword aligned */
12515 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12516 u8 *buf)
12518 int ret = 0;
12519 u32 pagesize = tp->nvram_pagesize;
12520 u32 pagemask = pagesize - 1;
12521 u32 nvram_cmd;
12522 u8 *tmp;
12524 tmp = kmalloc(pagesize, GFP_KERNEL);
12525 if (tmp == NULL)
12526 return -ENOMEM;
12528 while (len) {
12529 int j;
12530 u32 phy_addr, page_off, size;
12532 phy_addr = offset & ~pagemask;
12534 for (j = 0; j < pagesize; j += 4) {
12535 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12536 (__be32 *) (tmp + j));
12537 if (ret)
12538 break;
12540 if (ret)
12541 break;
12543 page_off = offset & pagemask;
12544 size = pagesize;
12545 if (len < size)
12546 size = len;
12548 len -= size;
12550 memcpy(tmp + page_off, buf, size);
12552 offset = offset + (pagesize - page_off);
12554 tg3_enable_nvram_access(tp);
12556 /*
12557 * Before we can erase the flash page, we need
12558 * to issue a special "write enable" command.
12559 */
12560 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12562 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12563 break;
12565 /* Erase the target page */
12566 tw32(NVRAM_ADDR, phy_addr);
12568 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12569 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12571 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12572 break;
12574 /* Issue another write enable to start the write. */
12575 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12577 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12578 break;
12580 for (j = 0; j < pagesize; j += 4) {
12581 __be32 data;
12583 data = *((__be32 *) (tmp + j));
12585 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12587 tw32(NVRAM_ADDR, phy_addr + j);
12589 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12590 NVRAM_CMD_WR;
12592 if (j == 0)
12593 nvram_cmd |= NVRAM_CMD_FIRST;
12594 else if (j == (pagesize - 4))
12595 nvram_cmd |= NVRAM_CMD_LAST;
12597 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12598 break;
12600 if (ret)
12601 break;
12604 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12605 tg3_nvram_exec_cmd(tp, nvram_cmd);
12607 kfree(tmp);
12609 return ret;
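/* Summary annotation (not in the original source): unbuffered flash
 * is programmed a full page at a time, so each loop iteration above
 * is a classic read-modify-write:
 *
 *   1. read the whole page containing 'offset' into 'tmp'
 *   2. merge the caller's bytes into 'tmp' at the page offset
 *   3. issue WREN, erase the page, then WREN again
 *   4. stream the page back out word by word, tagging the first
 *      word with NVRAM_CMD_FIRST and the final word with
 *      NVRAM_CMD_LAST
 *   5. finish with WRDI to drop write permission
 */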
12612 /* offset and length are dword aligned */
12613 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12614 u8 *buf)
12616 int i, ret = 0;
12618 for (i = 0; i < len; i += 4, offset += 4) {
12619 u32 page_off, phy_addr, nvram_cmd;
12620 __be32 data;
12622 memcpy(&data, buf + i, 4);
12623 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12625 page_off = offset % tp->nvram_pagesize;
12627 phy_addr = tg3_nvram_phys_addr(tp, offset);
12629 tw32(NVRAM_ADDR, phy_addr);
12631 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12633 if (page_off == 0 || i == 0)
12634 nvram_cmd |= NVRAM_CMD_FIRST;
12635 if (page_off == (tp->nvram_pagesize - 4))
12636 nvram_cmd |= NVRAM_CMD_LAST;
12638 if (i == (len - 4))
12639 nvram_cmd |= NVRAM_CMD_LAST;
12641 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12642 !tg3_flag(tp, 5755_PLUS) &&
12643 (tp->nvram_jedecnum == JEDEC_ST) &&
12644 (nvram_cmd & NVRAM_CMD_FIRST)) {
12646 if ((ret = tg3_nvram_exec_cmd(tp,
12647 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12648 NVRAM_CMD_DONE)))
12650 break;
12652 if (!tg3_flag(tp, FLASH)) {
12653 /* We always do complete word writes to eeprom. */
12654 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12657 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12658 break;
12660 return ret;
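/* Worked example (annotation): with nvram_pagesize == 264 and a
 * 16-byte write starting at offset 256, the flag logic above yields
 *
 *   i=0,  offset=256, page_off=256, i==0           -> NVRAM_CMD_FIRST
 *   i=4,  offset=260, page_off=260 (pagesize - 4)  -> NVRAM_CMD_LAST
 *   i=8,  offset=264, page_off=0                   -> NVRAM_CMD_FIRST
 *   i=12, offset=268, page_off=4,   i == len - 4   -> NVRAM_CMD_LAST
 *
 * i.e. each page's burst is bracketed by FIRST/LAST, and the very
 * last word of the transfer is always tagged LAST.
 */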
12663 /* offset and length are dword aligned */
12664 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12666 int ret;
12668 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12669 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12670 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12671 udelay(40);
12674 if (!tg3_flag(tp, NVRAM)) {
12675 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12676 } else {
12677 u32 grc_mode;
12679 ret = tg3_nvram_lock(tp);
12680 if (ret)
12681 return ret;
12683 tg3_enable_nvram_access(tp);
12684 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12685 tw32(NVRAM_WRITE1, 0x406);
12687 grc_mode = tr32(GRC_MODE);
12688 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12690 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12691 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12692 buf);
12693 } else {
12694 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12695 buf);
12698 grc_mode = tr32(GRC_MODE);
12699 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12701 tg3_disable_nvram_access(tp);
12702 tg3_nvram_unlock(tp);
12705 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12706 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12707 udelay(40);
12710 return ret;
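/* Illustrative caller sketch (hypothetical, not from the driver):
 * writing a single CPU-native word through the dispatcher above.
 * The buffer is laid out big-endian because both write paths
 * interpret it via be32_to_cpu(), and offset/len must be dword
 * aligned as noted above the function.
 */
static int tg3_example_write_word(struct tg3 *tp, u32 offset, u32 val)
{
	__be32 buf = cpu_to_be32(val);

	return tg3_nvram_write_block(tp, offset, sizeof(buf), (u8 *)&buf);
}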
12713 struct subsys_tbl_ent {
12714 u16 subsys_vendor, subsys_devid;
12715 u32 phy_id;
12718 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12719 /* Broadcom boards. */
12720 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12721 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12722 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12723 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12724 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12725 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12726 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12727 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12728 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12729 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12730 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12731 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12732 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12733 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12734 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12735 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12736 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12737 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12738 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12739 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12740 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12741 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12743 /* 3com boards. */
12744 { TG3PCI_SUBVENDOR_ID_3COM,
12745 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12746 { TG3PCI_SUBVENDOR_ID_3COM,
12747 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12748 { TG3PCI_SUBVENDOR_ID_3COM,
12749 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12750 { TG3PCI_SUBVENDOR_ID_3COM,
12751 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12752 { TG3PCI_SUBVENDOR_ID_3COM,
12753 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12755 /* DELL boards. */
12756 { TG3PCI_SUBVENDOR_ID_DELL,
12757 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12758 { TG3PCI_SUBVENDOR_ID_DELL,
12759 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12760 { TG3PCI_SUBVENDOR_ID_DELL,
12761 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12762 { TG3PCI_SUBVENDOR_ID_DELL,
12763 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12765 /* Compaq boards. */
12766 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12767 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12768 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12769 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12770 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12771 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12772 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12773 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12774 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12775 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12777 /* IBM boards. */
12778 { TG3PCI_SUBVENDOR_ID_IBM,
12779 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12782 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12784 int i;
12786 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12787 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12788 tp->pdev->subsystem_vendor) &&
12789 (subsys_id_to_phy_id[i].subsys_devid ==
12790 tp->pdev->subsystem_device))
12791 return &subsys_id_to_phy_id[i];
12793 return NULL;
12796 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12798 u32 val;
12800 tp->phy_id = TG3_PHY_ID_INVALID;
12801 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12803 /* Assume an onboard device and WOL capable by default. */
12804 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12805 tg3_flag_set(tp, WOL_CAP);
12807 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12808 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12809 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12810 tg3_flag_set(tp, IS_NIC);
12812 val = tr32(VCPU_CFGSHDW);
12813 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12814 tg3_flag_set(tp, ASPM_WORKAROUND);
12815 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12816 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12817 tg3_flag_set(tp, WOL_ENABLE);
12818 device_set_wakeup_enable(&tp->pdev->dev, true);
12820 goto done;
12823 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12824 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12825 u32 nic_cfg, led_cfg;
12826 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12827 int eeprom_phy_serdes = 0;
12829 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12830 tp->nic_sram_data_cfg = nic_cfg;
12832 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12833 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12834 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12835 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12836 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12837 (ver > 0) && (ver < 0x100))
12838 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12840 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12841 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12843 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12844 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12845 eeprom_phy_serdes = 1;
12847 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12848 if (nic_phy_id != 0) {
12849 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12850 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12852 eeprom_phy_id = (id1 >> 16) << 10;
12853 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12854 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12855 } else
12856 eeprom_phy_id = 0;
12858 tp->phy_id = eeprom_phy_id;
12859 if (eeprom_phy_serdes) {
12860 if (!tg3_flag(tp, 5705_PLUS))
12861 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12862 else
12863 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12866 if (tg3_flag(tp, 5750_PLUS))
12867 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12868 SHASTA_EXT_LED_MODE_MASK);
12869 else
12870 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12872 switch (led_cfg) {
12873 default:
12874 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12875 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12876 break;
12878 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12879 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12880 break;
12882 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12883 tp->led_ctrl = LED_CTRL_MODE_MAC;
12885 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12886 * read on some older 5700/5701 bootcode.
12887 */
12888 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12889 ASIC_REV_5700 ||
12890 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12891 ASIC_REV_5701)
12892 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12894 break;
12896 case SHASTA_EXT_LED_SHARED:
12897 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12898 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12899 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12900 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12901 LED_CTRL_MODE_PHY_2);
12902 break;
12904 case SHASTA_EXT_LED_MAC:
12905 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12906 break;
12908 case SHASTA_EXT_LED_COMBO:
12909 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12910 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12911 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12912 LED_CTRL_MODE_PHY_2);
12913 break;
12917 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12918 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12919 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12920 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12922 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12923 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12925 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12926 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12927 if ((tp->pdev->subsystem_vendor ==
12928 PCI_VENDOR_ID_ARIMA) &&
12929 (tp->pdev->subsystem_device == 0x205a ||
12930 tp->pdev->subsystem_device == 0x2063))
12931 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12932 } else {
12933 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12934 tg3_flag_set(tp, IS_NIC);
12937 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12938 tg3_flag_set(tp, ENABLE_ASF);
12939 if (tg3_flag(tp, 5750_PLUS))
12940 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12943 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12944 tg3_flag(tp, 5750_PLUS))
12945 tg3_flag_set(tp, ENABLE_APE);
12947 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12948 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12949 tg3_flag_clear(tp, WOL_CAP);
12951 if (tg3_flag(tp, WOL_CAP) &&
12952 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12953 tg3_flag_set(tp, WOL_ENABLE);
12954 device_set_wakeup_enable(&tp->pdev->dev, true);
12957 if (cfg2 & (1 << 17))
12958 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12960 /* serdes signal pre-emphasis in register 0x590 set by */
12961 /* bootcode if bit 18 is set */
12962 if (cfg2 & (1 << 18))
12963 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12965 if ((tg3_flag(tp, 57765_PLUS) ||
12966 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12967 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12968 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12969 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12971 if (tg3_flag(tp, PCI_EXPRESS) &&
12972 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12973 !tg3_flag(tp, 57765_PLUS)) {
12974 u32 cfg3;
12976 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12977 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12978 tg3_flag_set(tp, ASPM_WORKAROUND);
12981 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12982 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12983 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12984 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12985 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12986 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12988 done:
12989 if (tg3_flag(tp, WOL_CAP))
12990 device_set_wakeup_enable(&tp->pdev->dev,
12991 tg3_flag(tp, WOL_ENABLE));
12992 else
12993 device_set_wakeup_capable(&tp->pdev->dev, false);
12996 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12998 int i;
12999 u32 val;
13001 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13002 tw32(OTP_CTRL, cmd);
13004 /* Wait for up to 1 ms for command to execute. */
13005 for (i = 0; i < 100; i++) {
13006 val = tr32(OTP_STATUS);
13007 if (val & OTP_STATUS_CMD_DONE)
13008 break;
13009 udelay(10);
13012 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13015 /* Read the gphy configuration from the OTP region of the chip. The gphy
13016 * configuration is a 32-bit value that straddles the alignment boundary.
13017 * We do two 32-bit reads and then shift and merge the results.
13018 */
13019 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13021 u32 bhalf_otp, thalf_otp;
13023 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13025 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13026 return 0;
13028 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13030 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13031 return 0;
13033 thalf_otp = tr32(OTP_READ_DATA);
13035 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13037 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13038 return 0;
13040 bhalf_otp = tr32(OTP_READ_DATA);
13042 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
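/* Worked example (annotation): if the straddling gphy word is
 * 0xAABBCCDD, the first aligned read returns 0x....AABB (value in
 * its low half) and the second returns 0xCCDD.... (value in its
 * high half), so the merge computes
 *
 *   ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16)
 *     == 0xAABB0000 | 0x0000CCDD == 0xAABBCCDD
 */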
13045 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13047 u32 adv = ADVERTISED_Autoneg |
13048 ADVERTISED_Pause;
13050 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13051 adv |= ADVERTISED_1000baseT_Half |
13052 ADVERTISED_1000baseT_Full;
13054 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13055 adv |= ADVERTISED_100baseT_Half |
13056 ADVERTISED_100baseT_Full |
13057 ADVERTISED_10baseT_Half |
13058 ADVERTISED_10baseT_Full |
13059 ADVERTISED_TP;
13060 else
13061 adv |= ADVERTISED_FIBRE;
13063 tp->link_config.advertising = adv;
13064 tp->link_config.speed = SPEED_INVALID;
13065 tp->link_config.duplex = DUPLEX_INVALID;
13066 tp->link_config.autoneg = AUTONEG_ENABLE;
13067 tp->link_config.active_speed = SPEED_INVALID;
13068 tp->link_config.active_duplex = DUPLEX_INVALID;
13069 tp->link_config.orig_speed = SPEED_INVALID;
13070 tp->link_config.orig_duplex = DUPLEX_INVALID;
13071 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13074 static int __devinit tg3_phy_probe(struct tg3 *tp)
13076 u32 hw_phy_id_1, hw_phy_id_2;
13077 u32 hw_phy_id, hw_phy_id_masked;
13078 int err;
13080 /* flow control autonegotiation is default behavior */
13081 tg3_flag_set(tp, PAUSE_AUTONEG);
13082 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13084 if (tg3_flag(tp, USE_PHYLIB))
13085 return tg3_phy_init(tp);
13087 /* Reading the PHY ID register can conflict with ASF
13088 * firmware access to the PHY hardware.
13089 */
13090 err = 0;
13091 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13092 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13093 } else {
13094 /* Now read the physical PHY_ID from the chip and verify
13095 * that it is sane. If it doesn't look good, we fall back
13096 * to the PHY_ID found in the eeprom area and, failing
13097 * that, to the hard-coded subsystem-ID table.
13098 */
13099 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13100 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13102 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13103 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13104 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13106 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13109 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13110 tp->phy_id = hw_phy_id;
13111 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13112 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13113 else
13114 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13115 } else {
13116 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13117 /* Do nothing, phy ID already set up in
13118 * tg3_get_eeprom_hw_cfg().
13119 */
13120 } else {
13121 struct subsys_tbl_ent *p;
13123 /* No eeprom signature? Try the hardcoded
13124 * subsys device table.
13125 */
13126 p = tg3_lookup_by_subsys(tp);
13127 if (!p)
13128 return -ENODEV;
13130 tp->phy_id = p->phy_id;
13131 if (!tp->phy_id ||
13132 tp->phy_id == TG3_PHY_ID_BCM8002)
13133 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13137 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13138 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13139 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13140 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13141 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13142 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13143 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13144 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13146 tg3_phy_init_link_config(tp);
13148 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13149 !tg3_flag(tp, ENABLE_APE) &&
13150 !tg3_flag(tp, ENABLE_ASF)) {
13151 u32 bmsr, mask;
13153 tg3_readphy(tp, MII_BMSR, &bmsr);
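/* Annotation: MII_BMSR link status is latched-low per the MII spec,
 * so the read above clears any stale latch and the read below
 * reports the current link state.
 */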
13154 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13155 (bmsr & BMSR_LSTATUS))
13156 goto skip_phy_reset;
13158 err = tg3_phy_reset(tp);
13159 if (err)
13160 return err;
13162 tg3_phy_set_wirespeed(tp);
13164 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13165 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13166 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13167 if (!tg3_copper_is_advertising_all(tp, mask)) {
13168 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13169 tp->link_config.flowctrl);
13171 tg3_writephy(tp, MII_BMCR,
13172 BMCR_ANENABLE | BMCR_ANRESTART);
13176 skip_phy_reset:
13177 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13178 err = tg3_init_5401phy_dsp(tp);
13179 if (err)
13180 return err;
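/* Annotation: the second tg3_init_5401phy_dsp() call below appears
 * to be a deliberate repeat of the 5401 DSP setup; only the status
 * of this second attempt is returned.
 */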
13182 err = tg3_init_5401phy_dsp(tp);
13185 return err;
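/* Annotation on the packing above: MII_PHYSID1/2 are folded into
 * the driver's 32-bit phy_id as
 *
 *   bits 31..26 : PHYSID2 bits 15..10   ((id2 & 0xfc00) << 16)
 *   bits 25..10 : PHYSID1 bits 15..0    ((id1 & 0xffff) << 10)
 *   bits  9..0  : PHYSID2 bits  9..0    (id2 & 0x03ff)
 *
 * tg3_get_eeprom_hw_cfg() builds the same layout, except its id1
 * arrives in the upper half of the SRAM word, hence its (id1 >> 16).
 */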
13188 static void __devinit tg3_read_vpd(struct tg3 *tp)
13190 u8 *vpd_data;
13191 unsigned int block_end, rosize, len;
13192 u32 vpdlen;
13193 int j, i = 0;
13195 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13196 if (!vpd_data)
13197 goto out_no_vpd;
13199 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13200 if (i < 0)
13201 goto out_not_found;
13203 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13204 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13205 i += PCI_VPD_LRDT_TAG_SIZE;
13207 if (block_end > vpdlen)
13208 goto out_not_found;
13210 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13211 PCI_VPD_RO_KEYWORD_MFR_ID);
13212 if (j > 0) {
13213 len = pci_vpd_info_field_size(&vpd_data[j]);
13215 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13216 if (j + len > block_end || len != 4 ||
13217 memcmp(&vpd_data[j], "1028", 4))
13218 goto partno;
13220 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13221 PCI_VPD_RO_KEYWORD_VENDOR0);
13222 if (j < 0)
13223 goto partno;
13225 len = pci_vpd_info_field_size(&vpd_data[j]);
13227 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13228 if (j + len > block_end)
13229 goto partno;
13231 memcpy(tp->fw_ver, &vpd_data[j], len);
13232 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13235 partno:
13236 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13237 PCI_VPD_RO_KEYWORD_PARTNO);
13238 if (i < 0)
13239 goto out_not_found;
13241 len = pci_vpd_info_field_size(&vpd_data[i]);
13243 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13244 if (len > TG3_BPN_SIZE ||
13245 (len + i) > vpdlen)
13246 goto out_not_found;
13248 memcpy(tp->board_part_number, &vpd_data[i], len);
13250 out_not_found:
13251 kfree(vpd_data);
13252 if (tp->board_part_number[0])
13253 return;
13255 out_no_vpd:
13256 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13257 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13258 strcpy(tp->board_part_number, "BCM5717");
13259 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13260 strcpy(tp->board_part_number, "BCM5718");
13261 else
13262 goto nomatch;
13263 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13264 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13265 strcpy(tp->board_part_number, "BCM57780");
13266 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13267 strcpy(tp->board_part_number, "BCM57760");
13268 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13269 strcpy(tp->board_part_number, "BCM57790");
13270 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13271 strcpy(tp->board_part_number, "BCM57788");
13272 else
13273 goto nomatch;
13274 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13275 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13276 strcpy(tp->board_part_number, "BCM57761");
13277 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13278 strcpy(tp->board_part_number, "BCM57765");
13279 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13280 strcpy(tp->board_part_number, "BCM57781");
13281 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13282 strcpy(tp->board_part_number, "BCM57785");
13283 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13284 strcpy(tp->board_part_number, "BCM57791");
13285 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13286 strcpy(tp->board_part_number, "BCM57795");
13287 else
13288 goto nomatch;
13289 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13290 strcpy(tp->board_part_number, "BCM95906");
13291 } else {
13292 nomatch:
13293 strcpy(tp->board_part_number, "none");
13297 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13299 u32 val;
13301 if (tg3_nvram_read(tp, offset, &val) ||
13302 (val & 0xfc000000) != 0x0c000000 ||
13303 tg3_nvram_read(tp, offset + 4, &val) ||
13304 val != 0)
13305 return 0;
13307 return 1;
13310 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13312 u32 val, offset, start, ver_offset;
13313 int i, dst_off;
13314 bool newver = false;
13316 if (tg3_nvram_read(tp, 0xc, &offset) ||
13317 tg3_nvram_read(tp, 0x4, &start))
13318 return;
13320 offset = tg3_nvram_logical_addr(tp, offset);
13322 if (tg3_nvram_read(tp, offset, &val))
13323 return;
13325 if ((val & 0xfc000000) == 0x0c000000) {
13326 if (tg3_nvram_read(tp, offset + 4, &val))
13327 return;
13329 if (val == 0)
13330 newver = true;
13333 dst_off = strlen(tp->fw_ver);
13335 if (newver) {
13336 if (TG3_VER_SIZE - dst_off < 16 ||
13337 tg3_nvram_read(tp, offset + 8, &ver_offset))
13338 return;
13340 offset = offset + ver_offset - start;
13341 for (i = 0; i < 16; i += 4) {
13342 __be32 v;
13343 if (tg3_nvram_read_be32(tp, offset + i, &v))
13344 return;
13346 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13348 } else {
13349 u32 major, minor;
13351 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13352 return;
13354 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13355 TG3_NVM_BCVER_MAJSFT;
13356 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13357 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13358 "v%d.%02d", major, minor);
13362 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13364 u32 val, major, minor;
13366 /* Use native endian representation */
13367 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13368 return;
13370 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13371 TG3_NVM_HWSB_CFG1_MAJSFT;
13372 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13373 TG3_NVM_HWSB_CFG1_MINSFT;
13375 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13378 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13380 u32 offset, major, minor, build;
13382 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13384 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13385 return;
13387 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13388 case TG3_EEPROM_SB_REVISION_0:
13389 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13390 break;
13391 case TG3_EEPROM_SB_REVISION_2:
13392 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13393 break;
13394 case TG3_EEPROM_SB_REVISION_3:
13395 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13396 break;
13397 case TG3_EEPROM_SB_REVISION_4:
13398 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13399 break;
13400 case TG3_EEPROM_SB_REVISION_5:
13401 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13402 break;
13403 case TG3_EEPROM_SB_REVISION_6:
13404 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13405 break;
13406 default:
13407 return;
13410 if (tg3_nvram_read(tp, offset, &val))
13411 return;
13413 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13414 TG3_EEPROM_SB_EDH_BLD_SHFT;
13415 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13416 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13417 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13419 if (minor > 99 || build > 26)
13420 return;
13422 offset = strlen(tp->fw_ver);
13423 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13424 " v%d.%02d", major, minor);
13426 if (build > 0) {
13427 offset = strlen(tp->fw_ver);
13428 if (offset < TG3_VER_SIZE - 1)
13429 tp->fw_ver[offset] = 'a' + build - 1;
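/* Worked example (annotation): major=1, minor=2, build=3 produces
 * "sb v1.02" via the snprintf above, then 'a' + 3 - 1 == 'c' is
 * appended, giving "sb v1.02c". build == 0 appends no letter, and
 * build > 26 was rejected earlier since it has no letter to map to.
 */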
13433 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13435 u32 val, offset, start;
13436 int i, vlen;
13438 for (offset = TG3_NVM_DIR_START;
13439 offset < TG3_NVM_DIR_END;
13440 offset += TG3_NVM_DIRENT_SIZE) {
13441 if (tg3_nvram_read(tp, offset, &val))
13442 return;
13444 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13445 break;
13448 if (offset == TG3_NVM_DIR_END)
13449 return;
13451 if (!tg3_flag(tp, 5705_PLUS))
13452 start = 0x08000000;
13453 else if (tg3_nvram_read(tp, offset - 4, &start))
13454 return;
13456 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13457 !tg3_fw_img_is_valid(tp, offset) ||
13458 tg3_nvram_read(tp, offset + 8, &val))
13459 return;
13461 offset += val - start;
13463 vlen = strlen(tp->fw_ver);
13465 tp->fw_ver[vlen++] = ',';
13466 tp->fw_ver[vlen++] = ' ';
13468 for (i = 0; i < 4; i++) {
13469 __be32 v;
13470 if (tg3_nvram_read_be32(tp, offset, &v))
13471 return;
13473 offset += sizeof(v);
13475 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13476 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13477 break;
13480 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13481 vlen += sizeof(v);
13485 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13487 int vlen;
13488 u32 apedata;
13489 char *fwtype;
13491 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13492 return;
13494 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13495 if (apedata != APE_SEG_SIG_MAGIC)
13496 return;
13498 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13499 if (!(apedata & APE_FW_STATUS_READY))
13500 return;
13502 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13504 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13505 tg3_flag_set(tp, APE_HAS_NCSI);
13506 fwtype = "NCSI";
13507 } else {
13508 fwtype = "DASH";
13511 vlen = strlen(tp->fw_ver);
13513 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13514 fwtype,
13515 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13516 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13517 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13518 (apedata & APE_FW_VERSION_BLDMSK));
13521 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13523 u32 val;
13524 bool vpd_vers = false;
13526 if (tp->fw_ver[0] != 0)
13527 vpd_vers = true;
13529 if (tg3_flag(tp, NO_NVRAM)) {
13530 strcat(tp->fw_ver, "sb");
13531 return;
13534 if (tg3_nvram_read(tp, 0, &val))
13535 return;
13537 if (val == TG3_EEPROM_MAGIC)
13538 tg3_read_bc_ver(tp);
13539 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13540 tg3_read_sb_ver(tp, val);
13541 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13542 tg3_read_hwsb_ver(tp);
13543 else
13544 return;
13546 if (vpd_vers)
13547 goto done;
13549 if (tg3_flag(tp, ENABLE_APE)) {
13550 if (tg3_flag(tp, ENABLE_ASF))
13551 tg3_read_dash_ver(tp);
13552 } else if (tg3_flag(tp, ENABLE_ASF)) {
13553 tg3_read_mgmtfw_ver(tp);
13556 done:
13557 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13560 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13562 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13564 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13565 return TG3_RX_RET_MAX_SIZE_5717;
13566 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13567 return TG3_RX_RET_MAX_SIZE_5700;
13568 else
13569 return TG3_RX_RET_MAX_SIZE_5705;
13572 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13573 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13574 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13575 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13576 { },
13579 static int __devinit tg3_get_invariants(struct tg3 *tp)
13581 u32 misc_ctrl_reg;
13582 u32 pci_state_reg, grc_misc_cfg;
13583 u32 val;
13584 u16 pci_cmd;
13585 int err;
13587 /* Force memory write invalidate off. If we leave it on,
13588 * then on 5700_BX chips we have to enable a workaround.
13589 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13590 * to match the cacheline size. The Broadcom driver has this
13591 * workaround but turns MWI off all the time, so it never uses
13592 * it. This seems to suggest that the workaround is insufficient.
13593 */
13594 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13595 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13596 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13598 /* Important! -- Make sure register accesses are byteswapped
13599 * correctly. Also, for those chips that require it, make
13600 * sure that indirect register accesses are enabled before
13601 * the first operation.
13602 */
13603 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13604 &misc_ctrl_reg);
13605 tp->misc_host_ctrl |= (misc_ctrl_reg &
13606 MISC_HOST_CTRL_CHIPREV);
13607 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13608 tp->misc_host_ctrl);
13610 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13611 MISC_HOST_CTRL_CHIPREV_SHIFT);
13612 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13613 u32 prod_id_asic_rev;
13615 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13616 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13617 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13618 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13619 pci_read_config_dword(tp->pdev,
13620 TG3PCI_GEN2_PRODID_ASICREV,
13621 &prod_id_asic_rev);
13622 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13623 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13624 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13625 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13626 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13627 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13628 pci_read_config_dword(tp->pdev,
13629 TG3PCI_GEN15_PRODID_ASICREV,
13630 &prod_id_asic_rev);
13631 else
13632 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13633 &prod_id_asic_rev);
13635 tp->pci_chip_rev_id = prod_id_asic_rev;
13638 /* Wrong chip ID in 5752 A0. This code can be removed later
13639 * as A0 is not in production.
13640 */
13641 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13642 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13644 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13645 * we need to disable memory and use config. cycles
13646 * only to access all registers. The 5702/03 chips
13647 * can mistakenly decode the special cycles from the
13648 * ICH chipsets as memory write cycles, causing corruption
13649 * of register and memory space. Only certain ICH bridges
13650 * will drive special cycles with non-zero data during the
13651 * address phase which can fall within the 5703's address
13652 * range. This is not an ICH bug as the PCI spec allows
13653 * non-zero address during special cycles. However, only
13654 * these ICH bridges are known to drive non-zero addresses
13655 * during special cycles.
13657 * Since special cycles do not cross PCI bridges, we only
13658 * enable this workaround if the 5703 is on the secondary
13659 * bus of these ICH bridges.
13660 */
13661 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13662 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13663 static struct tg3_dev_id {
13664 u32 vendor;
13665 u32 device;
13666 u32 rev;
13667 } ich_chipsets[] = {
13668 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13669 PCI_ANY_ID },
13670 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13671 PCI_ANY_ID },
13672 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13673 0xa },
13674 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13675 PCI_ANY_ID },
13676 { },
13678 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13679 struct pci_dev *bridge = NULL;
13681 while (pci_id->vendor != 0) {
13682 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13683 bridge);
13684 if (!bridge) {
13685 pci_id++;
13686 continue;
13688 if (pci_id->rev != PCI_ANY_ID) {
13689 if (bridge->revision > pci_id->rev)
13690 continue;
13692 if (bridge->subordinate &&
13693 (bridge->subordinate->number ==
13694 tp->pdev->bus->number)) {
13695 tg3_flag_set(tp, ICH_WORKAROUND);
13696 pci_dev_put(bridge);
13697 break;
13702 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13703 static struct tg3_dev_id {
13704 u32 vendor;
13705 u32 device;
13706 } bridge_chipsets[] = {
13707 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13708 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13709 { },
13711 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13712 struct pci_dev *bridge = NULL;
13714 while (pci_id->vendor != 0) {
13715 bridge = pci_get_device(pci_id->vendor,
13716 pci_id->device,
13717 bridge);
13718 if (!bridge) {
13719 pci_id++;
13720 continue;
13722 if (bridge->subordinate &&
13723 (bridge->subordinate->number <=
13724 tp->pdev->bus->number) &&
13725 (bridge->subordinate->subordinate >=
13726 tp->pdev->bus->number)) {
13727 tg3_flag_set(tp, 5701_DMA_BUG);
13728 pci_dev_put(bridge);
13729 break;
13734 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13735 * DMA addresses > 40-bit. This bridge may have additional
13736 * 57xx devices behind it in some 4-port NIC designs, for example.
13737 * Any tg3 device found behind the bridge will also need the 40-bit
13738 * DMA workaround.
13739 */
13740 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13741 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13742 tg3_flag_set(tp, 5780_CLASS);
13743 tg3_flag_set(tp, 40BIT_DMA_BUG);
13744 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13745 } else {
13746 struct pci_dev *bridge = NULL;
13748 do {
13749 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13750 PCI_DEVICE_ID_SERVERWORKS_EPB,
13751 bridge);
13752 if (bridge && bridge->subordinate &&
13753 (bridge->subordinate->number <=
13754 tp->pdev->bus->number) &&
13755 (bridge->subordinate->subordinate >=
13756 tp->pdev->bus->number)) {
13757 tg3_flag_set(tp, 40BIT_DMA_BUG);
13758 pci_dev_put(bridge);
13759 break;
13761 } while (bridge);
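/* Illustrative note (hedged sketch, not the exact probe-time code):
 * the 40BIT_DMA_BUG flag set above is typically honoured later in
 * the probe path by capping the DMA mask, along the lines of
 *
 *	if (tg3_flag(tp, 40BIT_DMA_BUG))
 *		dma_mask = DMA_BIT_MASK(40);
 *	err = pci_set_dma_mask(tp->pdev, dma_mask);
 */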
13764 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13765 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13766 tp->pdev_peer = tg3_find_peer(tp);
13768 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13769 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13770 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13771 tg3_flag_set(tp, 5717_PLUS);
13773 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13774 tg3_flag(tp, 5717_PLUS))
13775 tg3_flag_set(tp, 57765_PLUS);
13777 /* Intentionally exclude ASIC_REV_5906 */
13778 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13779 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13780 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13781 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13782 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13783 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13784 tg3_flag(tp, 57765_PLUS))
13785 tg3_flag_set(tp, 5755_PLUS);
13787 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13788 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13789 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13790 tg3_flag(tp, 5755_PLUS) ||
13791 tg3_flag(tp, 5780_CLASS))
13792 tg3_flag_set(tp, 5750_PLUS);
13794 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13795 tg3_flag(tp, 5750_PLUS))
13796 tg3_flag_set(tp, 5705_PLUS);
13798 /* Determine TSO capabilities */
13799 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13800 ; /* Do nothing. HW bug. */
13801 else if (tg3_flag(tp, 57765_PLUS))
13802 tg3_flag_set(tp, HW_TSO_3);
13803 else if (tg3_flag(tp, 5755_PLUS) ||
13804 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13805 tg3_flag_set(tp, HW_TSO_2);
13806 else if (tg3_flag(tp, 5750_PLUS)) {
13807 tg3_flag_set(tp, HW_TSO_1);
13808 tg3_flag_set(tp, TSO_BUG);
13809 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13810 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13811 tg3_flag_clear(tp, TSO_BUG);
13812 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13813 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13814 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13815 tg3_flag_set(tp, TSO_BUG);
13816 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13817 tp->fw_needed = FIRMWARE_TG3TSO5;
13818 else
13819 tp->fw_needed = FIRMWARE_TG3TSO;
13822 /* Selectively allow TSO based on operating conditions */
13823 if (tg3_flag(tp, HW_TSO_1) ||
13824 tg3_flag(tp, HW_TSO_2) ||
13825 tg3_flag(tp, HW_TSO_3) ||
13826 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13827 tg3_flag_set(tp, TSO_CAPABLE);
13828 else {
13829 tg3_flag_clear(tp, TSO_CAPABLE);
13830 tg3_flag_clear(tp, TSO_BUG);
13831 tp->fw_needed = NULL;
13834 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13835 tp->fw_needed = FIRMWARE_TG3;
13837 tp->irq_max = 1;
13839 if (tg3_flag(tp, 5750_PLUS)) {
13840 tg3_flag_set(tp, SUPPORT_MSI);
13841 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13842 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13843 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13844 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13845 tp->pdev_peer == tp->pdev))
13846 tg3_flag_clear(tp, SUPPORT_MSI);
13848 if (tg3_flag(tp, 5755_PLUS) ||
13849 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13850 tg3_flag_set(tp, 1SHOT_MSI);
13853 if (tg3_flag(tp, 57765_PLUS)) {
13854 tg3_flag_set(tp, SUPPORT_MSIX);
13855 tp->irq_max = TG3_IRQ_MAX_VECS;
13859 if (tg3_flag(tp, 5755_PLUS))
13860 tg3_flag_set(tp, SHORT_DMA_BUG);
13862 if (tg3_flag(tp, 5717_PLUS))
13863 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13865 if (tg3_flag(tp, 57765_PLUS) &&
13866 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13867 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13869 if (!tg3_flag(tp, 5705_PLUS) ||
13870 tg3_flag(tp, 5780_CLASS) ||
13871 tg3_flag(tp, USE_JUMBO_BDFLAG))
13872 tg3_flag_set(tp, JUMBO_CAPABLE);
13874 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13875 &pci_state_reg);
13877 if (pci_is_pcie(tp->pdev)) {
13878 u16 lnkctl;
13880 tg3_flag_set(tp, PCI_EXPRESS);
13882 tp->pcie_readrq = 4096;
13883 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13884 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13885 tp->pcie_readrq = 2048;
13887 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13889 pci_read_config_word(tp->pdev,
13890 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
13891 &lnkctl);
13892 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13893 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13894 ASIC_REV_5906) {
13895 tg3_flag_clear(tp, HW_TSO_2);
13896 tg3_flag_clear(tp, TSO_CAPABLE);
13898 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13899 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13900 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13901 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13902 tg3_flag_set(tp, CLKREQ_BUG);
13903 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13904 tg3_flag_set(tp, L1PLLPD_EN);
13906 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13907 /* BCM5785 devices are effectively PCIe devices, and should
13908 * follow PCIe codepaths, but do not have a PCIe capabilities
13909 * section.
13910 */
13911 tg3_flag_set(tp, PCI_EXPRESS);
13912 } else if (!tg3_flag(tp, 5705_PLUS) ||
13913 tg3_flag(tp, 5780_CLASS)) {
13914 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13915 if (!tp->pcix_cap) {
13916 dev_err(&tp->pdev->dev,
13917 "Cannot find PCI-X capability, aborting\n");
13918 return -EIO;
13921 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13922 tg3_flag_set(tp, PCIX_MODE);
13925 /* If we have an AMD 762 or VIA K8T800 chipset, write
13926 * reordering to the mailbox registers done by the host
13927 * controller can cause major troubles. We read back from
13928 * every mailbox register write to force the writes to be
13929 * posted to the chip in order.
13930 */
13931 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13932 !tg3_flag(tp, PCI_EXPRESS))
13933 tg3_flag_set(tp, MBOX_WRITE_REORDER);
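/* Illustrative note: the MBOX_WRITE_REORDER handling selected above
 * amounts to following every mailbox write with a read of the same
 * register, e.g.
 *
 *	tw32(mbox, val);	-- posted write
 *	tr32(mbox);		-- read-back forces it to the chip
 *
 * (a sketch of the idiom; the real accessor is installed further
 * down via tg3_write_flush_reg32).
 */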
13935 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13936 &tp->pci_cacheline_sz);
13937 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13938 &tp->pci_lat_timer);
13939 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13940 tp->pci_lat_timer < 64) {
13941 tp->pci_lat_timer = 64;
13942 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13943 tp->pci_lat_timer);
13946 /* Important! -- It is critical that the PCI-X hw workaround
13947 * situation is decided before the first MMIO register access.
13948 */
13949 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13950 /* 5700 BX chips need to have their TX producer index
13951 * mailboxes written twice to work around a bug.
13952 */
13953 tg3_flag_set(tp, TXD_MBOX_HWBUG);
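/* Illustrative note: with TXD_MBOX_HWBUG set, the TX mailbox
 * accessor installed below issues each producer-index write twice,
 * e.g.
 *
 *	tw32(mbox, val);
 *	tw32(mbox, val);	-- repeated write for the BX erratum
 *
 * (a sketch of the workaround, not the exact accessor body).
 */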
13955 /* If we are in PCI-X mode, enable register write workaround.
13957 * The workaround is to use indirect register accesses
13958 * for all chip writes not to mailbox registers.
13959 */
13960 if (tg3_flag(tp, PCIX_MODE)) {
13961 u32 pm_reg;
13963 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13965 /* The chip can have its power management PCI config
13966 * space registers clobbered due to this bug.
13967 * So explicitly force the chip into D0 here.
13968 */
13969 pci_read_config_dword(tp->pdev,
13970 tp->pm_cap + PCI_PM_CTRL,
13971 &pm_reg);
13972 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13973 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13974 pci_write_config_dword(tp->pdev,
13975 tp->pm_cap + PCI_PM_CTRL,
13976 pm_reg);
13978 /* Also, force SERR#/PERR# in PCI command. */
13979 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13980 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13981 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13985 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13986 tg3_flag_set(tp, PCI_HIGH_SPEED);
13987 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13988 tg3_flag_set(tp, PCI_32BIT);
13990 /* Chip-specific fixup from Broadcom driver */
13991 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13992 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13993 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13994 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13997 /* Default fast path register access methods */
13998 tp->read32 = tg3_read32;
13999 tp->write32 = tg3_write32;
14000 tp->read32_mbox = tg3_read32;
14001 tp->write32_mbox = tg3_write32;
14002 tp->write32_tx_mbox = tg3_write32;
14003 tp->write32_rx_mbox = tg3_write32;
14005 /* Various workaround register access methods */
14006 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14007 tp->write32 = tg3_write_indirect_reg32;
14008 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14009 (tg3_flag(tp, PCI_EXPRESS) &&
14010 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14011 /*
14012 * Back-to-back register writes can cause problems on these
14013 * chips; the workaround is to read back all reg writes
14014 * except those to mailbox regs.
14015 *
14016 * See tg3_write_indirect_reg32().
14017 */
14018 tp->write32 = tg3_write_flush_reg32;
14021 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14022 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14023 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14024 tp->write32_rx_mbox = tg3_write_flush_reg32;
14027 if (tg3_flag(tp, ICH_WORKAROUND)) {
14028 tp->read32 = tg3_read_indirect_reg32;
14029 tp->write32 = tg3_write_indirect_reg32;
14030 tp->read32_mbox = tg3_read_indirect_mbox;
14031 tp->write32_mbox = tg3_write_indirect_mbox;
14032 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14033 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14035 iounmap(tp->regs);
14036 tp->regs = NULL;
14038 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14039 pci_cmd &= ~PCI_COMMAND_MEMORY;
14040 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14042 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14043 tp->read32_mbox = tg3_read32_mbox_5906;
14044 tp->write32_mbox = tg3_write32_mbox_5906;
14045 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14046 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14049 if (tp->write32 == tg3_write_indirect_reg32 ||
14050 (tg3_flag(tp, PCIX_MODE) &&
14051 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14053 tg3_flag_set(tp, SRAM_USE_CONFIG);
14055 /* The memory arbiter has to be enabled in order for SRAM accesses
14056 * to succeed. Normally on powerup the tg3 chip firmware will make
14057 * sure it is enabled, but other entities such as system netboot
14058 * code might disable it.
14059 */
14060 val = tr32(MEMARB_MODE);
14061 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14063 if (tg3_flag(tp, PCIX_MODE)) {
14064 pci_read_config_dword(tp->pdev,
14065 tp->pcix_cap + PCI_X_STATUS, &val);
14066 tp->pci_fn = val & 0x7;
14067 } else {
14068 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14071 /* Get eeprom hw config before calling tg3_set_power_state().
14072 * In particular, the TG3_FLAG_IS_NIC flag must be
14073 * determined before calling tg3_set_power_state() so that
14074 * we know whether or not to switch out of Vaux power.
14075 * When the flag is set, it means that GPIO1 is used for eeprom
14076 * write protect and also implies that it is a LOM where GPIOs
14077 * are not used to switch power.
14078 */
14079 tg3_get_eeprom_hw_cfg(tp);
14081 if (tg3_flag(tp, ENABLE_APE)) {
14082 /* Allow reads and writes to the
14083 * APE register and memory space.
14084 */
14085 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14086 PCISTATE_ALLOW_APE_SHMEM_WR |
14087 PCISTATE_ALLOW_APE_PSPACE_WR;
14088 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14089 pci_state_reg);
14091 tg3_ape_lock_init(tp);
14094 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14095 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14096 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14097 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14098 tg3_flag(tp, 57765_PLUS))
14099 tg3_flag_set(tp, CPMU_PRESENT);
14101 /* Set up tp->grc_local_ctrl before calling
14102 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14103 * will bring 5700's external PHY out of reset.
14104 * It is also used as eeprom write protect on LOMs.
14105 */
14106 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14107 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14108 tg3_flag(tp, EEPROM_WRITE_PROT))
14109 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14110 GRC_LCLCTRL_GPIO_OUTPUT1);
14111 /* Unused GPIO3 must be driven as output on 5752 because there
14112 * are no pull-up resistors on unused GPIO pins.
14113 */
14114 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14115 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14117 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14118 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14119 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14120 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14122 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14123 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14124 /* Turn off the debug UART. */
14125 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14126 if (tg3_flag(tp, IS_NIC))
14127 /* Keep VMain power. */
14128 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14129 GRC_LCLCTRL_GPIO_OUTPUT0;
14132 /* Switch out of Vaux if it is a NIC */
14133 tg3_pwrsrc_switch_to_vmain(tp);
14135 /* Derive initial jumbo mode from MTU assigned in
14136 * ether_setup() via the alloc_etherdev() call
14137 */
14138 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14139 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14141 /* Determine WakeOnLan speed to use. */
14142 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14143 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14144 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14145 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14146 tg3_flag_clear(tp, WOL_SPEED_100MB);
14147 } else {
14148 tg3_flag_set(tp, WOL_SPEED_100MB);
14151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14152 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14154 /* A few boards don't want Ethernet@WireSpeed phy feature */
14155 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14156 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14157 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14158 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14159 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14160 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14161 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14163 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14164 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14165 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14166 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14167 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14169 if (tg3_flag(tp, 5705_PLUS) &&
14170 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14171 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14172 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14173 !tg3_flag(tp, 57765_PLUS)) {
14174 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14176 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14177 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14178 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14179 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14180 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14181 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14182 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14183 } else
14184 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14187 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14188 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14189 tp->phy_otp = tg3_read_otp_phycfg(tp);
14190 if (tp->phy_otp == 0)
14191 tp->phy_otp = TG3_OTP_DEFAULT;
14194 if (tg3_flag(tp, CPMU_PRESENT))
14195 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14196 else
14197 tp->mi_mode = MAC_MI_MODE_BASE;
14199 tp->coalesce_mode = 0;
14200 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14201 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14202 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14204 /* Set these bits to enable statistics workaround. */
14205 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14206 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14207 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14208 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14209 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14212 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14213 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14214 tg3_flag_set(tp, USE_PHYLIB);
14216 err = tg3_mdio_init(tp);
14217 if (err)
14218 return err;
14220 /* Initialize data/descriptor byte/word swapping. */
14221 val = tr32(GRC_MODE);
14222 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14223 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14224 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14225 GRC_MODE_B2HRX_ENABLE |
14226 GRC_MODE_HTX2B_ENABLE |
14227 GRC_MODE_HOST_STACKUP);
14228 else
14229 val &= GRC_MODE_HOST_STACKUP;
14231 tw32(GRC_MODE, val | tp->grc_mode);
14233 tg3_switch_clocks(tp);
14235 /* Clear this out for sanity. */
14236 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14238 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14239 &pci_state_reg);
14240 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14241 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14242 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14244 if (chiprevid == CHIPREV_ID_5701_A0 ||
14245 chiprevid == CHIPREV_ID_5701_B0 ||
14246 chiprevid == CHIPREV_ID_5701_B2 ||
14247 chiprevid == CHIPREV_ID_5701_B5) {
14248 void __iomem *sram_base;
14250 /* Write some dummy words into the SRAM status block
14251 * area, see if it reads back correctly. If the return
14252 * value is bad, force enable the PCIX workaround.
14253 */
14254 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14256 writel(0x00000000, sram_base);
14257 writel(0x00000000, sram_base + 4);
14258 writel(0xffffffff, sram_base + 4);
14259 if (readl(sram_base) != 0x00000000)
14260 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
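/* Editor's note (interpretation): a nonzero read-back of the first
 * word means the back-to-back writes aimed at the second word
 * corrupted the first, so the indirect-access workaround is forced
 * on for this chip.
 */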
14264 udelay(50);
14265 tg3_nvram_init(tp);
14267 grc_misc_cfg = tr32(GRC_MISC_CFG);
14268 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14270 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14271 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14272 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14273 tg3_flag_set(tp, IS_5788);
14275 if (!tg3_flag(tp, IS_5788) &&
14276 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14277 tg3_flag_set(tp, TAGGED_STATUS);
14278 if (tg3_flag(tp, TAGGED_STATUS)) {
14279 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14280 HOSTCC_MODE_CLRTICK_TXBD);
14282 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14283 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14284 tp->misc_host_ctrl);
14287 /* Preserve the APE MAC_MODE bits */
14288 if (tg3_flag(tp, ENABLE_APE))
14289 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14290 else
14291 tp->mac_mode = TG3_DEF_MAC_MODE;
14293 /* these are limited to 10/100 only */
14294 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14295 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14296 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14297 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14298 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14299 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14300 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14301 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14302 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14303 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14304 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14305 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14306 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14307 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14308 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14309 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14311 err = tg3_phy_probe(tp);
14312 if (err) {
14313 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14314 /* ... but do not return immediately ... */
14315 tg3_mdio_fini(tp);
14318 tg3_read_vpd(tp);
14319 tg3_read_fw_ver(tp);
14321 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14322 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14323 } else {
14324 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14325 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14326 else
14327 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14330 /* 5700 {AX,BX} chips have a broken status block link
14331 * change bit implementation, so we must use the
14332 * status register in those cases.
14333 */
14334 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14335 tg3_flag_set(tp, USE_LINKCHG_REG);
14336 else
14337 tg3_flag_clear(tp, USE_LINKCHG_REG);
14339 /* The led_ctrl is set during tg3_phy_probe; here we might
14340 * have to force the link status polling mechanism based
14341 * upon subsystem IDs.
14342 */
14343 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14344 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14345 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14346 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14347 tg3_flag_set(tp, USE_LINKCHG_REG);
14350 /* For all SERDES we poll the MAC status register. */
14351 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14352 tg3_flag_set(tp, POLL_SERDES);
14353 else
14354 tg3_flag_clear(tp, POLL_SERDES);
14356 tp->rx_offset = NET_IP_ALIGN;
14357 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14358 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14359 tg3_flag(tp, PCIX_MODE)) {
14360 tp->rx_offset = 0;
14361 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14362 tp->rx_copy_thresh = ~(u16)0;
14363 #endif
14366 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14367 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14368 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14370 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14372 /* Increment the rx prod index on the rx std ring by at most
14373 * 8 for these chips to work around hw errata.
14374 */
14375 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14376 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14377 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14378 tp->rx_std_max_post = 8;
14380 if (tg3_flag(tp, ASPM_WORKAROUND))
14381 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14382 PCIE_PWR_MGMT_L1_THRESH_MSK;
14384 return err;
14387 #ifdef CONFIG_SPARC
14388 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14390 struct net_device *dev = tp->dev;
14391 struct pci_dev *pdev = tp->pdev;
14392 struct device_node *dp = pci_device_to_OF_node(pdev);
14393 const unsigned char *addr;
14394 int len;
14396 addr = of_get_property(dp, "local-mac-address", &len);
14397 if (addr && len == 6) {
14398 memcpy(dev->dev_addr, addr, 6);
14399 memcpy(dev->perm_addr, dev->dev_addr, 6);
14400 return 0;
14402 return -ENODEV;
14405 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14407 struct net_device *dev = tp->dev;
14409 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14410 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14411 return 0;
14413 #endif
14415 static int __devinit tg3_get_device_address(struct tg3 *tp)
14417 struct net_device *dev = tp->dev;
14418 u32 hi, lo, mac_offset;
14419 int addr_ok = 0;
14421 #ifdef CONFIG_SPARC
14422 if (!tg3_get_macaddr_sparc(tp))
14423 return 0;
14424 #endif
14426 mac_offset = 0x7c;
14427 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14428 tg3_flag(tp, 5780_CLASS)) {
14429 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14430 mac_offset = 0xcc;
14431 if (tg3_nvram_lock(tp))
14432 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14433 else
14434 tg3_nvram_unlock(tp);
14435 } else if (tg3_flag(tp, 5717_PLUS)) {
14436 if (tp->pci_fn & 1)
14437 mac_offset = 0xcc;
14438 if (tp->pci_fn > 1)
14439 mac_offset += 0x18c;
14440 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14441 mac_offset = 0x10;
14443 /* First try to get it from MAC address mailbox. */
14444 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14445 if ((hi >> 16) == 0x484b) {
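/* Editor's note: 0x484b is ASCII "HK"; it apparently serves as the
 * bootcode's "valid MAC stored here" marker for this mailbox.
 */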
14446 dev->dev_addr[0] = (hi >> 8) & 0xff;
14447 dev->dev_addr[1] = (hi >> 0) & 0xff;
14449 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14450 dev->dev_addr[2] = (lo >> 24) & 0xff;
14451 dev->dev_addr[3] = (lo >> 16) & 0xff;
14452 dev->dev_addr[4] = (lo >> 8) & 0xff;
14453 dev->dev_addr[5] = (lo >> 0) & 0xff;
14455 /* Some old bootcode may report a 0 MAC address in SRAM */
14456 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14458 if (!addr_ok) {
14459 /* Next, try NVRAM. */
14460 if (!tg3_flag(tp, NO_NVRAM) &&
14461 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14462 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14463 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14464 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14466 /* Finally just fetch it out of the MAC control regs. */
14467 else {
14468 hi = tr32(MAC_ADDR_0_HIGH);
14469 lo = tr32(MAC_ADDR_0_LOW);
14471 dev->dev_addr[5] = lo & 0xff;
14472 dev->dev_addr[4] = (lo >> 8) & 0xff;
14473 dev->dev_addr[3] = (lo >> 16) & 0xff;
14474 dev->dev_addr[2] = (lo >> 24) & 0xff;
14475 dev->dev_addr[1] = hi & 0xff;
14476 dev->dev_addr[0] = (hi >> 8) & 0xff;
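/* Editor's illustration: with hi = 0x00000a1b and lo = 0x2c3d4e5f,
 * the byte extraction above assembles the MAC address
 * 0a:1b:2c:3d:4e:5f.
 */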
14480 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14481 #ifdef CONFIG_SPARC
14482 if (!tg3_get_default_macaddr_sparc(tp))
14483 return 0;
14484 #endif
14485 return -EINVAL;
14487 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14488 return 0;
14491 #define BOUNDARY_SINGLE_CACHELINE 1
14492 #define BOUNDARY_MULTI_CACHELINE 2
14494 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14496 int cacheline_size;
14497 u8 byte;
14498 int goal;
14500 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14501 if (byte == 0)
14502 cacheline_size = 1024;
14503 else
14504 cacheline_size = (int) byte * 4;
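/* Editor's note: PCI_CACHE_LINE_SIZE is encoded in 32-bit (4-byte)
 * units, hence the multiply by 4; e.g. a register value of 16 means
 * a 64-byte cache line. A value of 0 is treated as unknown and the
 * largest boundary, 1024 bytes, is assumed.
 */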
14506 /* On 5703 and later chips, the boundary bits have no
14507 * effect.
14508 */
14509 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14510 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14511 !tg3_flag(tp, PCI_EXPRESS))
14512 goto out;
14514 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14515 goal = BOUNDARY_MULTI_CACHELINE;
14516 #else
14517 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14518 goal = BOUNDARY_SINGLE_CACHELINE;
14519 #else
14520 goal = 0;
14521 #endif
14522 #endif
14524 if (tg3_flag(tp, 57765_PLUS)) {
14525 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14526 goto out;
14529 if (!goal)
14530 goto out;
14532 /* PCI controllers on most RISC systems tend to disconnect
14533 * when a device tries to burst across a cache-line boundary.
14534 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14535 *
14536 * Unfortunately, for PCI-E there are only limited
14537 * write-side controls for this, and thus for reads
14538 * we will still get the disconnects. We'll also waste
14539 * these PCI cycles for both read and write for chips
14540 * other than 5700 and 5701 which do not implement the
14541 * boundary bits.
14542 */
14543 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14544 switch (cacheline_size) {
14545 case 16:
14546 case 32:
14547 case 64:
14548 case 128:
14549 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14550 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14551 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14552 } else {
14553 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14554 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14556 break;
14558 case 256:
14559 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14560 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14561 break;
14563 default:
14564 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14565 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14566 break;
14568 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14569 switch (cacheline_size) {
14570 case 16:
14571 case 32:
14572 case 64:
14573 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14574 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14575 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14576 break;
14578 /* fallthrough */
14579 case 128:
14580 default:
14581 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14582 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14583 break;
14585 } else {
14586 switch (cacheline_size) {
14587 case 16:
14588 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14589 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14590 DMA_RWCTRL_WRITE_BNDRY_16);
14591 break;
14593 /* fallthrough */
14594 case 32:
14595 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14596 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14597 DMA_RWCTRL_WRITE_BNDRY_32);
14598 break;
14600 /* fallthrough */
14601 case 64:
14602 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14603 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14604 DMA_RWCTRL_WRITE_BNDRY_64);
14605 break;
14607 /* fallthrough */
14608 case 128:
14609 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14610 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14611 DMA_RWCTRL_WRITE_BNDRY_128);
14612 break;
14614 /* fallthrough */
14615 case 256:
14616 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14617 DMA_RWCTRL_WRITE_BNDRY_256);
14618 break;
14619 case 512:
14620 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14621 DMA_RWCTRL_WRITE_BNDRY_512);
14622 break;
14623 case 1024:
14624 default:
14625 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14626 DMA_RWCTRL_WRITE_BNDRY_1024);
14627 break;
14631 out:
14632 return val;
14635 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14637 struct tg3_internal_buffer_desc test_desc;
14638 u32 sram_dma_descs;
14639 int i, ret;
14641 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14643 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14644 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14645 tw32(RDMAC_STATUS, 0);
14646 tw32(WDMAC_STATUS, 0);
14648 tw32(BUFMGR_MODE, 0);
14649 tw32(FTQ_RESET, 0);
14651 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14652 test_desc.addr_lo = buf_dma & 0xffffffff;
14653 test_desc.nic_mbuf = 0x00002100;
14654 test_desc.len = size;
14656 /*
14657 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14658 * the *second* time the tg3 driver was getting loaded after an
14659 * initial scan.
14660 *
14661 * Broadcom tells me:
14662 * ...the DMA engine is connected to the GRC block and a DMA
14663 * reset may affect the GRC block in some unpredictable way...
14664 * The behavior of resets to individual blocks has not been tested.
14665 *
14666 * Broadcom noted the GRC reset will also reset all sub-components.
14667 */
14668 if (to_device) {
14669 test_desc.cqid_sqid = (13 << 8) | 2;
14671 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14672 udelay(40);
14673 } else {
14674 test_desc.cqid_sqid = (16 << 8) | 7;
14676 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14677 udelay(40);
14679 test_desc.flags = 0x00000005;
14681 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14682 u32 val;
14684 val = *(((u32 *)&test_desc) + i);
14685 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14686 sram_dma_descs + (i * sizeof(u32)));
14687 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14689 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
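/* Editor's note: the loop above pokes the test descriptor into NIC
 * SRAM one 32-bit word at a time through the PCI config-space memory
 * window (set the window base, write the data register), then closes
 * the window by restoring a base of 0.
 */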
14691 if (to_device)
14692 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14693 else
14694 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14696 ret = -ENODEV;
14697 for (i = 0; i < 40; i++) {
14698 u32 val;
14700 if (to_device)
14701 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14702 else
14703 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14704 if ((val & 0xffff) == sram_dma_descs) {
14705 ret = 0;
14706 break;
14709 udelay(100);
14712 return ret;
14715 #define TEST_BUFFER_SIZE 0x2000
14717 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14718 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14719 { },
14722 static int __devinit tg3_test_dma(struct tg3 *tp)
14724 dma_addr_t buf_dma;
14725 u32 *buf, saved_dma_rwctrl;
14726 int ret = 0;
14728 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14729 &buf_dma, GFP_KERNEL);
14730 if (!buf) {
14731 ret = -ENOMEM;
14732 goto out_nofree;
14735 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14736 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14738 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14740 if (tg3_flag(tp, 57765_PLUS))
14741 goto out;
14743 if (tg3_flag(tp, PCI_EXPRESS)) {
14744 /* DMA read watermark not used on PCIE */
14745 tp->dma_rwctrl |= 0x00180000;
14746 } else if (!tg3_flag(tp, PCIX_MODE)) {
14747 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14748 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14749 tp->dma_rwctrl |= 0x003f0000;
14750 else
14751 tp->dma_rwctrl |= 0x003f000f;
14752 } else {
14753 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14754 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14755 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14756 u32 read_water = 0x7;
14758 /* If the 5704 is behind the EPB bridge, we can
14759 * do the less restrictive ONE_DMA workaround for
14760 * better performance.
14761 */
14762 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14763 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14764 tp->dma_rwctrl |= 0x8000;
14765 else if (ccval == 0x6 || ccval == 0x7)
14766 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14768 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14769 read_water = 4;
14770 /* Set bit 23 to enable PCIX hw bug fix */
14771 tp->dma_rwctrl |=
14772 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14773 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14774 (1 << 23);
14775 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14776 /* 5780 always in PCIX mode */
14777 tp->dma_rwctrl |= 0x00144000;
14778 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14779 /* 5714 always in PCIX mode */
14780 tp->dma_rwctrl |= 0x00148000;
14781 } else {
14782 tp->dma_rwctrl |= 0x001b000f;
14786 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14787 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14788 tp->dma_rwctrl &= 0xfffffff0;
14790 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14791 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14792 /* Remove this if it causes problems for some boards. */
14793 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14795 /* On 5700/5701 chips, we need to set this bit.
14796 * Otherwise the chip will issue cacheline transactions
14797 * to streamable DMA memory with not all the byte
14798 * enables turned on. This is an error on several
14799 * RISC PCI controllers, in particular sparc64.
14800 *
14801 * On 5703/5704 chips, this bit has been reassigned
14802 * a different meaning. In particular, it is used
14803 * on those chips to enable a PCI-X workaround.
14804 */
14805 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14808 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14810 #if 0
14811 /* Unneeded, already done by tg3_get_invariants. */
14812 tg3_switch_clocks(tp);
14813 #endif
14815 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14816 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14817 goto out;
14819 /* It is best to perform DMA test with maximum write burst size
14820 * to expose the 5700/5701 write DMA bug.
14821 */
14822 saved_dma_rwctrl = tp->dma_rwctrl;
14823 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14824 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14826 while (1) {
14827 u32 *p = buf, i;
14829 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14830 p[i] = i;
14832 /* Send the buffer to the chip. */
14833 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14834 if (ret) {
14835 dev_err(&tp->pdev->dev,
14836 "%s: Buffer write failed. err = %d\n",
14837 __func__, ret);
14838 break;
14841 #if 0
14842 /* validate data reached card RAM correctly. */
14843 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14844 u32 val;
14845 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14846 if (le32_to_cpu(val) != p[i]) {
14847 dev_err(&tp->pdev->dev,
14848 "%s: Buffer corrupted on device! "
14849 "(%d != %d)\n", __func__, val, i);
14850 /* ret = -ENODEV here? */
14852 p[i] = 0;
14854 #endif
14855 /* Now read it back. */
14856 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14857 if (ret) {
14858 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14859 "err = %d\n", __func__, ret);
14860 break;
14863 /* Verify it. */
14864 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14865 if (p[i] == i)
14866 continue;
14868 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14869 DMA_RWCTRL_WRITE_BNDRY_16) {
14870 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14871 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14872 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14873 break;
14874 } else {
14875 dev_err(&tp->pdev->dev,
14876 "%s: Buffer corrupted on read back! "
14877 "(%d != %d)\n", __func__, p[i], i);
14878 ret = -ENODEV;
14879 goto out;
14883 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14884 /* Success. */
14885 ret = 0;
14886 break;
14889 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14890 DMA_RWCTRL_WRITE_BNDRY_16) {
14891 /* DMA test passed without adjusting DMA boundary,
14892 * now look for chipsets that are known to expose the
14893 * DMA bug without failing the test.
14894 */
14895 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14896 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14897 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14898 } else {
14899 /* Safe to use the calculated DMA boundary. */
14900 tp->dma_rwctrl = saved_dma_rwctrl;
14903 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14906 out:
14907 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14908 out_nofree:
14909 return ret;
14912 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14914 if (tg3_flag(tp, 57765_PLUS)) {
14915 tp->bufmgr_config.mbuf_read_dma_low_water =
14916 DEFAULT_MB_RDMA_LOW_WATER_5705;
14917 tp->bufmgr_config.mbuf_mac_rx_low_water =
14918 DEFAULT_MB_MACRX_LOW_WATER_57765;
14919 tp->bufmgr_config.mbuf_high_water =
14920 DEFAULT_MB_HIGH_WATER_57765;
14922 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14923 DEFAULT_MB_RDMA_LOW_WATER_5705;
14924 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14925 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14926 tp->bufmgr_config.mbuf_high_water_jumbo =
14927 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14928 } else if (tg3_flag(tp, 5705_PLUS)) {
14929 tp->bufmgr_config.mbuf_read_dma_low_water =
14930 DEFAULT_MB_RDMA_LOW_WATER_5705;
14931 tp->bufmgr_config.mbuf_mac_rx_low_water =
14932 DEFAULT_MB_MACRX_LOW_WATER_5705;
14933 tp->bufmgr_config.mbuf_high_water =
14934 DEFAULT_MB_HIGH_WATER_5705;
14935 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14936 tp->bufmgr_config.mbuf_mac_rx_low_water =
14937 DEFAULT_MB_MACRX_LOW_WATER_5906;
14938 tp->bufmgr_config.mbuf_high_water =
14939 DEFAULT_MB_HIGH_WATER_5906;
14942 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14943 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14944 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14945 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14946 tp->bufmgr_config.mbuf_high_water_jumbo =
14947 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14948 } else {
14949 tp->bufmgr_config.mbuf_read_dma_low_water =
14950 DEFAULT_MB_RDMA_LOW_WATER;
14951 tp->bufmgr_config.mbuf_mac_rx_low_water =
14952 DEFAULT_MB_MACRX_LOW_WATER;
14953 tp->bufmgr_config.mbuf_high_water =
14954 DEFAULT_MB_HIGH_WATER;
14956 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14957 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14958 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14959 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14960 tp->bufmgr_config.mbuf_high_water_jumbo =
14961 DEFAULT_MB_HIGH_WATER_JUMBO;
14964 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14965 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14968 static char * __devinit tg3_phy_string(struct tg3 *tp)
14970 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14971 case TG3_PHY_ID_BCM5400: return "5400";
14972 case TG3_PHY_ID_BCM5401: return "5401";
14973 case TG3_PHY_ID_BCM5411: return "5411";
14974 case TG3_PHY_ID_BCM5701: return "5701";
14975 case TG3_PHY_ID_BCM5703: return "5703";
14976 case TG3_PHY_ID_BCM5704: return "5704";
14977 case TG3_PHY_ID_BCM5705: return "5705";
14978 case TG3_PHY_ID_BCM5750: return "5750";
14979 case TG3_PHY_ID_BCM5752: return "5752";
14980 case TG3_PHY_ID_BCM5714: return "5714";
14981 case TG3_PHY_ID_BCM5780: return "5780";
14982 case TG3_PHY_ID_BCM5755: return "5755";
14983 case TG3_PHY_ID_BCM5787: return "5787";
14984 case TG3_PHY_ID_BCM5784: return "5784";
14985 case TG3_PHY_ID_BCM5756: return "5722/5756";
14986 case TG3_PHY_ID_BCM5906: return "5906";
14987 case TG3_PHY_ID_BCM5761: return "5761";
14988 case TG3_PHY_ID_BCM5718C: return "5718C";
14989 case TG3_PHY_ID_BCM5718S: return "5718S";
14990 case TG3_PHY_ID_BCM57765: return "57765";
14991 case TG3_PHY_ID_BCM5719C: return "5719C";
14992 case TG3_PHY_ID_BCM5720C: return "5720C";
14993 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14994 case 0: return "serdes";
14995 default: return "unknown";
14999 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15001 if (tg3_flag(tp, PCI_EXPRESS)) {
15002 strcpy(str, "PCI Express");
15003 return str;
15004 } else if (tg3_flag(tp, PCIX_MODE)) {
15005 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15007 strcpy(str, "PCIX:");
15009 if ((clock_ctrl == 7) ||
15010 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15011 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15012 strcat(str, "133MHz");
15013 else if (clock_ctrl == 0)
15014 strcat(str, "33MHz");
15015 else if (clock_ctrl == 2)
15016 strcat(str, "50MHz");
15017 else if (clock_ctrl == 4)
15018 strcat(str, "66MHz");
15019 else if (clock_ctrl == 6)
15020 strcat(str, "100MHz");
15021 } else {
15022 strcpy(str, "PCI:");
15023 if (tg3_flag(tp, PCI_HIGH_SPEED))
15024 strcat(str, "66MHz");
15025 else
15026 strcat(str, "33MHz");
15028 if (tg3_flag(tp, PCI_32BIT))
15029 strcat(str, ":32-bit");
15030 else
15031 strcat(str, ":64-bit");
15032 return str;
15035 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15037 struct pci_dev *peer;
15038 unsigned int func, devnr = tp->pdev->devfn & ~7;
15040 for (func = 0; func < 8; func++) {
15041 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15042 if (peer && peer != tp->pdev)
15043 break;
15044 pci_dev_put(peer);
15046 /* 5704 can be configured in single-port mode; set peer to
15047 * tp->pdev in that case.
15048 */
15049 if (!peer) {
15050 peer = tp->pdev;
15051 return peer;
15054 /*
15055 * We don't need to keep the refcount elevated; there's no way
15056 * to remove one half of this device without removing the other.
15057 */
15058 pci_dev_put(peer);
15060 return peer;
15063 static void __devinit tg3_init_coal(struct tg3 *tp)
15065 struct ethtool_coalesce *ec = &tp->coal;
15067 memset(ec, 0, sizeof(*ec));
15068 ec->cmd = ETHTOOL_GCOALESCE;
15069 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15070 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15071 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15072 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15073 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15074 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15075 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15076 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15077 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15079 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15080 HOSTCC_MODE_CLRTICK_TXBD)) {
15081 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15082 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15083 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15084 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15087 if (tg3_flag(tp, 5705_PLUS)) {
15088 ec->rx_coalesce_usecs_irq = 0;
15089 ec->tx_coalesce_usecs_irq = 0;
15090 ec->stats_block_coalesce_usecs = 0;
15094 static const struct net_device_ops tg3_netdev_ops = {
15095 .ndo_open = tg3_open,
15096 .ndo_stop = tg3_close,
15097 .ndo_start_xmit = tg3_start_xmit,
15098 .ndo_get_stats64 = tg3_get_stats64,
15099 .ndo_validate_addr = eth_validate_addr,
15100 .ndo_set_multicast_list = tg3_set_rx_mode,
15101 .ndo_set_mac_address = tg3_set_mac_addr,
15102 .ndo_do_ioctl = tg3_ioctl,
15103 .ndo_tx_timeout = tg3_tx_timeout,
15104 .ndo_change_mtu = tg3_change_mtu,
15105 .ndo_fix_features = tg3_fix_features,
15106 .ndo_set_features = tg3_set_features,
15107 #ifdef CONFIG_NET_POLL_CONTROLLER
15108 .ndo_poll_controller = tg3_poll_controller,
15109 #endif
15112 static int __devinit tg3_init_one(struct pci_dev *pdev,
15113 const struct pci_device_id *ent)
15115 struct net_device *dev;
15116 struct tg3 *tp;
15117 int i, err, pm_cap;
15118 u32 sndmbx, rcvmbx, intmbx;
15119 char str[40];
15120 u64 dma_mask, persist_dma_mask;
15121 u32 features = 0;
15123 printk_once(KERN_INFO "%s\n", version);
15125 err = pci_enable_device(pdev);
15126 if (err) {
15127 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15128 return err;
15131 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15132 if (err) {
15133 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15134 goto err_out_disable_pdev;
15137 pci_set_master(pdev);
15139 /* Find power-management capability. */
15140 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15141 if (pm_cap == 0) {
15142 dev_err(&pdev->dev,
15143 "Cannot find Power Management capability, aborting\n");
15144 err = -EIO;
15145 goto err_out_free_res;
15148 err = pci_set_power_state(pdev, PCI_D0);
15149 if (err) {
15150 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15151 goto err_out_free_res;
15154 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15155 if (!dev) {
15156 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15157 err = -ENOMEM;
15158 goto err_out_power_down;
15161 SET_NETDEV_DEV(dev, &pdev->dev);
15163 tp = netdev_priv(dev);
15164 tp->pdev = pdev;
15165 tp->dev = dev;
15166 tp->pm_cap = pm_cap;
15167 tp->rx_mode = TG3_DEF_RX_MODE;
15168 tp->tx_mode = TG3_DEF_TX_MODE;
15170 if (tg3_debug > 0)
15171 tp->msg_enable = tg3_debug;
15172 else
15173 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15175 /* The word/byte swap controls here control register access byte
15176 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15177 * setting below.
15178 */
15179 tp->misc_host_ctrl =
15180 MISC_HOST_CTRL_MASK_PCI_INT |
15181 MISC_HOST_CTRL_WORD_SWAP |
15182 MISC_HOST_CTRL_INDIR_ACCESS |
15183 MISC_HOST_CTRL_PCISTATE_RW;
15185 /* The NONFRM (non-frame) byte/word swap controls take effect
15186 * on descriptor entries, anything which isn't packet data.
15187 *
15188 * The StrongARM chips on the board (one for tx, one for rx)
15189 * are running in big-endian mode.
15190 */
15191 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15192 GRC_MODE_WSWAP_NONFRM_DATA);
15193 #ifdef __BIG_ENDIAN
15194 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15195 #endif
15196 spin_lock_init(&tp->lock);
15197 spin_lock_init(&tp->indirect_lock);
15198 INIT_WORK(&tp->reset_task, tg3_reset_task);
15200 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15201 if (!tp->regs) {
15202 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15203 err = -ENOMEM;
15204 goto err_out_free_dev;
15207 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15208 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15209 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15210 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15211 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15212 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15213 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15214 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15215 tg3_flag_set(tp, ENABLE_APE);
15216 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15217 if (!tp->aperegs) {
15218 dev_err(&pdev->dev,
15219 "Cannot map APE registers, aborting\n");
15220 err = -ENOMEM;
15221 goto err_out_iounmap;
15225 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15226 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15228 dev->ethtool_ops = &tg3_ethtool_ops;
15229 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15230 dev->netdev_ops = &tg3_netdev_ops;
15231 dev->irq = pdev->irq;
15233 err = tg3_get_invariants(tp);
15234 if (err) {
15235 dev_err(&pdev->dev,
15236 "Problem fetching invariants of chip, aborting\n");
15237 goto err_out_apeunmap;
15240 /* The EPB bridge inside 5714, 5715, and 5780 and any
15241 * device behind the EPB cannot support DMA addresses > 40-bit.
15242 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15243 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15244 * do DMA address check in tg3_start_xmit().
15245 */
15246 if (tg3_flag(tp, IS_5788))
15247 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15248 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15249 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15250 #ifdef CONFIG_HIGHMEM
15251 dma_mask = DMA_BIT_MASK(64);
15252 #endif
15253 } else
15254 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15256 /* Configure DMA attributes. */
15257 if (dma_mask > DMA_BIT_MASK(32)) {
15258 err = pci_set_dma_mask(pdev, dma_mask);
15259 if (!err) {
15260 features |= NETIF_F_HIGHDMA;
15261 err = pci_set_consistent_dma_mask(pdev,
15262 persist_dma_mask);
15263 if (err < 0) {
15264 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15265 "DMA for consistent allocations\n");
15266 goto err_out_apeunmap;
15270 if (err || dma_mask == DMA_BIT_MASK(32)) {
15271 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15272 if (err) {
15273 dev_err(&pdev->dev,
15274 "No usable DMA configuration, aborting\n");
15275 goto err_out_apeunmap;
15279 tg3_init_bufmgr_config(tp);
15281 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15283 /* 5700 B0 chips do not support checksumming correctly due
15284 * to hardware bugs.
15285 */
15286 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15287 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15289 if (tg3_flag(tp, 5755_PLUS))
15290 features |= NETIF_F_IPV6_CSUM;
15293 /* TSO is on by default on chips that support hardware TSO.
15294 * Firmware TSO on older chips gives lower performance, so it
15295 * is off by default, but can be enabled using ethtool.
15296 */
15297 if ((tg3_flag(tp, HW_TSO_1) ||
15298 tg3_flag(tp, HW_TSO_2) ||
15299 tg3_flag(tp, HW_TSO_3)) &&
15300 (features & NETIF_F_IP_CSUM))
15301 features |= NETIF_F_TSO;
15302 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15303 if (features & NETIF_F_IPV6_CSUM)
15304 features |= NETIF_F_TSO6;
15305 if (tg3_flag(tp, HW_TSO_3) ||
15306 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15307 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15308 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15309 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15310 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15311 features |= NETIF_F_TSO_ECN;
15314 dev->features |= features;
15315 dev->vlan_features |= features;
15317 /*
15318 * Add loopback capability only for a subset of devices that support
15319 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15320 * loopback for the remaining devices.
15321 */
15322 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15323 !tg3_flag(tp, CPMU_PRESENT))
15324 /* Add the loopback capability */
15325 features |= NETIF_F_LOOPBACK;
15327 dev->hw_features |= features;
15329 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15330 !tg3_flag(tp, TSO_CAPABLE) &&
15331 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15332 tg3_flag_set(tp, MAX_RXPEND_64);
15333 tp->rx_pending = 63;
15336 err = tg3_get_device_address(tp);
15337 if (err) {
15338 dev_err(&pdev->dev,
15339 "Could not obtain valid ethernet address, aborting\n");
15340 goto err_out_apeunmap;
15343 /*
15344 * Reset the chip in case an UNDI or EFI driver did not shut down
15345 * cleanly: the DMA self test will enable WDMAC and we'll see
15346 * (spurious) pending DMA on the PCI bus at that point.
15347 */
15348 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15349 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15350 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15351 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15354 err = tg3_test_dma(tp);
15355 if (err) {
15356 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15357 goto err_out_apeunmap;
15360 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15361 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15362 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15363 for (i = 0; i < tp->irq_max; i++) {
15364 struct tg3_napi *tnapi = &tp->napi[i];
15366 tnapi->tp = tp;
15367 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15369 tnapi->int_mbox = intmbx;
15370 if (i < 4)
15371 intmbx += 0x8;
15372 else
15373 intmbx += 0x4;
15375 tnapi->consmbox = rcvmbx;
15376 tnapi->prodmbox = sndmbx;
15378 if (i)
15379 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15380 else
15381 tnapi->coal_now = HOSTCC_MODE_NOW;
15383 if (!tg3_flag(tp, SUPPORT_MSIX))
15384 break;
15386 /*
15387 * If we support MSIX, we'll be using RSS. If we're using
15388 * RSS, the first vector only handles link interrupts and the
15389 * remaining vectors handle rx and tx interrupts. Reuse the
15390 * mailbox values for the next iteration. The values we set up
15391 * above are still useful for the single vectored mode.
15392 */
15393 if (!i)
15394 continue;
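/* Editor's note: the continue above means vector 1 is assigned the
 * same consumer/producer mailboxes as vector 0; under RSS, vector 0
 * handles only link interrupts, so those slots are free for the
 * first data vector.
 */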
15396 rcvmbx += 0x8;
15398 if (sndmbx & 0x4)
15399 sndmbx -= 0x4;
15400 else
15401 sndmbx += 0xc;
15404 tg3_init_coal(tp);
15406 pci_set_drvdata(pdev, dev);
15408 if (tg3_flag(tp, 5717_PLUS)) {
15409 /* Resume a low-power mode */
15410 tg3_frob_aux_power(tp, false);
15413 err = register_netdev(dev);
15414 if (err) {
15415 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15416 goto err_out_apeunmap;
15419 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15420 tp->board_part_number,
15421 tp->pci_chip_rev_id,
15422 tg3_bus_string(tp, str),
15423 dev->dev_addr);
15425 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15426 struct phy_device *phydev;
15427 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15428 netdev_info(dev,
15429 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15430 phydev->drv->name, dev_name(&phydev->dev));
15431 } else {
15432 char *ethtype;
15434 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15435 ethtype = "10/100Base-TX";
15436 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15437 ethtype = "1000Base-SX";
15438 else
15439 ethtype = "10/100/1000Base-T";
15441 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15442 "(WireSpeed[%d], EEE[%d])\n",
15443 tg3_phy_string(tp), ethtype,
15444 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15445 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15448 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15449 (dev->features & NETIF_F_RXCSUM) != 0,
15450 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15451 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15452 tg3_flag(tp, ENABLE_ASF) != 0,
15453 tg3_flag(tp, TSO_CAPABLE) != 0);
15454 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15455 tp->dma_rwctrl,
15456 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15457 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15459 pci_save_state(pdev);
15461 return 0;
15463 err_out_apeunmap:
15464 if (tp->aperegs) {
15465 iounmap(tp->aperegs);
15466 tp->aperegs = NULL;
15469 err_out_iounmap:
15470 if (tp->regs) {
15471 iounmap(tp->regs);
15472 tp->regs = NULL;
15475 err_out_free_dev:
15476 free_netdev(dev);
15478 err_out_power_down:
15479 pci_set_power_state(pdev, PCI_D3hot);
15481 err_out_free_res:
15482 pci_release_regions(pdev);
15484 err_out_disable_pdev:
15485 pci_disable_device(pdev);
15486 pci_set_drvdata(pdev, NULL);
15487 return err;
15490 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15492 struct net_device *dev = pci_get_drvdata(pdev);
15494 if (dev) {
15495 struct tg3 *tp = netdev_priv(dev);
15497 if (tp->fw)
15498 release_firmware(tp->fw);
15500 cancel_work_sync(&tp->reset_task);
15502 if (!tg3_flag(tp, USE_PHYLIB)) {
15503 tg3_phy_fini(tp);
15504 tg3_mdio_fini(tp);
15507 unregister_netdev(dev);
15508 if (tp->aperegs) {
15509 iounmap(tp->aperegs);
15510 tp->aperegs = NULL;
15512 if (tp->regs) {
15513 iounmap(tp->regs);
15514 tp->regs = NULL;
15516 free_netdev(dev);
15517 pci_release_regions(pdev);
15518 pci_disable_device(pdev);
15519 pci_set_drvdata(pdev, NULL);
15523 #ifdef CONFIG_PM_SLEEP
15524 static int tg3_suspend(struct device *device)
15526 struct pci_dev *pdev = to_pci_dev(device);
15527 struct net_device *dev = pci_get_drvdata(pdev);
15528 struct tg3 *tp = netdev_priv(dev);
15529 int err;
15531 if (!netif_running(dev))
15532 return 0;
15534 flush_work_sync(&tp->reset_task);
15535 tg3_phy_stop(tp);
15536 tg3_netif_stop(tp);
15538 del_timer_sync(&tp->timer);
15540 tg3_full_lock(tp, 1);
15541 tg3_disable_ints(tp);
15542 tg3_full_unlock(tp);
15544 netif_device_detach(dev);
15546 tg3_full_lock(tp, 0);
15547 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15548 tg3_flag_clear(tp, INIT_COMPLETE);
15549 tg3_full_unlock(tp);
15551 err = tg3_power_down_prepare(tp);
15552 if (err) {
15553 int err2;
15555 tg3_full_lock(tp, 0);
15557 tg3_flag_set(tp, INIT_COMPLETE);
15558 err2 = tg3_restart_hw(tp, 1);
15559 if (err2)
15560 goto out;
15562 tp->timer.expires = jiffies + tp->timer_offset;
15563 add_timer(&tp->timer);
15565 netif_device_attach(dev);
15566 tg3_netif_start(tp);
15568 out:
15569 tg3_full_unlock(tp);
15571 if (!err2)
15572 tg3_phy_start(tp);
15575 return err;
15578 static int tg3_resume(struct device *device)
15580 struct pci_dev *pdev = to_pci_dev(device);
15581 struct net_device *dev = pci_get_drvdata(pdev);
15582 struct tg3 *tp = netdev_priv(dev);
15583 int err;
15585 if (!netif_running(dev))
15586 return 0;
15588 netif_device_attach(dev);
15590 tg3_full_lock(tp, 0);
15592 tg3_flag_set(tp, INIT_COMPLETE);
15593 err = tg3_restart_hw(tp, 1);
15594 if (err)
15595 goto out;
15597 tp->timer.expires = jiffies + tp->timer_offset;
15598 add_timer(&tp->timer);
15600 tg3_netif_start(tp);
15602 out:
15603 tg3_full_unlock(tp);
15605 if (!err)
15606 tg3_phy_start(tp);
15608 return err;
15611 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15612 #define TG3_PM_OPS (&tg3_pm_ops)
15614 #else
15616 #define TG3_PM_OPS NULL
15618 #endif /* CONFIG_PM_SLEEP */
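/* Editor's note: SIMPLE_DEV_PM_OPS expands tg3_suspend/tg3_resume
 * into a struct dev_pm_ops covering the system sleep callbacks
 * (suspend, resume, freeze, thaw, poweroff, restore).
 */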
15620 /**
15621 * tg3_io_error_detected - called when PCI error is detected
15622 * @pdev: Pointer to PCI device
15623 * @state: The current pci connection state
15624 *
15625 * This function is called after a PCI bus error affecting
15626 * this device has been detected.
15627 */
15628 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15629 pci_channel_state_t state)
15631 struct net_device *netdev = pci_get_drvdata(pdev);
15632 struct tg3 *tp = netdev_priv(netdev);
15633 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15635 netdev_info(netdev, "PCI I/O error detected\n");
15637 rtnl_lock();
15639 if (!netif_running(netdev))
15640 goto done;
15642 tg3_phy_stop(tp);
15644 tg3_netif_stop(tp);
15646 del_timer_sync(&tp->timer);
15647 tg3_flag_clear(tp, RESTART_TIMER);
15649 /* Want to make sure that the reset task doesn't run */
15650 cancel_work_sync(&tp->reset_task);
15651 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15652 tg3_flag_clear(tp, RESTART_TIMER);
15654 netif_device_detach(netdev);
15656 /* Clean up software state, even if MMIO is blocked */
15657 tg3_full_lock(tp, 0);
15658 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15659 tg3_full_unlock(tp);
15661 done:
15662 if (state == pci_channel_io_perm_failure)
15663 err = PCI_ERS_RESULT_DISCONNECT;
15664 else
15665 pci_disable_device(pdev);
15667 rtnl_unlock();
15669 return err;
15672 /**
15673 * tg3_io_slot_reset - called after the pci bus has been reset.
15674 * @pdev: Pointer to PCI device
15675 *
15676 * Restart the card from scratch, as if from a cold-boot.
15677 * At this point, the card has experienced a hard reset,
15678 * followed by fixups by BIOS, and has its config space
15679 * set up identically to what it was at cold boot.
15680 */
15681 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15683 struct net_device *netdev = pci_get_drvdata(pdev);
15684 struct tg3 *tp = netdev_priv(netdev);
15685 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15686 int err;
15688 rtnl_lock();
15690 if (pci_enable_device(pdev)) {
15691 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15692 goto done;
15695 pci_set_master(pdev);
15696 pci_restore_state(pdev);
15697 pci_save_state(pdev);
15699 if (!netif_running(netdev)) {
15700 rc = PCI_ERS_RESULT_RECOVERED;
15701 goto done;
15704 err = tg3_power_up(tp);
15705 if (err)
15706 goto done;
15708 rc = PCI_ERS_RESULT_RECOVERED;
15710 done:
15711 rtnl_unlock();
15713 return rc;
15716 /**
15717 * tg3_io_resume - called when traffic can start flowing again.
15718 * @pdev: Pointer to PCI device
15719 *
15720 * This callback is called when the error recovery driver tells
15721 * us that it's OK to resume normal operation.
15722 */
15723 static void tg3_io_resume(struct pci_dev *pdev)
15725 struct net_device *netdev = pci_get_drvdata(pdev);
15726 struct tg3 *tp = netdev_priv(netdev);
15727 int err;
15729 rtnl_lock();
15731 if (!netif_running(netdev))
15732 goto done;
15734 tg3_full_lock(tp, 0);
15735 tg3_flag_set(tp, INIT_COMPLETE);
15736 err = tg3_restart_hw(tp, 1);
15737 tg3_full_unlock(tp);
15738 if (err) {
15739 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15740 goto done;
15743 netif_device_attach(netdev);
15745 tp->timer.expires = jiffies + tp->timer_offset;
15746 add_timer(&tp->timer);
15748 tg3_netif_start(tp);
15750 tg3_phy_start(tp);
15752 done:
15753 rtnl_unlock();
15756 static struct pci_error_handlers tg3_err_handler = {
15757 .error_detected = tg3_io_error_detected,
15758 .slot_reset = tg3_io_slot_reset,
15759 .resume = tg3_io_resume
15762 static struct pci_driver tg3_driver = {
15763 .name = DRV_MODULE_NAME,
15764 .id_table = tg3_pci_tbl,
15765 .probe = tg3_init_one,
15766 .remove = __devexit_p(tg3_remove_one),
15767 .err_handler = &tg3_err_handler,
15768 .driver.pm = TG3_PM_OPS,
15771 static int __init tg3_init(void)
15773 return pci_register_driver(&tg3_driver);
15776 static void __exit tg3_cleanup(void)
15778 pci_unregister_driver(&tg3_driver);
15781 module_init(tg3_init);
15782 module_exit(tg3_cleanup);