tg3: Check transitions to D0 power state
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
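
/* Flag names are token-pasted onto the TG3_FLAG_ prefix, so callers write
 * e.g. tg3_flag(tp, ENABLE_APE) or tg3_flag_set(tp, TAGGED_STATUS) and the
 * accessors resolve to atomic bit operations on tp->tg3_flags.
 */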
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			119
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 18, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
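
/* TG3_TX_RING_SIZE is a power of two, so NEXT_TX() wraps the producer
 * index with a mask rather than a modulo, exactly the '& (foo - 1)' form
 * described above.
 */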
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
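
/* Registers can also be reached indirectly: the target offset is written
 * to a window register in PCI config space (TG3PCI_REG_BASE_ADDR) and the
 * data then moves through TG3PCI_REG_DATA.  tp->indirect_lock serializes
 * users of the shared window.
 */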
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
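
/* NIC on-board SRAM is accessed through a similar window: the SRAM offset
 * goes into TG3PCI_MEM_WIN_BASE_ADDR, data moves through
 * TG3PCI_MEM_WIN_DATA, and the window base is always parked back at zero
 * afterwards.
 */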
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}
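
/* The APE (Application Processing Engine) shares the hardware with the
 * host driver.  A lock is requested by writing APE_LOCK_REQ_DRIVER to the
 * per-lock request register and is owned once the matching grant register
 * reads back APE_LOCK_GRANT_DRIVER; writing the grant register releases
 * (or revokes) the request.
 */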
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
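
/* MII management transactions go through MAC_MI_COM: the PHY and register
 * addresses plus a command are written, then MI_COM_BUSY is polled until
 * the frame completes.  Autopolling is paused around each manual access so
 * the two cannot collide.
 */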
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
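
/* Clause 45 registers are reached through the clause 22 MMD access
 * registers: select the device address in MII_TG3_MMD_CTRL, latch the
 * register address through MII_TG3_MMD_ADDRESS, switch the control
 * register to no-post-increment data mode, then transfer the data.
 */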
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
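
/* PHY DSP registers sit behind an address/data pair as well: the target is
 * latched through MII_TG3_DSP_ADDRESS and the payload moves through
 * MII_TG3_DSP_RW_PORT.
 */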
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
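
/* Link state is also reported to the on-chip management firmware: a
 * link-update command and length are placed in the NIC SRAM command
 * mailbox, a snapshot of the MII registers is written to the data mailbox,
 * and a GRC RX CPU event tells the firmware to consume it.
 */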
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
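
/* The helpers below map the driver's FLOW_CTRL_TX/FLOW_CTRL_RX pair onto
 * the 802.3 PAUSE/ASM_DIR advertisement bits: symmetric pause alone for
 * TX+RX, ASM_DIR alone for TX-only, and both bits for RX-only; once in
 * copper (1000T) form and once in 1000BASE-X form.
 */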
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
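
/* phylib link-change callback: fold the negotiated speed, duplex and pause
 * settings into MAC_MODE, the MI status attention enables and the TX
 * IPG/slot-time lengths, then emit a link report if anything changed.
 */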
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
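
/* Write a known test pattern into each of the four DSP channel blocks and
 * read it back.  A mismatch or a stuck DSP macro asks the caller (via
 * *resetp) to reset the PHY and retry, and fails with -EBUSY.
 */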
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
2029 /* Reset the tigon3 PHY and reapply all chip-specific setup and
2030 * workarounds; callers decide when a reset is needed. */
2032 static int tg3_phy_reset(struct tg3 *tp)
2034 u32 val, cpmuctrl;
2035 int err;
2037 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2038 val = tr32(GRC_MISC_CFG);
2039 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2040 udelay(40);
2042 err = tg3_readphy(tp, MII_BMSR, &val);
2043 err |= tg3_readphy(tp, MII_BMSR, &val);
2044 if (err != 0)
2045 return -EBUSY;
2047 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2048 netif_carrier_off(tp->dev);
2049 tg3_link_report(tp);
2052 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2053 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2054 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2055 err = tg3_phy_reset_5703_4_5(tp);
2056 if (err)
2057 return err;
2058 goto out;
2061 cpmuctrl = 0;
2062 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2063 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2064 cpmuctrl = tr32(TG3_CPMU_CTRL);
2065 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2066 tw32(TG3_CPMU_CTRL,
2067 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2070 err = tg3_bmcr_reset(tp);
2071 if (err)
2072 return err;
2074 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2075 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2076 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2078 tw32(TG3_CPMU_CTRL, cpmuctrl);
2081 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2082 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2083 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2084 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2085 CPMU_LSPD_1000MB_MACCLK_12_5) {
2086 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2087 udelay(40);
2088 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2092 if (tg3_flag(tp, 5717_PLUS) &&
2093 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2094 return 0;
2096 tg3_phy_apply_otp(tp);
2098 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2099 tg3_phy_toggle_apd(tp, true);
2100 else
2101 tg3_phy_toggle_apd(tp, false);
2103 out:
2104 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2105 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2106 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2107 tg3_phydsp_write(tp, 0x000a, 0x0323);
2108 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2111 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2112 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2113 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2116 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2117 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2118 tg3_phydsp_write(tp, 0x000a, 0x310b);
2119 tg3_phydsp_write(tp, 0x201f, 0x9506);
2120 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2121 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2123 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2124 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2125 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2126 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2127 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2128 tg3_writephy(tp, MII_TG3_TEST1,
2129 MII_TG3_TEST1_TRIM_EN | 0x4);
2130 } else
2131 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2133 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2137 /* Set the extended packet length bit (bit 14) on all chips
2138 * that support jumbo frames. */
2139 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2140 /* Cannot do read-modify-write on 5401 */
2141 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2142 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2143 /* Set bit 14 with read-modify-write to preserve other bits */
2144 err = tg3_phy_auxctl_read(tp,
2145 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2146 if (!err)
2147 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2148 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2151 /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2152 * jumbo frame transmission. */
2154 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2155 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2156 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2157 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2160 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2161 /* adjust output voltage */
2162 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2165 tg3_phy_toggle_automdix(tp, 1);
2166 tg3_phy_set_wirespeed(tp);
2167 return 0;
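/* Switch the power source back to Vmain by rewriting the cached
 * GRC_LOCAL_CTRL value; a no-op unless the device is flagged IS_NIC. */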
2170 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2172 if (!tg3_flag(tp, IS_NIC))
2173 return 0;
2175 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2176 TG3_GRC_LCLCTL_PWRSW_DELAY);
2178 return 0;
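/* Sequence GPIO1 high-low-high to shut off the auxiliary power
 * source, leaving the board on Vmain; skipped on 5700/5701 and
 * on non-NIC devices. */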
2181 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2183 u32 grc_local_ctrl;
2185 if (!tg3_flag(tp, IS_NIC) ||
2186 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2187 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2188 return;
2190 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2192 tw32_wait_f(GRC_LOCAL_CTRL,
2193 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2194 TG3_GRC_LCLCTL_PWRSW_DELAY);
2196 tw32_wait_f(GRC_LOCAL_CTRL,
2197 grc_local_ctrl,
2198 TG3_GRC_LCLCTL_PWRSW_DELAY);
2200 tw32_wait_f(GRC_LOCAL_CTRL,
2201 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2202 TG3_GRC_LCLCTL_PWRSW_DELAY);
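/* Drive the GPIO-controlled power switch to move the board onto
 * auxiliary power, handling the 5700/5701, 5761, and 5714 GPIO
 * quirks separately. */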
2205 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2207 if (!tg3_flag(tp, IS_NIC))
2208 return;
2210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2211 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2212 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2213 (GRC_LCLCTRL_GPIO_OE0 |
2214 GRC_LCLCTRL_GPIO_OE1 |
2215 GRC_LCLCTRL_GPIO_OE2 |
2216 GRC_LCLCTRL_GPIO_OUTPUT0 |
2217 GRC_LCLCTRL_GPIO_OUTPUT1),
2218 TG3_GRC_LCLCTL_PWRSW_DELAY);
2219 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2220 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2221 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2222 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2223 GRC_LCLCTRL_GPIO_OE1 |
2224 GRC_LCLCTRL_GPIO_OE2 |
2225 GRC_LCLCTRL_GPIO_OUTPUT0 |
2226 GRC_LCLCTRL_GPIO_OUTPUT1 |
2227 tp->grc_local_ctrl;
2228 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2229 TG3_GRC_LCLCTL_PWRSW_DELAY);
2231 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2232 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2233 TG3_GRC_LCLCTL_PWRSW_DELAY);
2235 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2236 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2237 TG3_GRC_LCLCTL_PWRSW_DELAY);
2238 } else {
2239 u32 no_gpio2;
2240 u32 grc_local_ctrl = 0;
2242 /* Workaround to prevent overdrawing Amps. */
2243 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2244 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2245 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2246 grc_local_ctrl,
2247 TG3_GRC_LCLCTL_PWRSW_DELAY);
2250 /* On 5753 and variants, GPIO2 cannot be used. */
2251 no_gpio2 = tp->nic_sram_data_cfg &
2252 NIC_SRAM_DATA_CFG_NO_GPIO2;
2254 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2255 GRC_LCLCTRL_GPIO_OE1 |
2256 GRC_LCLCTRL_GPIO_OE2 |
2257 GRC_LCLCTRL_GPIO_OUTPUT1 |
2258 GRC_LCLCTRL_GPIO_OUTPUT2;
2259 if (no_gpio2) {
2260 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2261 GRC_LCLCTRL_GPIO_OUTPUT2);
2263 tw32_wait_f(GRC_LOCAL_CTRL,
2264 tp->grc_local_ctrl | grc_local_ctrl,
2265 TG3_GRC_LCLCTL_PWRSW_DELAY);
2267 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2269 tw32_wait_f(GRC_LOCAL_CTRL,
2270 tp->grc_local_ctrl | grc_local_ctrl,
2271 TG3_GRC_LCLCTL_PWRSW_DELAY);
2273 if (!no_gpio2) {
2274 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2275 tw32_wait_f(GRC_LOCAL_CTRL,
2276 tp->grc_local_ctrl | grc_local_ctrl,
2277 TG3_GRC_LCLCTL_PWRSW_DELAY);
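/* Decide whether this device (or its peer on a dual-port board)
 * still needs auxiliary power for WOL or ASF, and switch the power
 * source accordingly. */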
2282 static void tg3_frob_aux_power(struct tg3 *tp)
2284 bool need_vaux = false;
2286 /* The GPIOs do something completely different on 57765. */
2287 if (!tg3_flag(tp, IS_NIC) ||
2288 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2289 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2290 return;
2292 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2293 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2294 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2295 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2296 tp->pdev_peer != tp->pdev) {
2297 struct net_device *dev_peer;
2299 dev_peer = pci_get_drvdata(tp->pdev_peer);
2301 /* remove_one() may have been run on the peer. */
2302 if (dev_peer) {
2303 struct tg3 *tp_peer = netdev_priv(dev_peer);
2305 if (tg3_flag(tp_peer, INIT_COMPLETE))
2306 return;
2308 if (tg3_flag(tp_peer, WOL_ENABLE) ||
2309 tg3_flag(tp_peer, ENABLE_ASF))
2310 need_vaux = true;
2314 if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2315 need_vaux = true;
2317 if (need_vaux)
2318 tg3_pwrsrc_switch_to_vaux(tp);
2319 else
2320 tg3_pwrsrc_die_with_vmain(tp);
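/* Return nonzero if the 5700's MAC_MODE_LINK_POLARITY bit should be
 * set for the given link speed, based on the LED mode and PHY type. */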
2323 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2325 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2326 return 1;
2327 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2328 if (speed != SPEED_10)
2329 return 1;
2330 } else if (speed == SPEED_10)
2331 return 1;
2333 return 0;
2336 static int tg3_setup_phy(struct tg3 *, int);
2338 #define RESET_KIND_SHUTDOWN 0
2339 #define RESET_KIND_INIT 1
2340 #define RESET_KIND_SUSPEND 2
2342 static void tg3_write_sig_post_reset(struct tg3 *, int);
2343 static int tg3_halt_cpu(struct tg3 *, u32);
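/* Power the PHY down along the chip-appropriate path: park the 5704
 * serdes in HW-autoneg reset, put the 5906 EPHY into IDDQ, isolate
 * FET PHYs via the shadow registers, or simply set BMCR_PDOWN --
 * except on chips where powering the PHY down is known to be buggy. */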
2345 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2347 u32 val;
2349 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2351 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2352 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2354 sg_dig_ctrl |=
2355 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2356 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2357 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2359 return;
2362 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2363 tg3_bmcr_reset(tp);
2364 val = tr32(GRC_MISC_CFG);
2365 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2366 udelay(40);
2367 return;
2368 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2369 u32 phytest;
2370 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2371 u32 phy;
2373 tg3_writephy(tp, MII_ADVERTISE, 0);
2374 tg3_writephy(tp, MII_BMCR,
2375 BMCR_ANENABLE | BMCR_ANRESTART);
2377 tg3_writephy(tp, MII_TG3_FET_TEST,
2378 phytest | MII_TG3_FET_SHADOW_EN);
2379 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2380 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2381 tg3_writephy(tp,
2382 MII_TG3_FET_SHDW_AUXMODE4,
2383 phy);
2385 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2387 return;
2388 } else if (do_low_power) {
2389 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2390 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2392 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2393 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2394 MII_TG3_AUXCTL_PCTL_VREG_11V;
2395 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2398 /* The PHY should not be powered down on some chips because
2399 * of bugs. */
2401 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2402 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2403 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2404 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2405 return;
2407 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2408 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2409 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2410 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2411 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2412 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2415 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2418 /* tp->lock is held. */
2419 static int tg3_nvram_lock(struct tg3 *tp)
2421 if (tg3_flag(tp, NVRAM)) {
2422 int i;
2424 if (tp->nvram_lock_cnt == 0) {
2425 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2426 for (i = 0; i < 8000; i++) {
2427 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2428 break;
2429 udelay(20);
2431 if (i == 8000) {
2432 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2433 return -ENODEV;
2436 tp->nvram_lock_cnt++;
2438 return 0;
2441 /* tp->lock is held. */
2442 static void tg3_nvram_unlock(struct tg3 *tp)
2444 if (tg3_flag(tp, NVRAM)) {
2445 if (tp->nvram_lock_cnt > 0)
2446 tp->nvram_lock_cnt--;
2447 if (tp->nvram_lock_cnt == 0)
2448 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2452 /* tp->lock is held. */
2453 static void tg3_enable_nvram_access(struct tg3 *tp)
2455 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2456 u32 nvaccess = tr32(NVRAM_ACCESS);
2458 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2462 /* tp->lock is held. */
2463 static void tg3_disable_nvram_access(struct tg3 *tp)
2465 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2466 u32 nvaccess = tr32(NVRAM_ACCESS);
2468 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
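/* Legacy path: read one 32-bit word through the GRC EEPROM engine
 * when the chip has no NVRAM interface. */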
2472 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2473 u32 offset, u32 *val)
2475 u32 tmp;
2476 int i;
2478 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2479 return -EINVAL;
2481 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2482 EEPROM_ADDR_DEVID_MASK |
2483 EEPROM_ADDR_READ);
2484 tw32(GRC_EEPROM_ADDR,
2485 tmp |
2486 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2487 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2488 EEPROM_ADDR_ADDR_MASK) |
2489 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2491 for (i = 0; i < 1000; i++) {
2492 tmp = tr32(GRC_EEPROM_ADDR);
2494 if (tmp & EEPROM_ADDR_COMPLETE)
2495 break;
2496 msleep(1);
2498 if (!(tmp & EEPROM_ADDR_COMPLETE))
2499 return -EBUSY;
2501 tmp = tr32(GRC_EEPROM_DATA);
2504 /* The data will always be opposite the native endian
2505 * format. Perform a blind byteswap to compensate. */
2507 *val = swab32(tmp);
2509 return 0;
2512 #define NVRAM_CMD_TIMEOUT 10000
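/* Kick off an NVRAM command and busy-wait, in 10 usec steps, for the
 * controller to report NVRAM_CMD_DONE. */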
2514 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2516 int i;
2518 tw32(NVRAM_CMD, nvram_cmd);
2519 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2520 udelay(10);
2521 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2522 udelay(10);
2523 break;
2527 if (i == NVRAM_CMD_TIMEOUT)
2528 return -EBUSY;
2530 return 0;
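/* Translate a linear NVRAM offset into the page:offset form used by
 * Atmel AT45DB0x1B-style buffered flash parts. */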
2533 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2535 if (tg3_flag(tp, NVRAM) &&
2536 tg3_flag(tp, NVRAM_BUFFERED) &&
2537 tg3_flag(tp, FLASH) &&
2538 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2539 (tp->nvram_jedecnum == JEDEC_ATMEL))
2541 addr = ((addr / tp->nvram_pagesize) <<
2542 ATMEL_AT45DB0X1B_PAGE_POS) +
2543 (addr % tp->nvram_pagesize);
2545 return addr;
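/* Inverse of tg3_nvram_phys_addr(): map a page:offset address back
 * to a linear NVRAM offset. */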
2548 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2550 if (tg3_flag(tp, NVRAM) &&
2551 tg3_flag(tp, NVRAM_BUFFERED) &&
2552 tg3_flag(tp, FLASH) &&
2553 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2554 (tp->nvram_jedecnum == JEDEC_ATMEL))
2556 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2557 tp->nvram_pagesize) +
2558 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2560 return addr;
2563 /* NOTE: Data read in from NVRAM is byteswapped according to
2564 * the byteswapping settings for all other register accesses.
2565 * tg3 devices are BE devices, so on a BE machine, the data
2566 * returned will be exactly as it is seen in NVRAM. On a LE
2567 * machine, the 32-bit value will be byteswapped. */
2569 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2571 int ret;
2573 if (!tg3_flag(tp, NVRAM))
2574 return tg3_nvram_read_using_eeprom(tp, offset, val);
2576 offset = tg3_nvram_phys_addr(tp, offset);
2578 if (offset > NVRAM_ADDR_MSK)
2579 return -EINVAL;
2581 ret = tg3_nvram_lock(tp);
2582 if (ret)
2583 return ret;
2585 tg3_enable_nvram_access(tp);
2587 tw32(NVRAM_ADDR, offset);
2588 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2589 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2591 if (ret == 0)
2592 *val = tr32(NVRAM_RDDATA);
2594 tg3_disable_nvram_access(tp);
2596 tg3_nvram_unlock(tp);
2598 return ret;
2601 /* Ensures NVRAM data is in bytestream format. */
2602 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2604 u32 v;
2605 int res = tg3_nvram_read(tp, offset, &v);
2606 if (!res)
2607 *val = cpu_to_be32(v);
2608 return res;
2611 /* tp->lock is held. */
2612 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2614 u32 addr_high, addr_low;
2615 int i;
2617 addr_high = ((tp->dev->dev_addr[0] << 8) |
2618 tp->dev->dev_addr[1]);
2619 addr_low = ((tp->dev->dev_addr[2] << 24) |
2620 (tp->dev->dev_addr[3] << 16) |
2621 (tp->dev->dev_addr[4] << 8) |
2622 (tp->dev->dev_addr[5] << 0));
2623 for (i = 0; i < 4; i++) {
2624 if (i == 1 && skip_mac_1)
2625 continue;
2626 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2627 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2630 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2631 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2632 for (i = 0; i < 12; i++) {
2633 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2634 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2638 addr_high = (tp->dev->dev_addr[0] +
2639 tp->dev->dev_addr[1] +
2640 tp->dev->dev_addr[2] +
2641 tp->dev->dev_addr[3] +
2642 tp->dev->dev_addr[4] +
2643 tp->dev->dev_addr[5]) &
2644 TX_BACKOFF_SEED_MASK;
2645 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2648 static void tg3_enable_register_access(struct tg3 *tp)
2651 /* Make sure register accesses (indirect or otherwise) will function
2652 * correctly. */
2654 pci_write_config_dword(tp->pdev,
2655 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
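/* Bring the device to full power (D0) and back onto Vmain; logs and
 * returns the error if the D0 transition fails. */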
2658 static int tg3_power_up(struct tg3 *tp)
2660 int err;
2662 tg3_enable_register_access(tp);
2664 err = pci_set_power_state(tp->pdev, PCI_D0);
2665 if (!err) {
2666 /* Switch out of Vaux if it is a NIC */
2667 tg3_pwrsrc_switch_to_vmain(tp);
2668 } else {
2669 netdev_err(tp->dev, "Transition to D0 failed\n");
2672 return err;
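/* Quiesce the chip ahead of a power-down: restore CLKREQ, mask PCI
 * interrupts, drop the PHY to a WOL-capable low-power state, gate
 * clocks, and hand off to the auxiliary power logic. */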
2675 static int tg3_power_down_prepare(struct tg3 *tp)
2677 u32 misc_host_ctrl;
2678 bool device_should_wake, do_low_power;
2680 tg3_enable_register_access(tp);
2682 /* Restore the CLKREQ setting. */
2683 if (tg3_flag(tp, CLKREQ_BUG)) {
2684 u16 lnkctl;
2686 pci_read_config_word(tp->pdev,
2687 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2688 &lnkctl);
2689 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2690 pci_write_config_word(tp->pdev,
2691 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2692 lnkctl);
2695 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2696 tw32(TG3PCI_MISC_HOST_CTRL,
2697 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2699 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2700 tg3_flag(tp, WOL_ENABLE);
2702 if (tg3_flag(tp, USE_PHYLIB)) {
2703 do_low_power = false;
2704 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2705 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2706 struct phy_device *phydev;
2707 u32 phyid, advertising;
2709 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2711 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2713 tp->link_config.orig_speed = phydev->speed;
2714 tp->link_config.orig_duplex = phydev->duplex;
2715 tp->link_config.orig_autoneg = phydev->autoneg;
2716 tp->link_config.orig_advertising = phydev->advertising;
2718 advertising = ADVERTISED_TP |
2719 ADVERTISED_Pause |
2720 ADVERTISED_Autoneg |
2721 ADVERTISED_10baseT_Half;
2723 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2724 if (tg3_flag(tp, WOL_SPEED_100MB))
2725 advertising |=
2726 ADVERTISED_100baseT_Half |
2727 ADVERTISED_100baseT_Full |
2728 ADVERTISED_10baseT_Full;
2729 else
2730 advertising |= ADVERTISED_10baseT_Full;
2733 phydev->advertising = advertising;
2735 phy_start_aneg(phydev);
2737 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2738 if (phyid != PHY_ID_BCMAC131) {
2739 phyid &= PHY_BCM_OUI_MASK;
2740 if (phyid == PHY_BCM_OUI_1 ||
2741 phyid == PHY_BCM_OUI_2 ||
2742 phyid == PHY_BCM_OUI_3)
2743 do_low_power = true;
2746 } else {
2747 do_low_power = true;
2749 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2750 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2751 tp->link_config.orig_speed = tp->link_config.speed;
2752 tp->link_config.orig_duplex = tp->link_config.duplex;
2753 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2756 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2757 tp->link_config.speed = SPEED_10;
2758 tp->link_config.duplex = DUPLEX_HALF;
2759 tp->link_config.autoneg = AUTONEG_ENABLE;
2760 tg3_setup_phy(tp, 0);
2764 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2765 u32 val;
2767 val = tr32(GRC_VCPU_EXT_CTRL);
2768 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2769 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2770 int i;
2771 u32 val;
2773 for (i = 0; i < 200; i++) {
2774 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2775 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2776 break;
2777 msleep(1);
2780 if (tg3_flag(tp, WOL_CAP))
2781 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2782 WOL_DRV_STATE_SHUTDOWN |
2783 WOL_DRV_WOL |
2784 WOL_SET_MAGIC_PKT);
2786 if (device_should_wake) {
2787 u32 mac_mode;
2789 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2790 if (do_low_power &&
2791 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2792 tg3_phy_auxctl_write(tp,
2793 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2794 MII_TG3_AUXCTL_PCTL_WOL_EN |
2795 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2796 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2797 udelay(40);
2800 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2801 mac_mode = MAC_MODE_PORT_MODE_GMII;
2802 else
2803 mac_mode = MAC_MODE_PORT_MODE_MII;
2805 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2806 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2807 ASIC_REV_5700) {
2808 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2809 SPEED_100 : SPEED_10;
2810 if (tg3_5700_link_polarity(tp, speed))
2811 mac_mode |= MAC_MODE_LINK_POLARITY;
2812 else
2813 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2815 } else {
2816 mac_mode = MAC_MODE_PORT_MODE_TBI;
2819 if (!tg3_flag(tp, 5750_PLUS))
2820 tw32(MAC_LED_CTRL, tp->led_ctrl);
2822 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2823 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2824 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2825 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2827 if (tg3_flag(tp, ENABLE_APE))
2828 mac_mode |= MAC_MODE_APE_TX_EN |
2829 MAC_MODE_APE_RX_EN |
2830 MAC_MODE_TDE_ENABLE;
2832 tw32_f(MAC_MODE, mac_mode);
2833 udelay(100);
2835 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2836 udelay(10);
2839 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2840 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2841 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2842 u32 base_val;
2844 base_val = tp->pci_clock_ctrl;
2845 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2846 CLOCK_CTRL_TXCLK_DISABLE);
2848 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2849 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2850 } else if (tg3_flag(tp, 5780_CLASS) ||
2851 tg3_flag(tp, CPMU_PRESENT) ||
2852 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2853 /* do nothing */
2854 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2855 u32 newbits1, newbits2;
2857 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2858 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2859 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2860 CLOCK_CTRL_TXCLK_DISABLE |
2861 CLOCK_CTRL_ALTCLK);
2862 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2863 } else if (tg3_flag(tp, 5705_PLUS)) {
2864 newbits1 = CLOCK_CTRL_625_CORE;
2865 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2866 } else {
2867 newbits1 = CLOCK_CTRL_ALTCLK;
2868 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2871 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2872 40);
2874 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2875 40);
2877 if (!tg3_flag(tp, 5705_PLUS)) {
2878 u32 newbits3;
2880 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2881 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2882 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2883 CLOCK_CTRL_TXCLK_DISABLE |
2884 CLOCK_CTRL_44MHZ_CORE);
2885 } else {
2886 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2889 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2890 tp->pci_clock_ctrl | newbits3, 40);
2894 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2895 tg3_power_down_phy(tp, do_low_power);
2897 tg3_frob_aux_power(tp);
2899 /* Workaround for unstable PLL clock */
2900 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2901 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2902 u32 val = tr32(0x7d00);
2904 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2905 tw32(0x7d00, val);
2906 if (!tg3_flag(tp, ENABLE_ASF)) {
2907 int err;
2909 err = tg3_nvram_lock(tp);
2910 tg3_halt_cpu(tp, RX_CPU_BASE);
2911 if (!err)
2912 tg3_nvram_unlock(tp);
2916 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2918 return 0;
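/* Final power-down: quiesce the chip, arm PCI wake if WOL is
 * enabled, then drop the device into D3hot. */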
2921 static void tg3_power_down(struct tg3 *tp)
2923 tg3_power_down_prepare(tp);
2925 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2926 pci_set_power_state(tp->pdev, PCI_D3hot);
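/* Decode the PHY aux status word into a (speed, duplex) pair,
 * falling back to the FET-specific bits when the standard field
 * does not match. */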
2929 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2931 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2932 case MII_TG3_AUX_STAT_10HALF:
2933 *speed = SPEED_10;
2934 *duplex = DUPLEX_HALF;
2935 break;
2937 case MII_TG3_AUX_STAT_10FULL:
2938 *speed = SPEED_10;
2939 *duplex = DUPLEX_FULL;
2940 break;
2942 case MII_TG3_AUX_STAT_100HALF:
2943 *speed = SPEED_100;
2944 *duplex = DUPLEX_HALF;
2945 break;
2947 case MII_TG3_AUX_STAT_100FULL:
2948 *speed = SPEED_100;
2949 *duplex = DUPLEX_FULL;
2950 break;
2952 case MII_TG3_AUX_STAT_1000HALF:
2953 *speed = SPEED_1000;
2954 *duplex = DUPLEX_HALF;
2955 break;
2957 case MII_TG3_AUX_STAT_1000FULL:
2958 *speed = SPEED_1000;
2959 *duplex = DUPLEX_FULL;
2960 break;
2962 default:
2963 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2964 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2965 SPEED_10;
2966 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2967 DUPLEX_HALF;
2968 break;
2970 *speed = SPEED_INVALID;
2971 *duplex = DUPLEX_INVALID;
2972 break;
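/* Program the advertisement registers from ethtool-style masks:
 * 10/100 modes and flow control via MII_ADVERTISE, gigabit modes via
 * MII_CTRL1000, and EEE abilities through the clause 45 interface. */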
2976 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2978 int err = 0;
2979 u32 val, new_adv;
2981 new_adv = ADVERTISE_CSMA;
2982 if (advertise & ADVERTISED_10baseT_Half)
2983 new_adv |= ADVERTISE_10HALF;
2984 if (advertise & ADVERTISED_10baseT_Full)
2985 new_adv |= ADVERTISE_10FULL;
2986 if (advertise & ADVERTISED_100baseT_Half)
2987 new_adv |= ADVERTISE_100HALF;
2988 if (advertise & ADVERTISED_100baseT_Full)
2989 new_adv |= ADVERTISE_100FULL;
2991 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2993 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2994 if (err)
2995 goto done;
2997 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2998 goto done;
3000 new_adv = 0;
3001 if (advertise & ADVERTISED_1000baseT_Half)
3002 new_adv |= ADVERTISE_1000HALF;
3003 if (advertise & ADVERTISED_1000baseT_Full)
3004 new_adv |= ADVERTISE_1000FULL;
3006 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3007 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3008 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3010 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3011 if (err)
3012 goto done;
3014 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3015 goto done;
3017 tw32(TG3_CPMU_EEE_MODE,
3018 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3020 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3021 if (!err) {
3022 u32 err2;
3024 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3025 case ASIC_REV_5717:
3026 case ASIC_REV_57765:
3027 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3028 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3029 MII_TG3_DSP_CH34TP2_HIBW01);
3030 /* Fall through */
3031 case ASIC_REV_5719:
3032 val = MII_TG3_DSP_TAP26_ALNOKO |
3033 MII_TG3_DSP_TAP26_RMRXSTO |
3034 MII_TG3_DSP_TAP26_OPCSINPT;
3035 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3038 val = 0;
3039 /* Advertise 100-BaseTX EEE ability */
3040 if (advertise & ADVERTISED_100baseT_Full)
3041 val |= MDIO_AN_EEE_ADV_100TX;
3042 /* Advertise 1000-BaseT EEE ability */
3043 if (advertise & ADVERTISED_1000baseT_Full)
3044 val |= MDIO_AN_EEE_ADV_1000T;
3045 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3047 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3048 if (!err)
3049 err = err2;
3052 done:
3053 return err;
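/* Begin bringing up a copper link: advertise a reduced set of modes
 * in low-power/WOL state, the configured set when autonegotiating,
 * or force a specific speed/duplex via BMCR. */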
3056 static void tg3_phy_copper_begin(struct tg3 *tp)
3058 u32 new_adv;
3059 int i;
3061 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3062 new_adv = ADVERTISED_10baseT_Half |
3063 ADVERTISED_10baseT_Full;
3064 if (tg3_flag(tp, WOL_SPEED_100MB))
3065 new_adv |= ADVERTISED_100baseT_Half |
3066 ADVERTISED_100baseT_Full;
3068 tg3_phy_autoneg_cfg(tp, new_adv,
3069 FLOW_CTRL_TX | FLOW_CTRL_RX);
3070 } else if (tp->link_config.speed == SPEED_INVALID) {
3071 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3072 tp->link_config.advertising &=
3073 ~(ADVERTISED_1000baseT_Half |
3074 ADVERTISED_1000baseT_Full);
3076 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3077 tp->link_config.flowctrl);
3078 } else {
3079 /* Asking for a specific link mode. */
3080 if (tp->link_config.speed == SPEED_1000) {
3081 if (tp->link_config.duplex == DUPLEX_FULL)
3082 new_adv = ADVERTISED_1000baseT_Full;
3083 else
3084 new_adv = ADVERTISED_1000baseT_Half;
3085 } else if (tp->link_config.speed == SPEED_100) {
3086 if (tp->link_config.duplex == DUPLEX_FULL)
3087 new_adv = ADVERTISED_100baseT_Full;
3088 else
3089 new_adv = ADVERTISED_100baseT_Half;
3090 } else {
3091 if (tp->link_config.duplex == DUPLEX_FULL)
3092 new_adv = ADVERTISED_10baseT_Full;
3093 else
3094 new_adv = ADVERTISED_10baseT_Half;
3097 tg3_phy_autoneg_cfg(tp, new_adv,
3098 tp->link_config.flowctrl);
3101 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3102 tp->link_config.speed != SPEED_INVALID) {
3103 u32 bmcr, orig_bmcr;
3105 tp->link_config.active_speed = tp->link_config.speed;
3106 tp->link_config.active_duplex = tp->link_config.duplex;
3108 bmcr = 0;
3109 switch (tp->link_config.speed) {
3110 default:
3111 case SPEED_10:
3112 break;
3114 case SPEED_100:
3115 bmcr |= BMCR_SPEED100;
3116 break;
3118 case SPEED_1000:
3119 bmcr |= BMCR_SPEED1000;
3120 break;
3123 if (tp->link_config.duplex == DUPLEX_FULL)
3124 bmcr |= BMCR_FULLDPLX;
3126 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3127 (bmcr != orig_bmcr)) {
3128 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3129 for (i = 0; i < 1500; i++) {
3130 u32 tmp;
3132 udelay(10);
3133 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3134 tg3_readphy(tp, MII_BMSR, &tmp))
3135 continue;
3136 if (!(tmp & BMSR_LSTATUS)) {
3137 udelay(40);
3138 break;
3141 tg3_writephy(tp, MII_BMCR, bmcr);
3142 udelay(40);
3144 } else {
3145 tg3_writephy(tp, MII_BMCR,
3146 BMCR_ANENABLE | BMCR_ANRESTART);
3150 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3152 int err;
3154 /* Turn off tap power management. */
3155 /* Set Extended packet length bit */
3156 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3158 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3159 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3160 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3161 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3162 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3164 udelay(40);
3166 return err;
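/* Return 1 only if the PHY advertisement registers already cover
 * every mode requested in the mask. */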
3169 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3171 u32 adv_reg, all_mask = 0;
3173 if (mask & ADVERTISED_10baseT_Half)
3174 all_mask |= ADVERTISE_10HALF;
3175 if (mask & ADVERTISED_10baseT_Full)
3176 all_mask |= ADVERTISE_10FULL;
3177 if (mask & ADVERTISED_100baseT_Half)
3178 all_mask |= ADVERTISE_100HALF;
3179 if (mask & ADVERTISED_100baseT_Full)
3180 all_mask |= ADVERTISE_100FULL;
3182 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3183 return 0;
3185 if ((adv_reg & all_mask) != all_mask)
3186 return 0;
3187 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3188 u32 tg3_ctrl;
3190 all_mask = 0;
3191 if (mask & ADVERTISED_1000baseT_Half)
3192 all_mask |= ADVERTISE_1000HALF;
3193 if (mask & ADVERTISED_1000baseT_Full)
3194 all_mask |= ADVERTISE_1000FULL;
3196 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3197 return 0;
3199 if ((tg3_ctrl & all_mask) != all_mask)
3200 return 0;
3202 return 1;
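/* Check that the advertised pause bits match the requested flow
 * control; on half duplex, rewrite the advertisement so a future
 * renegotiation picks up the right settings. */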
3205 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3207 u32 curadv, reqadv;
3209 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3210 return 1;
3212 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3213 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3215 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3216 if (curadv != reqadv)
3217 return 0;
3219 if (tg3_flag(tp, PAUSE_AUTONEG))
3220 tg3_readphy(tp, MII_LPA, rmtadv);
3221 } else {
3222 /* Reprogram the advertisement register, even if it
3223 * does not affect the current link. If the link
3224 * gets renegotiated in the future, we can save an
3225 * additional renegotiation cycle by advertising
3226 * it correctly in the first place. */
3228 if (curadv != reqadv) {
3229 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3230 ADVERTISE_PAUSE_ASYM);
3231 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3235 return 1;
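/* Main copper link-state handler: apply PHY workarounds, poll BMSR
 * for link, derive speed/duplex/flow control, reprogram MAC_MODE,
 * and report carrier changes. */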
3238 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3240 int current_link_up;
3241 u32 bmsr, val;
3242 u32 lcl_adv, rmt_adv;
3243 u16 current_speed;
3244 u8 current_duplex;
3245 int i, err;
3247 tw32(MAC_EVENT, 0);
3249 tw32_f(MAC_STATUS,
3250 (MAC_STATUS_SYNC_CHANGED |
3251 MAC_STATUS_CFG_CHANGED |
3252 MAC_STATUS_MI_COMPLETION |
3253 MAC_STATUS_LNKSTATE_CHANGED));
3254 udelay(40);
3256 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3257 tw32_f(MAC_MI_MODE,
3258 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3259 udelay(80);
3262 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3264 /* Some third-party PHYs need to be reset on link going
3265 * down. */
3267 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3268 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3269 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3270 netif_carrier_ok(tp->dev)) {
3271 tg3_readphy(tp, MII_BMSR, &bmsr);
3272 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3273 !(bmsr & BMSR_LSTATUS))
3274 force_reset = 1;
3276 if (force_reset)
3277 tg3_phy_reset(tp);
3279 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3280 tg3_readphy(tp, MII_BMSR, &bmsr);
3281 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3282 !tg3_flag(tp, INIT_COMPLETE))
3283 bmsr = 0;
3285 if (!(bmsr & BMSR_LSTATUS)) {
3286 err = tg3_init_5401phy_dsp(tp);
3287 if (err)
3288 return err;
3290 tg3_readphy(tp, MII_BMSR, &bmsr);
3291 for (i = 0; i < 1000; i++) {
3292 udelay(10);
3293 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3294 (bmsr & BMSR_LSTATUS)) {
3295 udelay(40);
3296 break;
3300 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3301 TG3_PHY_REV_BCM5401_B0 &&
3302 !(bmsr & BMSR_LSTATUS) &&
3303 tp->link_config.active_speed == SPEED_1000) {
3304 err = tg3_phy_reset(tp);
3305 if (!err)
3306 err = tg3_init_5401phy_dsp(tp);
3307 if (err)
3308 return err;
3311 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3312 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3313 /* 5701 {A0,B0} CRC bug workaround */
3314 tg3_writephy(tp, 0x15, 0x0a75);
3315 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3316 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3317 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3320 /* Clear pending interrupts... */
3321 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3322 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3324 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3325 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3326 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3327 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3329 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3330 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3331 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3332 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3333 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3334 else
3335 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3338 current_link_up = 0;
3339 current_speed = SPEED_INVALID;
3340 current_duplex = DUPLEX_INVALID;
3342 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3343 err = tg3_phy_auxctl_read(tp,
3344 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3345 &val);
3346 if (!err && !(val & (1 << 10))) {
3347 tg3_phy_auxctl_write(tp,
3348 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3349 val | (1 << 10));
3350 goto relink;
3354 bmsr = 0;
3355 for (i = 0; i < 100; i++) {
3356 tg3_readphy(tp, MII_BMSR, &bmsr);
3357 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3358 (bmsr & BMSR_LSTATUS))
3359 break;
3360 udelay(40);
3363 if (bmsr & BMSR_LSTATUS) {
3364 u32 aux_stat, bmcr;
3366 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3367 for (i = 0; i < 2000; i++) {
3368 udelay(10);
3369 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3370 aux_stat)
3371 break;
3374 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3375 &current_speed,
3376 &current_duplex);
3378 bmcr = 0;
3379 for (i = 0; i < 200; i++) {
3380 tg3_readphy(tp, MII_BMCR, &bmcr);
3381 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3382 continue;
3383 if (bmcr && bmcr != 0x7fff)
3384 break;
3385 udelay(10);
3388 lcl_adv = 0;
3389 rmt_adv = 0;
3391 tp->link_config.active_speed = current_speed;
3392 tp->link_config.active_duplex = current_duplex;
3394 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3395 if ((bmcr & BMCR_ANENABLE) &&
3396 tg3_copper_is_advertising_all(tp,
3397 tp->link_config.advertising)) {
3398 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3399 &rmt_adv))
3400 current_link_up = 1;
3402 } else {
3403 if (!(bmcr & BMCR_ANENABLE) &&
3404 tp->link_config.speed == current_speed &&
3405 tp->link_config.duplex == current_duplex &&
3406 tp->link_config.flowctrl ==
3407 tp->link_config.active_flowctrl) {
3408 current_link_up = 1;
3412 if (current_link_up == 1 &&
3413 tp->link_config.active_duplex == DUPLEX_FULL)
3414 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3417 relink:
3418 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3419 tg3_phy_copper_begin(tp);
3421 tg3_readphy(tp, MII_BMSR, &bmsr);
3422 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3423 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3424 current_link_up = 1;
3427 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3428 if (current_link_up == 1) {
3429 if (tp->link_config.active_speed == SPEED_100 ||
3430 tp->link_config.active_speed == SPEED_10)
3431 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3432 else
3433 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3434 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3435 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3436 else
3437 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3439 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3440 if (tp->link_config.active_duplex == DUPLEX_HALF)
3441 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3444 if (current_link_up == 1 &&
3445 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3446 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3447 else
3448 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3451 /* ??? Without this setting Netgear GA302T PHY does not
3452 * ??? send/receive packets... */
3454 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3455 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3456 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3457 tw32_f(MAC_MI_MODE, tp->mi_mode);
3458 udelay(80);
3461 tw32_f(MAC_MODE, tp->mac_mode);
3462 udelay(40);
3464 tg3_phy_eee_adjust(tp, current_link_up);
3466 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3467 /* Polled via timer. */
3468 tw32_f(MAC_EVENT, 0);
3469 } else {
3470 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3472 udelay(40);
3474 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3475 current_link_up == 1 &&
3476 tp->link_config.active_speed == SPEED_1000 &&
3477 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3478 udelay(120);
3479 tw32_f(MAC_STATUS,
3480 (MAC_STATUS_SYNC_CHANGED |
3481 MAC_STATUS_CFG_CHANGED));
3482 udelay(40);
3483 tg3_write_mem(tp,
3484 NIC_SRAM_FIRMWARE_MBOX,
3485 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3488 /* Prevent send BD corruption. */
3489 if (tg3_flag(tp, CLKREQ_BUG)) {
3490 u16 oldlnkctl, newlnkctl;
3492 pci_read_config_word(tp->pdev,
3493 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3494 &oldlnkctl);
3495 if (tp->link_config.active_speed == SPEED_100 ||
3496 tp->link_config.active_speed == SPEED_10)
3497 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3498 else
3499 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3500 if (newlnkctl != oldlnkctl)
3501 pci_write_config_word(tp->pdev,
3502 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3503 newlnkctl);
3506 if (current_link_up != netif_carrier_ok(tp->dev)) {
3507 if (current_link_up)
3508 netif_carrier_on(tp->dev);
3509 else
3510 netif_carrier_off(tp->dev);
3511 tg3_link_report(tp);
3514 return 0;
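/* State tracked by the software 1000BASE-X autonegotiation machine
 * used for fiber links without hardware autoneg support. */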
3517 struct tg3_fiber_aneginfo {
3518 int state;
3519 #define ANEG_STATE_UNKNOWN 0
3520 #define ANEG_STATE_AN_ENABLE 1
3521 #define ANEG_STATE_RESTART_INIT 2
3522 #define ANEG_STATE_RESTART 3
3523 #define ANEG_STATE_DISABLE_LINK_OK 4
3524 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3525 #define ANEG_STATE_ABILITY_DETECT 6
3526 #define ANEG_STATE_ACK_DETECT_INIT 7
3527 #define ANEG_STATE_ACK_DETECT 8
3528 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3529 #define ANEG_STATE_COMPLETE_ACK 10
3530 #define ANEG_STATE_IDLE_DETECT_INIT 11
3531 #define ANEG_STATE_IDLE_DETECT 12
3532 #define ANEG_STATE_LINK_OK 13
3533 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3534 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3536 u32 flags;
3537 #define MR_AN_ENABLE 0x00000001
3538 #define MR_RESTART_AN 0x00000002
3539 #define MR_AN_COMPLETE 0x00000004
3540 #define MR_PAGE_RX 0x00000008
3541 #define MR_NP_LOADED 0x00000010
3542 #define MR_TOGGLE_TX 0x00000020
3543 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3544 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3545 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3546 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3547 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3548 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3549 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3550 #define MR_TOGGLE_RX 0x00002000
3551 #define MR_NP_RX 0x00004000
3553 #define MR_LINK_OK 0x80000000
3555 unsigned long link_time, cur_time;
3557 u32 ability_match_cfg;
3558 int ability_match_count;
3560 char ability_match, idle_match, ack_match;
3562 u32 txconfig, rxconfig;
3563 #define ANEG_CFG_NP 0x00000080
3564 #define ANEG_CFG_ACK 0x00000040
3565 #define ANEG_CFG_RF2 0x00000020
3566 #define ANEG_CFG_RF1 0x00000010
3567 #define ANEG_CFG_PS2 0x00000001
3568 #define ANEG_CFG_PS1 0x00008000
3569 #define ANEG_CFG_HD 0x00004000
3570 #define ANEG_CFG_FD 0x00002000
3571 #define ANEG_CFG_INVAL 0x00001f06
3574 #define ANEG_OK 0
3575 #define ANEG_DONE 1
3576 #define ANEG_TIMER_ENAB 2
3577 #define ANEG_FAILED -1
3579 #define ANEG_STATE_SETTLE_TIME 10000
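/* Advance the software fiber autoneg state machine by one tick,
 * sampling the received config word and driving MAC_TX_AUTO_NEG. */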
3581 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3582 struct tg3_fiber_aneginfo *ap)
3584 u16 flowctrl;
3585 unsigned long delta;
3586 u32 rx_cfg_reg;
3587 int ret;
3589 if (ap->state == ANEG_STATE_UNKNOWN) {
3590 ap->rxconfig = 0;
3591 ap->link_time = 0;
3592 ap->cur_time = 0;
3593 ap->ability_match_cfg = 0;
3594 ap->ability_match_count = 0;
3595 ap->ability_match = 0;
3596 ap->idle_match = 0;
3597 ap->ack_match = 0;
3599 ap->cur_time++;
3601 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3602 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3604 if (rx_cfg_reg != ap->ability_match_cfg) {
3605 ap->ability_match_cfg = rx_cfg_reg;
3606 ap->ability_match = 0;
3607 ap->ability_match_count = 0;
3608 } else {
3609 if (++ap->ability_match_count > 1) {
3610 ap->ability_match = 1;
3611 ap->ability_match_cfg = rx_cfg_reg;
3614 if (rx_cfg_reg & ANEG_CFG_ACK)
3615 ap->ack_match = 1;
3616 else
3617 ap->ack_match = 0;
3619 ap->idle_match = 0;
3620 } else {
3621 ap->idle_match = 1;
3622 ap->ability_match_cfg = 0;
3623 ap->ability_match_count = 0;
3624 ap->ability_match = 0;
3625 ap->ack_match = 0;
3627 rx_cfg_reg = 0;
3630 ap->rxconfig = rx_cfg_reg;
3631 ret = ANEG_OK;
3633 switch (ap->state) {
3634 case ANEG_STATE_UNKNOWN:
3635 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3636 ap->state = ANEG_STATE_AN_ENABLE;
3638 /* fallthru */
3639 case ANEG_STATE_AN_ENABLE:
3640 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3641 if (ap->flags & MR_AN_ENABLE) {
3642 ap->link_time = 0;
3643 ap->cur_time = 0;
3644 ap->ability_match_cfg = 0;
3645 ap->ability_match_count = 0;
3646 ap->ability_match = 0;
3647 ap->idle_match = 0;
3648 ap->ack_match = 0;
3650 ap->state = ANEG_STATE_RESTART_INIT;
3651 } else {
3652 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3654 break;
3656 case ANEG_STATE_RESTART_INIT:
3657 ap->link_time = ap->cur_time;
3658 ap->flags &= ~(MR_NP_LOADED);
3659 ap->txconfig = 0;
3660 tw32(MAC_TX_AUTO_NEG, 0);
3661 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3662 tw32_f(MAC_MODE, tp->mac_mode);
3663 udelay(40);
3665 ret = ANEG_TIMER_ENAB;
3666 ap->state = ANEG_STATE_RESTART;
3668 /* fallthru */
3669 case ANEG_STATE_RESTART:
3670 delta = ap->cur_time - ap->link_time;
3671 if (delta > ANEG_STATE_SETTLE_TIME)
3672 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3673 else
3674 ret = ANEG_TIMER_ENAB;
3675 break;
3677 case ANEG_STATE_DISABLE_LINK_OK:
3678 ret = ANEG_DONE;
3679 break;
3681 case ANEG_STATE_ABILITY_DETECT_INIT:
3682 ap->flags &= ~(MR_TOGGLE_TX);
3683 ap->txconfig = ANEG_CFG_FD;
3684 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3685 if (flowctrl & ADVERTISE_1000XPAUSE)
3686 ap->txconfig |= ANEG_CFG_PS1;
3687 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3688 ap->txconfig |= ANEG_CFG_PS2;
3689 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3690 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3691 tw32_f(MAC_MODE, tp->mac_mode);
3692 udelay(40);
3694 ap->state = ANEG_STATE_ABILITY_DETECT;
3695 break;
3697 case ANEG_STATE_ABILITY_DETECT:
3698 if (ap->ability_match != 0 && ap->rxconfig != 0)
3699 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3700 break;
3702 case ANEG_STATE_ACK_DETECT_INIT:
3703 ap->txconfig |= ANEG_CFG_ACK;
3704 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3705 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3706 tw32_f(MAC_MODE, tp->mac_mode);
3707 udelay(40);
3709 ap->state = ANEG_STATE_ACK_DETECT;
3711 /* fallthru */
3712 case ANEG_STATE_ACK_DETECT:
3713 if (ap->ack_match != 0) {
3714 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3715 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3716 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3717 } else {
3718 ap->state = ANEG_STATE_AN_ENABLE;
3720 } else if (ap->ability_match != 0 &&
3721 ap->rxconfig == 0) {
3722 ap->state = ANEG_STATE_AN_ENABLE;
3724 break;
3726 case ANEG_STATE_COMPLETE_ACK_INIT:
3727 if (ap->rxconfig & ANEG_CFG_INVAL) {
3728 ret = ANEG_FAILED;
3729 break;
3731 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3732 MR_LP_ADV_HALF_DUPLEX |
3733 MR_LP_ADV_SYM_PAUSE |
3734 MR_LP_ADV_ASYM_PAUSE |
3735 MR_LP_ADV_REMOTE_FAULT1 |
3736 MR_LP_ADV_REMOTE_FAULT2 |
3737 MR_LP_ADV_NEXT_PAGE |
3738 MR_TOGGLE_RX |
3739 MR_NP_RX);
3740 if (ap->rxconfig & ANEG_CFG_FD)
3741 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3742 if (ap->rxconfig & ANEG_CFG_HD)
3743 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3744 if (ap->rxconfig & ANEG_CFG_PS1)
3745 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3746 if (ap->rxconfig & ANEG_CFG_PS2)
3747 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3748 if (ap->rxconfig & ANEG_CFG_RF1)
3749 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3750 if (ap->rxconfig & ANEG_CFG_RF2)
3751 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3752 if (ap->rxconfig & ANEG_CFG_NP)
3753 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3755 ap->link_time = ap->cur_time;
3757 ap->flags ^= (MR_TOGGLE_TX);
3758 if (ap->rxconfig & 0x0008)
3759 ap->flags |= MR_TOGGLE_RX;
3760 if (ap->rxconfig & ANEG_CFG_NP)
3761 ap->flags |= MR_NP_RX;
3762 ap->flags |= MR_PAGE_RX;
3764 ap->state = ANEG_STATE_COMPLETE_ACK;
3765 ret = ANEG_TIMER_ENAB;
3766 break;
3768 case ANEG_STATE_COMPLETE_ACK:
3769 if (ap->ability_match != 0 &&
3770 ap->rxconfig == 0) {
3771 ap->state = ANEG_STATE_AN_ENABLE;
3772 break;
3774 delta = ap->cur_time - ap->link_time;
3775 if (delta > ANEG_STATE_SETTLE_TIME) {
3776 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3777 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3778 } else {
3779 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3780 !(ap->flags & MR_NP_RX)) {
3781 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3782 } else {
3783 ret = ANEG_FAILED;
3787 break;
3789 case ANEG_STATE_IDLE_DETECT_INIT:
3790 ap->link_time = ap->cur_time;
3791 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3792 tw32_f(MAC_MODE, tp->mac_mode);
3793 udelay(40);
3795 ap->state = ANEG_STATE_IDLE_DETECT;
3796 ret = ANEG_TIMER_ENAB;
3797 break;
3799 case ANEG_STATE_IDLE_DETECT:
3800 if (ap->ability_match != 0 &&
3801 ap->rxconfig == 0) {
3802 ap->state = ANEG_STATE_AN_ENABLE;
3803 break;
3805 delta = ap->cur_time - ap->link_time;
3806 if (delta > ANEG_STATE_SETTLE_TIME) {
3807 /* XXX another gem from the Broadcom driver :( */
3808 ap->state = ANEG_STATE_LINK_OK;
3810 break;
3812 case ANEG_STATE_LINK_OK:
3813 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3814 ret = ANEG_DONE;
3815 break;
3817 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3818 /* ??? unimplemented */
3819 break;
3821 case ANEG_STATE_NEXT_PAGE_WAIT:
3822 /* ??? unimplemented */
3823 break;
3825 default:
3826 ret = ANEG_FAILED;
3827 break;
3830 return ret;
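/* Run the software autoneg state machine to completion (bounded at
 * roughly 195 ms) and report the negotiated tx/rx config words. */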
3833 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3835 int res = 0;
3836 struct tg3_fiber_aneginfo aninfo;
3837 int status = ANEG_FAILED;
3838 unsigned int tick;
3839 u32 tmp;
3841 tw32_f(MAC_TX_AUTO_NEG, 0);
3843 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3844 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3845 udelay(40);
3847 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3848 udelay(40);
3850 memset(&aninfo, 0, sizeof(aninfo));
3851 aninfo.flags |= MR_AN_ENABLE;
3852 aninfo.state = ANEG_STATE_UNKNOWN;
3853 aninfo.cur_time = 0;
3854 tick = 0;
3855 while (++tick < 195000) {
3856 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3857 if (status == ANEG_DONE || status == ANEG_FAILED)
3858 break;
3860 udelay(1);
3863 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3864 tw32_f(MAC_MODE, tp->mac_mode);
3865 udelay(40);
3867 *txflags = aninfo.txconfig;
3868 *rxflags = aninfo.flags;
3870 if (status == ANEG_DONE &&
3871 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3872 MR_LP_ADV_FULL_DUPLEX)))
3873 res = 1;
3875 return res;
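/* One-time init sequence for the BCM8002 SerDes PHY; the register
 * values are opaque magic numbers. */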
3878 static void tg3_init_bcm8002(struct tg3 *tp)
3880 u32 mac_status = tr32(MAC_STATUS);
3881 int i;
3883 /* Reset when initializing for the first time or when we have a link. */
3884 if (tg3_flag(tp, INIT_COMPLETE) &&
3885 !(mac_status & MAC_STATUS_PCS_SYNCED))
3886 return;
3888 /* Set PLL lock range. */
3889 tg3_writephy(tp, 0x16, 0x8007);
3891 /* SW reset */
3892 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3894 /* Wait for reset to complete. */
3895 /* XXX schedule_timeout() ... */
3896 for (i = 0; i < 500; i++)
3897 udelay(10);
3899 /* Config mode; select PMA/Ch 1 regs. */
3900 tg3_writephy(tp, 0x10, 0x8411);
3902 /* Enable auto-lock and comdet, select txclk for tx. */
3903 tg3_writephy(tp, 0x11, 0x0a10);
3905 tg3_writephy(tp, 0x18, 0x00a0);
3906 tg3_writephy(tp, 0x16, 0x41ff);
3908 /* Assert and deassert POR. */
3909 tg3_writephy(tp, 0x13, 0x0400);
3910 udelay(40);
3911 tg3_writephy(tp, 0x13, 0x0000);
3913 tg3_writephy(tp, 0x11, 0x0a50);
3914 udelay(40);
3915 tg3_writephy(tp, 0x11, 0x0a10);
3917 /* Wait for signal to stabilize */
3918 /* XXX schedule_timeout() ... */
3919 for (i = 0; i < 15000; i++)
3920 udelay(10);
3922 /* Deselect the channel register so we can read the PHYID
3923 * later. */
3925 tg3_writephy(tp, 0x10, 0x8011);
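/* Fiber link setup using the hardware SG_DIG autoneg engine; falls
 * back to parallel detection when the partner sends no config words. */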
3928 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3930 u16 flowctrl;
3931 u32 sg_dig_ctrl, sg_dig_status;
3932 u32 serdes_cfg, expected_sg_dig_ctrl;
3933 int workaround, port_a;
3934 int current_link_up;
3936 serdes_cfg = 0;
3937 expected_sg_dig_ctrl = 0;
3938 workaround = 0;
3939 port_a = 1;
3940 current_link_up = 0;
3942 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3943 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3944 workaround = 1;
3945 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3946 port_a = 0;
3948 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3949 /* preserve bits 20-23 for voltage regulator */
3950 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3953 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3955 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3956 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3957 if (workaround) {
3958 u32 val = serdes_cfg;
3960 if (port_a)
3961 val |= 0xc010000;
3962 else
3963 val |= 0x4010000;
3964 tw32_f(MAC_SERDES_CFG, val);
3967 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3969 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3970 tg3_setup_flow_control(tp, 0, 0);
3971 current_link_up = 1;
3973 goto out;
3976 /* Want auto-negotiation. */
3977 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3979 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3980 if (flowctrl & ADVERTISE_1000XPAUSE)
3981 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3982 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3983 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3985 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3986 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3987 tp->serdes_counter &&
3988 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3989 MAC_STATUS_RCVD_CFG)) ==
3990 MAC_STATUS_PCS_SYNCED)) {
3991 tp->serdes_counter--;
3992 current_link_up = 1;
3993 goto out;
3995 restart_autoneg:
3996 if (workaround)
3997 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3998 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3999 udelay(5);
4000 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4002 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4003 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4004 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4005 MAC_STATUS_SIGNAL_DET)) {
4006 sg_dig_status = tr32(SG_DIG_STATUS);
4007 mac_status = tr32(MAC_STATUS);
4009 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4010 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4011 u32 local_adv = 0, remote_adv = 0;
4013 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4014 local_adv |= ADVERTISE_1000XPAUSE;
4015 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4016 local_adv |= ADVERTISE_1000XPSE_ASYM;
4018 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4019 remote_adv |= LPA_1000XPAUSE;
4020 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4021 remote_adv |= LPA_1000XPAUSE_ASYM;
4023 tg3_setup_flow_control(tp, local_adv, remote_adv);
4024 current_link_up = 1;
4025 tp->serdes_counter = 0;
4026 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4027 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4028 if (tp->serdes_counter)
4029 tp->serdes_counter--;
4030 else {
4031 if (workaround) {
4032 u32 val = serdes_cfg;
4034 if (port_a)
4035 val |= 0xc010000;
4036 else
4037 val |= 0x4010000;
4039 tw32_f(MAC_SERDES_CFG, val);
4042 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4043 udelay(40);
4045 /* Link parallel detection - link is up only if we have
4046 * PCS_SYNC and are not receiving config code words. */
4048 mac_status = tr32(MAC_STATUS);
4049 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4050 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4051 tg3_setup_flow_control(tp, 0, 0);
4052 current_link_up = 1;
4053 tp->phy_flags |=
4054 TG3_PHYFLG_PARALLEL_DETECT;
4055 tp->serdes_counter =
4056 SERDES_PARALLEL_DET_TIMEOUT;
4057 } else
4058 goto restart_autoneg;
4061 } else {
4062 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4063 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4066 out:
4067 return current_link_up;
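/* Fiber link setup without the hardware engine: run the software
 * autoneg state machine, or force 1000FD when autoneg is disabled. */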
4070 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4072 int current_link_up = 0;
4074 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4075 goto out;
4077 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4078 u32 txflags, rxflags;
4079 int i;
4081 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4082 u32 local_adv = 0, remote_adv = 0;
4084 if (txflags & ANEG_CFG_PS1)
4085 local_adv |= ADVERTISE_1000XPAUSE;
4086 if (txflags & ANEG_CFG_PS2)
4087 local_adv |= ADVERTISE_1000XPSE_ASYM;
4089 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4090 remote_adv |= LPA_1000XPAUSE;
4091 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4092 remote_adv |= LPA_1000XPAUSE_ASYM;
4094 tg3_setup_flow_control(tp, local_adv, remote_adv);
4096 current_link_up = 1;
4098 for (i = 0; i < 30; i++) {
4099 udelay(20);
4100 tw32_f(MAC_STATUS,
4101 (MAC_STATUS_SYNC_CHANGED |
4102 MAC_STATUS_CFG_CHANGED));
4103 udelay(40);
4104 if ((tr32(MAC_STATUS) &
4105 (MAC_STATUS_SYNC_CHANGED |
4106 MAC_STATUS_CFG_CHANGED)) == 0)
4107 break;
4110 mac_status = tr32(MAC_STATUS);
4111 if (current_link_up == 0 &&
4112 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4113 !(mac_status & MAC_STATUS_RCVD_CFG))
4114 current_link_up = 1;
4115 } else {
4116 tg3_setup_flow_control(tp, 0, 0);
4118 /* Forcing 1000FD link up. */
4119 current_link_up = 1;
4121 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4122 udelay(40);
4124 tw32_f(MAC_MODE, tp->mac_mode);
4125 udelay(40);
4128 out:
4129 return current_link_up;
4132 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4134 u32 orig_pause_cfg;
4135 u16 orig_active_speed;
4136 u8 orig_active_duplex;
4137 u32 mac_status;
4138 int current_link_up;
4139 int i;
4141 orig_pause_cfg = tp->link_config.active_flowctrl;
4142 orig_active_speed = tp->link_config.active_speed;
4143 orig_active_duplex = tp->link_config.active_duplex;
4145 if (!tg3_flag(tp, HW_AUTONEG) &&
4146 netif_carrier_ok(tp->dev) &&
4147 tg3_flag(tp, INIT_COMPLETE)) {
4148 mac_status = tr32(MAC_STATUS);
4149 mac_status &= (MAC_STATUS_PCS_SYNCED |
4150 MAC_STATUS_SIGNAL_DET |
4151 MAC_STATUS_CFG_CHANGED |
4152 MAC_STATUS_RCVD_CFG);
4153 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4154 MAC_STATUS_SIGNAL_DET)) {
4155 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4156 MAC_STATUS_CFG_CHANGED));
4157 return 0;
4161 tw32_f(MAC_TX_AUTO_NEG, 0);
4163 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4164 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4165 tw32_f(MAC_MODE, tp->mac_mode);
4166 udelay(40);
4168 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4169 tg3_init_bcm8002(tp);
4171 /* Enable link change event even when serdes polling. */
4172 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4173 udelay(40);
4175 current_link_up = 0;
4176 mac_status = tr32(MAC_STATUS);
4178 if (tg3_flag(tp, HW_AUTONEG))
4179 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4180 else
4181 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4183 tp->napi[0].hw_status->status =
4184 (SD_STATUS_UPDATED |
4185 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4187 for (i = 0; i < 100; i++) {
4188 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4189 MAC_STATUS_CFG_CHANGED));
4190 udelay(5);
4191 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4192 MAC_STATUS_CFG_CHANGED |
4193 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4194 break;
4197 mac_status = tr32(MAC_STATUS);
4198 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4199 current_link_up = 0;
4200 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4201 tp->serdes_counter == 0) {
4202 tw32_f(MAC_MODE, (tp->mac_mode |
4203 MAC_MODE_SEND_CONFIGS));
4204 udelay(1);
4205 tw32_f(MAC_MODE, tp->mac_mode);
4209 if (current_link_up == 1) {
4210 tp->link_config.active_speed = SPEED_1000;
4211 tp->link_config.active_duplex = DUPLEX_FULL;
4212 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4213 LED_CTRL_LNKLED_OVERRIDE |
4214 LED_CTRL_1000MBPS_ON));
4215 } else {
4216 tp->link_config.active_speed = SPEED_INVALID;
4217 tp->link_config.active_duplex = DUPLEX_INVALID;
4218 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4219 LED_CTRL_LNKLED_OVERRIDE |
4220 LED_CTRL_TRAFFIC_OVERRIDE));
4223 if (current_link_up != netif_carrier_ok(tp->dev)) {
4224 if (current_link_up)
4225 netif_carrier_on(tp->dev);
4226 else
4227 netif_carrier_off(tp->dev);
4228 tg3_link_report(tp);
4229 } else {
4230 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4231 if (orig_pause_cfg != now_pause_cfg ||
4232 orig_active_speed != tp->link_config.active_speed ||
4233 orig_active_duplex != tp->link_config.active_duplex)
4234 tg3_link_report(tp);
4237 return 0;
4240 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4242 int current_link_up, err = 0;
4243 u32 bmsr, bmcr;
4244 u16 current_speed;
4245 u8 current_duplex;
4246 u32 local_adv, remote_adv;
4248 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4249 tw32_f(MAC_MODE, tp->mac_mode);
4250 udelay(40);
4252 tw32(MAC_EVENT, 0);
4254 tw32_f(MAC_STATUS,
4255 (MAC_STATUS_SYNC_CHANGED |
4256 MAC_STATUS_CFG_CHANGED |
4257 MAC_STATUS_MI_COMPLETION |
4258 MAC_STATUS_LNKSTATE_CHANGED));
4259 udelay(40);
4261 if (force_reset)
4262 tg3_phy_reset(tp);
4264 current_link_up = 0;
4265 current_speed = SPEED_INVALID;
4266 current_duplex = DUPLEX_INVALID;
4268 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4269 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4270 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4271 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4272 bmsr |= BMSR_LSTATUS;
4273 else
4274 bmsr &= ~BMSR_LSTATUS;
4277 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4279 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4280 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4281 /* do nothing, just check for link up at the end */
4282 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4283 u32 adv, new_adv;
4285 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4286 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4287 ADVERTISE_1000XPAUSE |
4288 ADVERTISE_1000XPSE_ASYM |
4289 ADVERTISE_SLCT);
4291 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4293 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4294 new_adv |= ADVERTISE_1000XHALF;
4295 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4296 new_adv |= ADVERTISE_1000XFULL;
4298 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4299 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4300 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4301 tg3_writephy(tp, MII_BMCR, bmcr);
4303 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4304 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4305 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4307 return err;
4309 } else {
4310 u32 new_bmcr;
4312 bmcr &= ~BMCR_SPEED1000;
4313 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4315 if (tp->link_config.duplex == DUPLEX_FULL)
4316 new_bmcr |= BMCR_FULLDPLX;
4318 if (new_bmcr != bmcr) {
4319 /* BMCR_SPEED1000 is a reserved bit that needs
4320 * to be set on write.
4321 */
4322 new_bmcr |= BMCR_SPEED1000;
4324 /* Force a linkdown */
4325 if (netif_carrier_ok(tp->dev)) {
4326 u32 adv;
4328 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4329 adv &= ~(ADVERTISE_1000XFULL |
4330 ADVERTISE_1000XHALF |
4331 ADVERTISE_SLCT);
4332 tg3_writephy(tp, MII_ADVERTISE, adv);
4333 tg3_writephy(tp, MII_BMCR, bmcr |
4334 BMCR_ANRESTART |
4335 BMCR_ANENABLE);
4336 udelay(10);
4337 netif_carrier_off(tp->dev);
4339 tg3_writephy(tp, MII_BMCR, new_bmcr);
4340 bmcr = new_bmcr;
4341 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4342 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4343 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4344 ASIC_REV_5714) {
4345 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4346 bmsr |= BMSR_LSTATUS;
4347 else
4348 bmsr &= ~BMSR_LSTATUS;
4350 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4354 if (bmsr & BMSR_LSTATUS) {
4355 current_speed = SPEED_1000;
4356 current_link_up = 1;
4357 if (bmcr & BMCR_FULLDPLX)
4358 current_duplex = DUPLEX_FULL;
4359 else
4360 current_duplex = DUPLEX_HALF;
4362 local_adv = 0;
4363 remote_adv = 0;
4365 if (bmcr & BMCR_ANENABLE) {
4366 u32 common;
4368 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4369 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4370 common = local_adv & remote_adv;
4371 if (common & (ADVERTISE_1000XHALF |
4372 ADVERTISE_1000XFULL)) {
4373 if (common & ADVERTISE_1000XFULL)
4374 current_duplex = DUPLEX_FULL;
4375 else
4376 current_duplex = DUPLEX_HALF;
4377 } else if (!tg3_flag(tp, 5780_CLASS)) {
4378 /* Link is up via parallel detect */
4379 } else {
4380 current_link_up = 0;
4385 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4386 tg3_setup_flow_control(tp, local_adv, remote_adv);
4388 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4389 if (tp->link_config.active_duplex == DUPLEX_HALF)
4390 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4392 tw32_f(MAC_MODE, tp->mac_mode);
4393 udelay(40);
4395 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4397 tp->link_config.active_speed = current_speed;
4398 tp->link_config.active_duplex = current_duplex;
4400 if (current_link_up != netif_carrier_ok(tp->dev)) {
4401 if (current_link_up)
4402 netif_carrier_on(tp->dev);
4403 else {
4404 netif_carrier_off(tp->dev);
4405 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4407 tg3_link_report(tp);
4409 return err;
4412 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4414 if (tp->serdes_counter) {
4415 /* Give autoneg time to complete. */
4416 tp->serdes_counter--;
4417 return;
4420 if (!netif_carrier_ok(tp->dev) &&
4421 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4422 u32 bmcr;
4424 tg3_readphy(tp, MII_BMCR, &bmcr);
4425 if (bmcr & BMCR_ANENABLE) {
4426 u32 phy1, phy2;
4428 /* Select shadow register 0x1f */
4429 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4430 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4432 /* Select expansion interrupt status register */
4433 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4434 MII_TG3_DSP_EXP1_INT_STAT);
4435 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4436 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4438 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4439 /* We have signal detect and not receiving
4440 * config code words, link is up by parallel
4441 * detection.
4442 */
4444 bmcr &= ~BMCR_ANENABLE;
4445 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4446 tg3_writephy(tp, MII_BMCR, bmcr);
4447 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4450 } else if (netif_carrier_ok(tp->dev) &&
4451 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4452 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4453 u32 phy2;
4455 /* Select expansion interrupt status register */
4456 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4457 MII_TG3_DSP_EXP1_INT_STAT);
4458 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4459 if (phy2 & 0x20) {
4460 u32 bmcr;
4462 /* Config code words received, turn on autoneg. */
4463 tg3_readphy(tp, MII_BMCR, &bmcr);
4464 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4466 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4472 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4474 u32 val;
4475 int err;
4477 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4478 err = tg3_setup_fiber_phy(tp, force_reset);
4479 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4480 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4481 else
4482 err = tg3_setup_copper_phy(tp, force_reset);
4484 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4485 u32 scale;
4487 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4488 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4489 scale = 65;
4490 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4491 scale = 6;
4492 else
4493 scale = 12;
4495 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4496 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4497 tw32(GRC_MISC_CFG, val);
4500 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4501 (6 << TX_LENGTHS_IPG_SHIFT);
4502 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4503 val |= tr32(MAC_TX_LENGTHS) &
4504 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4505 TX_LENGTHS_CNT_DWN_VAL_MSK);
4507 if (tp->link_config.active_speed == SPEED_1000 &&
4508 tp->link_config.active_duplex == DUPLEX_HALF)
4509 tw32(MAC_TX_LENGTHS, val |
4510 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4511 else
4512 tw32(MAC_TX_LENGTHS, val |
4513 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4515 if (!tg3_flag(tp, 5705_PLUS)) {
4516 if (netif_carrier_ok(tp->dev)) {
4517 tw32(HOSTCC_STAT_COAL_TICKS,
4518 tp->coal.stats_block_coalesce_usecs);
4519 } else {
4520 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4524 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4525 val = tr32(PCIE_PWR_MGMT_THRESH);
4526 if (!netif_carrier_ok(tp->dev))
4527 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4528 tp->pwrmgmt_thresh;
4529 else
4530 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4531 tw32(PCIE_PWR_MGMT_THRESH, val);
4534 return err;
4537 static inline int tg3_irq_sync(struct tg3 *tp)
4539 return tp->irq_sync;
4542 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4544 int i;
4546 dst = (u32 *)((u8 *)dst + off);
4547 for (i = 0; i < len; i += sizeof(u32))
4548 *dst++ = tr32(off + i);
4551 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4553 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4554 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4555 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4556 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4557 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4558 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4559 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4560 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4561 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4562 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4563 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4564 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4565 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4566 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4567 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4568 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4569 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4570 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4571 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4573 if (tg3_flag(tp, SUPPORT_MSIX))
4574 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4576 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4577 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4578 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4579 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4580 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4581 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4582 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4583 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4585 if (!tg3_flag(tp, 5705_PLUS)) {
4586 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4587 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4588 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4591 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4592 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4593 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4594 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4595 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4597 if (tg3_flag(tp, NVRAM))
4598 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4601 static void tg3_dump_state(struct tg3 *tp)
4603 int i;
4604 u32 *regs;
4606 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4607 if (!regs) {
4608 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4609 return;
4612 if (tg3_flag(tp, PCI_EXPRESS)) {
4613 /* Read up to but not including private PCI registers */
4614 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4615 regs[i / sizeof(u32)] = tr32(i);
4616 } else
4617 tg3_dump_legacy_regs(tp, regs);
4619 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4620 if (!regs[i + 0] && !regs[i + 1] &&
4621 !regs[i + 2] && !regs[i + 3])
4622 continue;
4624 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4625 i * 4,
4626 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4629 kfree(regs);
4631 for (i = 0; i < tp->irq_cnt; i++) {
4632 struct tg3_napi *tnapi = &tp->napi[i];
4634 /* SW status block */
4635 netdev_err(tp->dev,
4636 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4638 tnapi->hw_status->status,
4639 tnapi->hw_status->status_tag,
4640 tnapi->hw_status->rx_jumbo_consumer,
4641 tnapi->hw_status->rx_consumer,
4642 tnapi->hw_status->rx_mini_consumer,
4643 tnapi->hw_status->idx[0].rx_producer,
4644 tnapi->hw_status->idx[0].tx_consumer);
4646 netdev_err(tp->dev,
4647 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4649 tnapi->last_tag, tnapi->last_irq_tag,
4650 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4651 tnapi->rx_rcb_ptr,
4652 tnapi->prodring.rx_std_prod_idx,
4653 tnapi->prodring.rx_std_cons_idx,
4654 tnapi->prodring.rx_jmb_prod_idx,
4655 tnapi->prodring.rx_jmb_cons_idx);
4659 /* This is called whenever we suspect that the system chipset is re-
4660 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4661 * is bogus tx completions. We try to recover by setting the
4662 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4663 * in the workqueue.
4665 static void tg3_tx_recover(struct tg3 *tp)
4667 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4668 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4670 netdev_warn(tp->dev,
4671 "The system may be re-ordering memory-mapped I/O "
4672 "cycles to the network device, attempting to recover. "
4673 "Please report the problem to the driver maintainer "
4674 "and include system chipset information.\n");
4676 spin_lock(&tp->lock);
4677 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4678 spin_unlock(&tp->lock);
4681 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4683 /* Tell compiler to fetch tx indices from memory. */
4684 barrier();
4685 return tnapi->tx_pending -
4686 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
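/* Editor's note: the free-slot arithmetic above relies on unsigned
 * wraparound. A worked example with TG3_TX_RING_SIZE == 512:
 *
 *   tx_prod = 5, tx_cons = 510 (producer has wrapped)
 *   in_flight = (5 - 510) & 511 = 7
 *   avail     = tnapi->tx_pending - 7
 *
 * The masking makes the subtraction correct across the wrap without
 * any branches, provided the ring size is a power of two.
 */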
4689 /* Tigon3 never reports partial packet sends. So we do not
4690 * need special logic to handle SKBs that have not had all
4691 * of their frags sent yet, like SunGEM does.
4693 static void tg3_tx(struct tg3_napi *tnapi)
4695 struct tg3 *tp = tnapi->tp;
4696 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4697 u32 sw_idx = tnapi->tx_cons;
4698 struct netdev_queue *txq;
4699 int index = tnapi - tp->napi;
4701 if (tg3_flag(tp, ENABLE_TSS))
4702 index--;
4704 txq = netdev_get_tx_queue(tp->dev, index);
4706 while (sw_idx != hw_idx) {
4707 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4708 struct sk_buff *skb = ri->skb;
4709 int i, tx_bug = 0;
4711 if (unlikely(skb == NULL)) {
4712 tg3_tx_recover(tp);
4713 return;
4716 pci_unmap_single(tp->pdev,
4717 dma_unmap_addr(ri, mapping),
4718 skb_headlen(skb),
4719 PCI_DMA_TODEVICE);
4721 ri->skb = NULL;
4723 sw_idx = NEXT_TX(sw_idx);
4725 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4726 ri = &tnapi->tx_buffers[sw_idx];
4727 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4728 tx_bug = 1;
4730 pci_unmap_page(tp->pdev,
4731 dma_unmap_addr(ri, mapping),
4732 skb_shinfo(skb)->frags[i].size,
4733 PCI_DMA_TODEVICE);
4734 sw_idx = NEXT_TX(sw_idx);
4737 dev_kfree_skb(skb);
4739 if (unlikely(tx_bug)) {
4740 tg3_tx_recover(tp);
4741 return;
4745 tnapi->tx_cons = sw_idx;
4747 /* Need to make the tx_cons update visible to tg3_start_xmit()
4748 * before checking for netif_queue_stopped(). Without the
4749 * memory barrier, there is a small possibility that tg3_start_xmit()
4750 * will miss it and cause the queue to be stopped forever.
4751 */
4752 smp_mb();
4754 if (unlikely(netif_tx_queue_stopped(txq) &&
4755 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4756 __netif_tx_lock(txq, smp_processor_id());
4757 if (netif_tx_queue_stopped(txq) &&
4758 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4759 netif_tx_wake_queue(txq);
4760 __netif_tx_unlock(txq);
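/* Editor's sketch of the stop/wake pairing between this reclaim path
 * and tg3_start_xmit(); the barriers here and in the xmit path are
 * what keep the queue from stalling forever:
 *
 *   tg3_start_xmit():                tg3_tx():
 *     queue descriptors                tnapi->tx_cons = sw_idx;
 *     netif_tx_stop_queue(txq);        smp_mb();
 *     smp_mb();                        if (stopped && avail > thresh)
 *     if (avail > thresh) wake;            wake under __netif_tx_lock
 *
 * Whichever side runs second is guaranteed to see the other side's
 * update, so at least one of them performs the wake-up.
 */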
4764 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4766 if (!ri->skb)
4767 return;
4769 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4770 map_sz, PCI_DMA_FROMDEVICE);
4771 dev_kfree_skb_any(ri->skb);
4772 ri->skb = NULL;
4775 /* Returns size of skb allocated or < 0 on error.
4777 * We only need to fill in the address because the other members
4778 * of the RX descriptor are invariant, see tg3_init_rings.
4780 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4781 * posting buffers we only dirty the first cache line of the RX
4782 * descriptor (containing the address). Whereas for the RX status
4783 * buffers the cpu only reads the last cacheline of the RX descriptor
4784 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4785 */
4786 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4787 u32 opaque_key, u32 dest_idx_unmasked)
4789 struct tg3_rx_buffer_desc *desc;
4790 struct ring_info *map;
4791 struct sk_buff *skb;
4792 dma_addr_t mapping;
4793 int skb_size, dest_idx;
4795 switch (opaque_key) {
4796 case RXD_OPAQUE_RING_STD:
4797 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4798 desc = &tpr->rx_std[dest_idx];
4799 map = &tpr->rx_std_buffers[dest_idx];
4800 skb_size = tp->rx_pkt_map_sz;
4801 break;
4803 case RXD_OPAQUE_RING_JUMBO:
4804 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4805 desc = &tpr->rx_jmb[dest_idx].std;
4806 map = &tpr->rx_jmb_buffers[dest_idx];
4807 skb_size = TG3_RX_JMB_MAP_SZ;
4808 break;
4810 default:
4811 return -EINVAL;
4814 /* Do not overwrite any of the map or rp information
4815 * until we are sure we can commit to a new buffer.
4817 * Callers depend upon this behavior and assume that
4818 * we leave everything unchanged if we fail.
4819 */
4820 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4821 if (skb == NULL)
4822 return -ENOMEM;
4824 skb_reserve(skb, tp->rx_offset);
4826 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4827 PCI_DMA_FROMDEVICE);
4828 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4829 dev_kfree_skb(skb);
4830 return -EIO;
4833 map->skb = skb;
4834 dma_unmap_addr_set(map, mapping, mapping);
4836 desc->addr_hi = ((u64)mapping >> 32);
4837 desc->addr_lo = ((u64)mapping & 0xffffffff);
4839 return skb_size;
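/* Editor's note: the 64-bit DMA address is split across two 32-bit
 * descriptor words above. For example, a hypothetical mapping of
 * 0x0000000123456780 would be posted as:
 *
 *   desc->addr_hi = 0x00000001;
 *   desc->addr_lo = 0x23456780;
 *
 * Only this first cache line of the descriptor is dirtied when
 * posting, as described in the comment before this function.
 */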
4842 /* We only need to move over in the address because the other
4843 * members of the RX descriptor are invariant. See notes above
4844 * tg3_alloc_rx_skb for full details.
4846 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4847 struct tg3_rx_prodring_set *dpr,
4848 u32 opaque_key, int src_idx,
4849 u32 dest_idx_unmasked)
4851 struct tg3 *tp = tnapi->tp;
4852 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4853 struct ring_info *src_map, *dest_map;
4854 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4855 int dest_idx;
4857 switch (opaque_key) {
4858 case RXD_OPAQUE_RING_STD:
4859 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4860 dest_desc = &dpr->rx_std[dest_idx];
4861 dest_map = &dpr->rx_std_buffers[dest_idx];
4862 src_desc = &spr->rx_std[src_idx];
4863 src_map = &spr->rx_std_buffers[src_idx];
4864 break;
4866 case RXD_OPAQUE_RING_JUMBO:
4867 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4868 dest_desc = &dpr->rx_jmb[dest_idx].std;
4869 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4870 src_desc = &spr->rx_jmb[src_idx].std;
4871 src_map = &spr->rx_jmb_buffers[src_idx];
4872 break;
4874 default:
4875 return;
4878 dest_map->skb = src_map->skb;
4879 dma_unmap_addr_set(dest_map, mapping,
4880 dma_unmap_addr(src_map, mapping));
4881 dest_desc->addr_hi = src_desc->addr_hi;
4882 dest_desc->addr_lo = src_desc->addr_lo;
4884 /* Ensure that the update to the skb happens after the physical
4885 * addresses have been transferred to the new BD location.
4886 */
4887 smp_wmb();
4889 src_map->skb = NULL;
4892 /* The RX ring scheme is composed of multiple rings which post fresh
4893 * buffers to the chip, and one special ring the chip uses to report
4894 * status back to the host.
4896 * The special ring reports the status of received packets to the
4897 * host. The chip does not write into the original descriptor the
4898 * RX buffer was obtained from. The chip simply takes the original
4899 * descriptor as provided by the host, updates the status and length
4900 * field, then writes this into the next status ring entry.
4902 * Each ring the host uses to post buffers to the chip is described
4903 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4904 * it is first placed into the on-chip ram. When the packet's length
4905 * is known, it walks down the TG3_BDINFO entries to select the ring.
4906 * Each TG3_BDINFO specifies a MAXLEN field; the first TG3_BDINFO
4907 * whose MAXLEN covers the new packet's length is chosen.
4909 * The "separate ring for rx status" scheme may sound odd, but it makes
4910 * sense from a cache coherency perspective. If only the host writes
4911 * to the buffer post rings, and only the chip writes to the rx status
4912 * rings, then cache lines never move beyond shared-modified state.
4913 * If both the host and chip were to write into the same ring, cache line
4914 * eviction could occur since both entities want it in an exclusive state.
4915 */
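/* Editor's illustration of the MAXLEN walk described above, with
 * assumed (configuration-dependent) values: if the standard ring's
 * TG3_BDINFO advertises a MAXLEN of ~1536 and the jumbo ring's ~9018,
 * then a 300-byte frame selects a standard-ring buffer while a
 * 4000-byte frame falls through to the jumbo ring. The returned
 * opaque cookie later tells tg3_rx() which producer ring the buffer
 * came from.
 */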
4916 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4918 struct tg3 *tp = tnapi->tp;
4919 u32 work_mask, rx_std_posted = 0;
4920 u32 std_prod_idx, jmb_prod_idx;
4921 u32 sw_idx = tnapi->rx_rcb_ptr;
4922 u16 hw_idx;
4923 int received;
4924 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4926 hw_idx = *(tnapi->rx_rcb_prod_idx);
4927 /*
4928 * We need to order the read of hw_idx and the read of
4929 * the opaque cookie.
4930 */
4931 rmb();
4932 work_mask = 0;
4933 received = 0;
4934 std_prod_idx = tpr->rx_std_prod_idx;
4935 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4936 while (sw_idx != hw_idx && budget > 0) {
4937 struct ring_info *ri;
4938 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4939 unsigned int len;
4940 struct sk_buff *skb;
4941 dma_addr_t dma_addr;
4942 u32 opaque_key, desc_idx, *post_ptr;
4944 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4945 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4946 if (opaque_key == RXD_OPAQUE_RING_STD) {
4947 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4948 dma_addr = dma_unmap_addr(ri, mapping);
4949 skb = ri->skb;
4950 post_ptr = &std_prod_idx;
4951 rx_std_posted++;
4952 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4953 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4954 dma_addr = dma_unmap_addr(ri, mapping);
4955 skb = ri->skb;
4956 post_ptr = &jmb_prod_idx;
4957 } else
4958 goto next_pkt_nopost;
4960 work_mask |= opaque_key;
4962 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4963 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4964 drop_it:
4965 tg3_recycle_rx(tnapi, tpr, opaque_key,
4966 desc_idx, *post_ptr);
4967 drop_it_no_recycle:
4968 /* Other statistics are maintained by the card. */
4969 tp->rx_dropped++;
4970 goto next_pkt;
4973 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4974 ETH_FCS_LEN;
4976 if (len > TG3_RX_COPY_THRESH(tp)) {
4977 int skb_size;
4979 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4980 *post_ptr);
4981 if (skb_size < 0)
4982 goto drop_it;
4984 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4985 PCI_DMA_FROMDEVICE);
4987 /* Ensure that the update to the skb happens
4988 * after the usage of the old DMA mapping.
4989 */
4990 smp_wmb();
4992 ri->skb = NULL;
4994 skb_put(skb, len);
4995 } else {
4996 struct sk_buff *copy_skb;
4998 tg3_recycle_rx(tnapi, tpr, opaque_key,
4999 desc_idx, *post_ptr);
5001 copy_skb = netdev_alloc_skb(tp->dev, len +
5002 TG3_RAW_IP_ALIGN);
5003 if (copy_skb == NULL)
5004 goto drop_it_no_recycle;
5006 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5007 skb_put(copy_skb, len);
5008 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5009 skb_copy_from_linear_data(skb, copy_skb->data, len);
5010 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5012 /* We'll reuse the original ring buffer. */
5013 skb = copy_skb;
5016 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5017 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5018 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5019 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5020 skb->ip_summed = CHECKSUM_UNNECESSARY;
5021 else
5022 skb_checksum_none_assert(skb);
5024 skb->protocol = eth_type_trans(skb, tp->dev);
5026 if (len > (tp->dev->mtu + ETH_HLEN) &&
5027 skb->protocol != htons(ETH_P_8021Q)) {
5028 dev_kfree_skb(skb);
5029 goto drop_it_no_recycle;
5032 if (desc->type_flags & RXD_FLAG_VLAN &&
5033 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5034 __vlan_hwaccel_put_tag(skb,
5035 desc->err_vlan & RXD_VLAN_MASK);
5037 napi_gro_receive(&tnapi->napi, skb);
5039 received++;
5040 budget--;
5042 next_pkt:
5043 (*post_ptr)++;
5045 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5046 tpr->rx_std_prod_idx = std_prod_idx &
5047 tp->rx_std_ring_mask;
5048 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5049 tpr->rx_std_prod_idx);
5050 work_mask &= ~RXD_OPAQUE_RING_STD;
5051 rx_std_posted = 0;
5053 next_pkt_nopost:
5054 sw_idx++;
5055 sw_idx &= tp->rx_ret_ring_mask;
5057 /* Refresh hw_idx to see if there is new work */
5058 if (sw_idx == hw_idx) {
5059 hw_idx = *(tnapi->rx_rcb_prod_idx);
5060 rmb();
5064 /* ACK the status ring. */
5065 tnapi->rx_rcb_ptr = sw_idx;
5066 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5068 /* Refill RX ring(s). */
5069 if (!tg3_flag(tp, ENABLE_RSS)) {
5070 if (work_mask & RXD_OPAQUE_RING_STD) {
5071 tpr->rx_std_prod_idx = std_prod_idx &
5072 tp->rx_std_ring_mask;
5073 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5074 tpr->rx_std_prod_idx);
5076 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5077 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5078 tp->rx_jmb_ring_mask;
5079 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5080 tpr->rx_jmb_prod_idx);
5082 mmiowb();
5083 } else if (work_mask) {
5084 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5085 * updated before the producer indices can be updated.
5086 */
5087 smp_wmb();
5089 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5090 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5092 if (tnapi != &tp->napi[1])
5093 napi_schedule(&tp->napi[1].napi);
5096 return received;
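/* Editor's note on the refill split above: without RSS there is a
 * single producer ring set, so the new std/jumbo producer indices
 * are written straight to the hardware mailboxes. With RSS, each
 * vector only stages its indices and kicks tp->napi[1], which owns
 * the actual hardware refill via tg3_rx_prodring_xfer() in
 * tg3_poll_work().
 */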
5099 static void tg3_poll_link(struct tg3 *tp)
5101 /* handle link change and other phy events */
5102 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5103 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5105 if (sblk->status & SD_STATUS_LINK_CHG) {
5106 sblk->status = SD_STATUS_UPDATED |
5107 (sblk->status & ~SD_STATUS_LINK_CHG);
5108 spin_lock(&tp->lock);
5109 if (tg3_flag(tp, USE_PHYLIB)) {
5110 tw32_f(MAC_STATUS,
5111 (MAC_STATUS_SYNC_CHANGED |
5112 MAC_STATUS_CFG_CHANGED |
5113 MAC_STATUS_MI_COMPLETION |
5114 MAC_STATUS_LNKSTATE_CHANGED));
5115 udelay(40);
5116 } else
5117 tg3_setup_phy(tp, 0);
5118 spin_unlock(&tp->lock);
5123 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5124 struct tg3_rx_prodring_set *dpr,
5125 struct tg3_rx_prodring_set *spr)
5127 u32 si, di, cpycnt, src_prod_idx;
5128 int i, err = 0;
5130 while (1) {
5131 src_prod_idx = spr->rx_std_prod_idx;
5133 /* Make sure updates to the rx_std_buffers[] entries and the
5134 * standard producer index are seen in the correct order.
5135 */
5136 smp_rmb();
5138 if (spr->rx_std_cons_idx == src_prod_idx)
5139 break;
5141 if (spr->rx_std_cons_idx < src_prod_idx)
5142 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5143 else
5144 cpycnt = tp->rx_std_ring_mask + 1 -
5145 spr->rx_std_cons_idx;
5147 cpycnt = min(cpycnt,
5148 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5150 si = spr->rx_std_cons_idx;
5151 di = dpr->rx_std_prod_idx;
5153 for (i = di; i < di + cpycnt; i++) {
5154 if (dpr->rx_std_buffers[i].skb) {
5155 cpycnt = i - di;
5156 err = -ENOSPC;
5157 break;
5161 if (!cpycnt)
5162 break;
5164 /* Ensure that updates to the rx_std_buffers ring and the
5165 * shadowed hardware producer ring from tg3_recycle_skb() are
5166 * ordered correctly WRT the skb check above.
5167 */
5168 smp_rmb();
5170 memcpy(&dpr->rx_std_buffers[di],
5171 &spr->rx_std_buffers[si],
5172 cpycnt * sizeof(struct ring_info));
5174 for (i = 0; i < cpycnt; i++, di++, si++) {
5175 struct tg3_rx_buffer_desc *sbd, *dbd;
5176 sbd = &spr->rx_std[si];
5177 dbd = &dpr->rx_std[di];
5178 dbd->addr_hi = sbd->addr_hi;
5179 dbd->addr_lo = sbd->addr_lo;
5182 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5183 tp->rx_std_ring_mask;
5184 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5185 tp->rx_std_ring_mask;
5188 while (1) {
5189 src_prod_idx = spr->rx_jmb_prod_idx;
5191 /* Make sure updates to the rx_jmb_buffers[] entries and
5192 * the jumbo producer index are seen in the correct order.
5193 */
5194 smp_rmb();
5196 if (spr->rx_jmb_cons_idx == src_prod_idx)
5197 break;
5199 if (spr->rx_jmb_cons_idx < src_prod_idx)
5200 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5201 else
5202 cpycnt = tp->rx_jmb_ring_mask + 1 -
5203 spr->rx_jmb_cons_idx;
5205 cpycnt = min(cpycnt,
5206 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5208 si = spr->rx_jmb_cons_idx;
5209 di = dpr->rx_jmb_prod_idx;
5211 for (i = di; i < di + cpycnt; i++) {
5212 if (dpr->rx_jmb_buffers[i].skb) {
5213 cpycnt = i - di;
5214 err = -ENOSPC;
5215 break;
5219 if (!cpycnt)
5220 break;
5222 /* Ensure that updates to the rx_jmb_buffers ring and the
5223 * shadowed hardware producer ring from tg3_recycle_skb() are
5224 * ordered correctly WRT the skb check above.
5225 */
5226 smp_rmb();
5228 memcpy(&dpr->rx_jmb_buffers[di],
5229 &spr->rx_jmb_buffers[si],
5230 cpycnt * sizeof(struct ring_info));
5232 for (i = 0; i < cpycnt; i++, di++, si++) {
5233 struct tg3_rx_buffer_desc *sbd, *dbd;
5234 sbd = &spr->rx_jmb[si].std;
5235 dbd = &dpr->rx_jmb[di].std;
5236 dbd->addr_hi = sbd->addr_hi;
5237 dbd->addr_lo = sbd->addr_lo;
5240 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5241 tp->rx_jmb_ring_mask;
5242 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5243 tp->rx_jmb_ring_mask;
5246 return err;
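/* Editor's note: a worked example of the copy-count logic above,
 * assuming an illustrative 1024-entry standard ring (mask 1023):
 *
 *   spr->rx_std_cons_idx = 1020, src_prod_idx = 4
 *   cons > prod, so cpycnt = 1024 - 1020 = 4   (copy up to the wrap)
 *
 * The next loop iteration then starts at index 0 and copies the
 * remaining 4 entries, with cpycnt further clamped by the space
 * available in the destination ring.
 */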
5249 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5251 struct tg3 *tp = tnapi->tp;
5253 /* run TX completion thread */
5254 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5255 tg3_tx(tnapi);
5256 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5257 return work_done;
5260 /* run RX thread, within the bounds set by NAPI.
5261 * All RX "locking" is done by ensuring outside
5262 * code synchronizes with tg3->napi.poll()
5263 */
5264 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5265 work_done += tg3_rx(tnapi, budget - work_done);
5267 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5268 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5269 int i, err = 0;
5270 u32 std_prod_idx = dpr->rx_std_prod_idx;
5271 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5273 for (i = 1; i < tp->irq_cnt; i++)
5274 err |= tg3_rx_prodring_xfer(tp, dpr,
5275 &tp->napi[i].prodring);
5277 wmb();
5279 if (std_prod_idx != dpr->rx_std_prod_idx)
5280 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5281 dpr->rx_std_prod_idx);
5283 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5284 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5285 dpr->rx_jmb_prod_idx);
5287 mmiowb();
5289 if (err)
5290 tw32_f(HOSTCC_MODE, tp->coal_now);
5293 return work_done;
5296 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5298 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5299 struct tg3 *tp = tnapi->tp;
5300 int work_done = 0;
5301 struct tg3_hw_status *sblk = tnapi->hw_status;
5303 while (1) {
5304 work_done = tg3_poll_work(tnapi, work_done, budget);
5306 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5307 goto tx_recovery;
5309 if (unlikely(work_done >= budget))
5310 break;
5312 /* tp->last_tag is used in tg3_int_reenable() below
5313 * to tell the hw how much work has been processed,
5314 * so we must read it before checking for more work.
5315 */
5316 tnapi->last_tag = sblk->status_tag;
5317 tnapi->last_irq_tag = tnapi->last_tag;
5318 rmb();
5320 /* check for RX/TX work to do */
5321 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5322 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5323 napi_complete(napi);
5324 /* Reenable interrupts. */
5325 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5326 mmiowb();
5327 break;
5331 return work_done;
5333 tx_recovery:
5334 /* work_done is guaranteed to be less than budget. */
5335 napi_complete(napi);
5336 schedule_work(&tp->reset_task);
5337 return work_done;
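/* Editor's note: with tagged status, writing (last_tag << 24) to the
 * interrupt mailbox above both re-enables the vector and tells the
 * hardware which status tag has been fully processed. If the chip
 * has posted a newer status block (a newer tag) in the meantime, it
 * re-asserts the interrupt, so no events are lost across
 * napi_complete().
 */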
5340 static void tg3_process_error(struct tg3 *tp)
5342 u32 val;
5343 bool real_error = false;
5345 if (tg3_flag(tp, ERROR_PROCESSED))
5346 return;
5348 /* Check Flow Attention register */
5349 val = tr32(HOSTCC_FLOW_ATTN);
5350 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5351 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5352 real_error = true;
5355 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5356 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5357 real_error = true;
5360 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5361 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5362 real_error = true;
5365 if (!real_error)
5366 return;
5368 tg3_dump_state(tp);
5370 tg3_flag_set(tp, ERROR_PROCESSED);
5371 schedule_work(&tp->reset_task);
5374 static int tg3_poll(struct napi_struct *napi, int budget)
5376 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5377 struct tg3 *tp = tnapi->tp;
5378 int work_done = 0;
5379 struct tg3_hw_status *sblk = tnapi->hw_status;
5381 while (1) {
5382 if (sblk->status & SD_STATUS_ERROR)
5383 tg3_process_error(tp);
5385 tg3_poll_link(tp);
5387 work_done = tg3_poll_work(tnapi, work_done, budget);
5389 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5390 goto tx_recovery;
5392 if (unlikely(work_done >= budget))
5393 break;
5395 if (tg3_flag(tp, TAGGED_STATUS)) {
5396 /* tp->last_tag is used in tg3_int_reenable() below
5397 * to tell the hw how much work has been processed,
5398 * so we must read it before checking for more work.
5399 */
5400 tnapi->last_tag = sblk->status_tag;
5401 tnapi->last_irq_tag = tnapi->last_tag;
5402 rmb();
5403 } else
5404 sblk->status &= ~SD_STATUS_UPDATED;
5406 if (likely(!tg3_has_work(tnapi))) {
5407 napi_complete(napi);
5408 tg3_int_reenable(tnapi);
5409 break;
5413 return work_done;
5415 tx_recovery:
5416 /* work_done is guaranteed to be less than budget. */
5417 napi_complete(napi);
5418 schedule_work(&tp->reset_task);
5419 return work_done;
5422 static void tg3_napi_disable(struct tg3 *tp)
5424 int i;
5426 for (i = tp->irq_cnt - 1; i >= 0; i--)
5427 napi_disable(&tp->napi[i].napi);
5430 static void tg3_napi_enable(struct tg3 *tp)
5432 int i;
5434 for (i = 0; i < tp->irq_cnt; i++)
5435 napi_enable(&tp->napi[i].napi);
5438 static void tg3_napi_init(struct tg3 *tp)
5440 int i;
5442 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5443 for (i = 1; i < tp->irq_cnt; i++)
5444 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5447 static void tg3_napi_fini(struct tg3 *tp)
5449 int i;
5451 for (i = 0; i < tp->irq_cnt; i++)
5452 netif_napi_del(&tp->napi[i].napi);
5455 static inline void tg3_netif_stop(struct tg3 *tp)
5457 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5458 tg3_napi_disable(tp);
5459 netif_tx_disable(tp->dev);
5462 static inline void tg3_netif_start(struct tg3 *tp)
5464 /* NOTE: unconditional netif_tx_wake_all_queues is only
5465 * appropriate so long as all callers are assured to
5466 * have free tx slots (such as after tg3_init_hw).
5467 */
5468 netif_tx_wake_all_queues(tp->dev);
5470 tg3_napi_enable(tp);
5471 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5472 tg3_enable_ints(tp);
5475 static void tg3_irq_quiesce(struct tg3 *tp)
5477 int i;
5479 BUG_ON(tp->irq_sync);
5481 tp->irq_sync = 1;
5482 smp_mb();
5484 for (i = 0; i < tp->irq_cnt; i++)
5485 synchronize_irq(tp->napi[i].irq_vec);
5488 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5489 * If irq_sync is non-zero, then the IRQ handler must also be
5490 * synchronized with. Most of the time this is unnecessary, except when
5491 * shutting down the device.
5493 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5495 spin_lock_bh(&tp->lock);
5496 if (irq_sync)
5497 tg3_irq_quiesce(tp);
5500 static inline void tg3_full_unlock(struct tg3 *tp)
5502 spin_unlock_bh(&tp->lock);
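/* Editor's note: typical usage of the helpers above, mirroring
 * tg3_reset_task() later in this file:
 *
 *   tg3_full_lock(tp, 1);      // quiesce the IRQ handlers too
 *   tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 *   err = tg3_init_hw(tp, 1);
 *   tg3_full_unlock(tp);
 *
 * Pass a non-zero irq_sync only from process-context paths that must
 * not race with the interrupt handlers, e.g. device shutdown.
 */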
5505 /* One-shot MSI handler - Chip automatically disables interrupt
5506 * after sending MSI so driver doesn't have to do it.
5508 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5510 struct tg3_napi *tnapi = dev_id;
5511 struct tg3 *tp = tnapi->tp;
5513 prefetch(tnapi->hw_status);
5514 if (tnapi->rx_rcb)
5515 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5517 if (likely(!tg3_irq_sync(tp)))
5518 napi_schedule(&tnapi->napi);
5520 return IRQ_HANDLED;
5523 /* MSI ISR - No need to check for interrupt sharing and no need to
5524 * flush status block and interrupt mailbox. PCI ordering rules
5525 * guarantee that MSI will arrive after the status block.
5527 static irqreturn_t tg3_msi(int irq, void *dev_id)
5529 struct tg3_napi *tnapi = dev_id;
5530 struct tg3 *tp = tnapi->tp;
5532 prefetch(tnapi->hw_status);
5533 if (tnapi->rx_rcb)
5534 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5535 /*
5536 * Writing any value to intr-mbox-0 clears PCI INTA# and
5537 * chip-internal interrupt pending events.
5538 * Writing non-zero to intr-mbox-0 additionally tells the
5539 * NIC to stop sending us irqs, engaging "in-intr-handler"
5540 * event coalescing.
5541 */
5542 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5543 if (likely(!tg3_irq_sync(tp)))
5544 napi_schedule(&tnapi->napi);
5546 return IRQ_RETVAL(1);
5549 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5551 struct tg3_napi *tnapi = dev_id;
5552 struct tg3 *tp = tnapi->tp;
5553 struct tg3_hw_status *sblk = tnapi->hw_status;
5554 unsigned int handled = 1;
5556 /* In INTx mode, it is possible for the interrupt to arrive at
5557 * the CPU before the status block posted prior to it is visible.
5558 * Reading the PCI State register will confirm whether the
5559 * interrupt is ours and will flush the status block.
5561 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5562 if (tg3_flag(tp, CHIP_RESETTING) ||
5563 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5564 handled = 0;
5565 goto out;
5569 /*
5570 * Writing any value to intr-mbox-0 clears PCI INTA# and
5571 * chip-internal interrupt pending events.
5572 * Writing non-zero to intr-mbox-0 additionally tells the
5573 * NIC to stop sending us irqs, engaging "in-intr-handler"
5574 * event coalescing.
5575 *
5576 * Flush the mailbox to de-assert the IRQ immediately to prevent
5577 * spurious interrupts. The flush impacts performance but
5578 * excessive spurious interrupts can be worse in some cases.
5579 */
5580 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5581 if (tg3_irq_sync(tp))
5582 goto out;
5583 sblk->status &= ~SD_STATUS_UPDATED;
5584 if (likely(tg3_has_work(tnapi))) {
5585 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5586 napi_schedule(&tnapi->napi);
5587 } else {
5588 /* No work, shared interrupt perhaps? re-enable
5589 * interrupts, and flush that PCI write.
5590 */
5591 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5592 0x00000000);
5594 out:
5595 return IRQ_RETVAL(handled);
5598 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5600 struct tg3_napi *tnapi = dev_id;
5601 struct tg3 *tp = tnapi->tp;
5602 struct tg3_hw_status *sblk = tnapi->hw_status;
5603 unsigned int handled = 1;
5605 /* In INTx mode, it is possible for the interrupt to arrive at
5606 * the CPU before the status block posted prior to it is visible.
5607 * Reading the PCI State register will confirm whether the
5608 * interrupt is ours and will flush the status block.
5610 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5611 if (tg3_flag(tp, CHIP_RESETTING) ||
5612 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5613 handled = 0;
5614 goto out;
5618 /*
5619 * Writing any value to intr-mbox-0 clears PCI INTA# and
5620 * chip-internal interrupt pending events.
5621 * Writing non-zero to intr-mbox-0 additionally tells the
5622 * NIC to stop sending us irqs, engaging "in-intr-handler"
5623 * event coalescing.
5624 *
5625 * Flush the mailbox to de-assert the IRQ immediately to prevent
5626 * spurious interrupts. The flush impacts performance but
5627 * excessive spurious interrupts can be worse in some cases.
5628 */
5629 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5631 /*
5632 * In a shared interrupt configuration, sometimes other devices'
5633 * interrupts will scream. We record the current status tag here
5634 * so that the above check can report that the screaming interrupts
5635 * are unhandled. Eventually they will be silenced.
5636 */
5637 tnapi->last_irq_tag = sblk->status_tag;
5639 if (tg3_irq_sync(tp))
5640 goto out;
5642 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5644 napi_schedule(&tnapi->napi);
5646 out:
5647 return IRQ_RETVAL(handled);
5650 /* ISR for interrupt test */
5651 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5653 struct tg3_napi *tnapi = dev_id;
5654 struct tg3 *tp = tnapi->tp;
5655 struct tg3_hw_status *sblk = tnapi->hw_status;
5657 if ((sblk->status & SD_STATUS_UPDATED) ||
5658 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5659 tg3_disable_ints(tp);
5660 return IRQ_RETVAL(1);
5662 return IRQ_RETVAL(0);
5665 static int tg3_init_hw(struct tg3 *, int);
5666 static int tg3_halt(struct tg3 *, int, int);
5668 /* Restart hardware after configuration changes, self-test, etc.
5669 * Invoked with tp->lock held.
5671 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5672 __releases(tp->lock)
5673 __acquires(tp->lock)
5675 int err;
5677 err = tg3_init_hw(tp, reset_phy);
5678 if (err) {
5679 netdev_err(tp->dev,
5680 "Failed to re-initialize device, aborting\n");
5681 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5682 tg3_full_unlock(tp);
5683 del_timer_sync(&tp->timer);
5684 tp->irq_sync = 0;
5685 tg3_napi_enable(tp);
5686 dev_close(tp->dev);
5687 tg3_full_lock(tp, 0);
5689 return err;
5692 #ifdef CONFIG_NET_POLL_CONTROLLER
5693 static void tg3_poll_controller(struct net_device *dev)
5695 int i;
5696 struct tg3 *tp = netdev_priv(dev);
5698 for (i = 0; i < tp->irq_cnt; i++)
5699 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5701 #endif
5703 static void tg3_reset_task(struct work_struct *work)
5705 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5706 int err;
5707 unsigned int restart_timer;
5709 tg3_full_lock(tp, 0);
5711 if (!netif_running(tp->dev)) {
5712 tg3_full_unlock(tp);
5713 return;
5716 tg3_full_unlock(tp);
5718 tg3_phy_stop(tp);
5720 tg3_netif_stop(tp);
5722 tg3_full_lock(tp, 1);
5724 restart_timer = tg3_flag(tp, RESTART_TIMER);
5725 tg3_flag_clear(tp, RESTART_TIMER);
5727 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5728 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5729 tp->write32_rx_mbox = tg3_write_flush_reg32;
5730 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5731 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5734 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5735 err = tg3_init_hw(tp, 1);
5736 if (err)
5737 goto out;
5739 tg3_netif_start(tp);
5741 if (restart_timer)
5742 mod_timer(&tp->timer, jiffies + 1);
5744 out:
5745 tg3_full_unlock(tp);
5747 if (!err)
5748 tg3_phy_start(tp);
5751 static void tg3_tx_timeout(struct net_device *dev)
5753 struct tg3 *tp = netdev_priv(dev);
5755 if (netif_msg_tx_err(tp)) {
5756 netdev_err(dev, "transmit timed out, resetting\n");
5757 tg3_dump_state(tp);
5760 schedule_work(&tp->reset_task);
5763 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5764 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5766 u32 base = (u32) mapping & 0xffffffff;
5768 return (base > 0xffffdcc0) && (base + len + 8 < base);
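/* Editor's note: a worked example of the test above. The first
 * clause is a cheap filter: only mappings within 0x2340 bytes of a
 * 4 GB boundary can possibly wrap for legal lengths. The second
 * clause detects the wrap itself in 32-bit arithmetic:
 *
 *   base = 0xfffff000, len = 0x1000
 *   base + len + 8 wraps to 0x00000008, which is < base -> overflow
 *
 * A mapping of 0x7ff00000 with the same length fails the first
 * clause and is accepted without evaluating the addition.
 */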
5771 /* Test for DMA addresses > 40-bit */
5772 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5773 int len)
5775 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5776 if (tg3_flag(tp, 40BIT_DMA_BUG))
5777 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5778 return 0;
5779 #else
5780 return 0;
5781 #endif
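/* Editor's note: DMA_BIT_MASK(40) == 0xffffffffff. As an illustrative
 * example, mapping = 0xffffff8000 with len = 0x8000 gives
 * mapping + len == 0x10000000000, which exceeds the 40-bit limit, so
 * the buffer must be reworked for chips with the 40BIT_DMA_BUG flag.
 * The check is compiled in only for 64-bit highmem configurations,
 * where such addresses can actually occur.
 */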
5784 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5785 dma_addr_t mapping, int len, u32 flags,
5786 u32 mss_and_is_end)
5788 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5789 int is_end = (mss_and_is_end & 0x1);
5790 u32 mss = (mss_and_is_end >> 1);
5791 u32 vlan_tag = 0;
5793 if (is_end)
5794 flags |= TXD_FLAG_END;
5795 if (flags & TXD_FLAG_VLAN) {
5796 vlan_tag = flags >> 16;
5797 flags &= 0xffff;
5799 vlan_tag |= (mss << TXD_MSS_SHIFT);
5801 txd->addr_hi = ((u64) mapping >> 32);
5802 txd->addr_lo = ((u64) mapping & 0xffffffff);
5803 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5804 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
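/* Editor's note: callers pack the "end of packet" flag and the MSS
 * into a single argument, e.g. from tg3_start_xmit():
 *
 *   tg3_set_txd(tnapi, entry, mapping, len,
 *               base_flags, (i == last) | (mss << 1));
 *
 * With mss = 1448 on the last fragment this is (1448 << 1) | 1 ==
 * 2897: bit 0 becomes TXD_FLAG_END and the remaining bits are
 * shifted back out as the MSS above.
 */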
5807 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5808 struct sk_buff *skb, int last)
5810 int i;
5811 u32 entry = tnapi->tx_prod;
5812 struct ring_info *txb = &tnapi->tx_buffers[entry];
5814 pci_unmap_single(tnapi->tp->pdev,
5815 dma_unmap_addr(txb, mapping),
5816 skb_headlen(skb),
5817 PCI_DMA_TODEVICE);
5818 for (i = 0; i < last; i++) {
5819 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5821 entry = NEXT_TX(entry);
5822 txb = &tnapi->tx_buffers[entry];
5824 pci_unmap_page(tnapi->tp->pdev,
5825 dma_unmap_addr(txb, mapping),
5826 frag->size, PCI_DMA_TODEVICE);
5830 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5831 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5832 struct sk_buff *skb,
5833 u32 base_flags, u32 mss)
5835 struct tg3 *tp = tnapi->tp;
5836 struct sk_buff *new_skb;
5837 dma_addr_t new_addr = 0;
5838 u32 entry = tnapi->tx_prod;
5839 int ret = 0;
5841 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5842 new_skb = skb_copy(skb, GFP_ATOMIC);
5843 else {
5844 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5846 new_skb = skb_copy_expand(skb,
5847 skb_headroom(skb) + more_headroom,
5848 skb_tailroom(skb), GFP_ATOMIC);
5851 if (!new_skb) {
5852 ret = -1;
5853 } else {
5854 /* New SKB is guaranteed to be linear. */
5855 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5856 PCI_DMA_TODEVICE);
5857 /* Make sure the mapping succeeded */
5858 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5859 ret = -1;
5860 dev_kfree_skb(new_skb);
5862 /* Make sure new skb does not cross any 4G boundaries.
5863 * Drop the packet if it does.
5865 } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
5866 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5867 PCI_DMA_TODEVICE);
5868 ret = -1;
5869 dev_kfree_skb(new_skb);
5870 } else {
5871 tnapi->tx_buffers[entry].skb = new_skb;
5872 dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5873 mapping, new_addr);
5875 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5876 base_flags, 1 | (mss << 1));
5880 dev_kfree_skb(skb);
5882 return ret;
5885 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5887 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5888 * TSO header is greater than 80 bytes.
5890 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5892 struct sk_buff *segs, *nskb;
5893 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5895 /* Estimate the number of fragments in the worst case */
5896 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5897 netif_stop_queue(tp->dev);
5899 /* netif_tx_stop_queue() must be done before checking
5900 * tx index in tg3_tx_avail() below, because in
5901 * tg3_tx(), we update tx index before checking for
5902 * netif_tx_queue_stopped().
5904 smp_mb();
5905 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5906 return NETDEV_TX_BUSY;
5908 netif_wake_queue(tp->dev);
5911 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5912 if (IS_ERR(segs))
5913 goto tg3_tso_bug_end;
5915 do {
5916 nskb = segs;
5917 segs = segs->next;
5918 nskb->next = NULL;
5919 tg3_start_xmit(nskb, tp->dev);
5920 } while (segs);
5922 tg3_tso_bug_end:
5923 dev_kfree_skb(skb);
5925 return NETDEV_TX_OK;
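/* Editor's note: the frag_cnt_est above reserves roughly three
 * descriptors per resulting segment (header plus payload mappings).
 * As an illustrative example, a 60 kB TSO skb with an MSS of 1448
 * splits into ceil(60000 / 1448) = 42 segments, so about 126 free
 * descriptors are required before segmentation is attempted.
 */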
5928 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5929 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5931 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5933 struct tg3 *tp = netdev_priv(dev);
5934 u32 len, entry, base_flags, mss;
5935 int i = -1, would_hit_hwbug;
5936 dma_addr_t mapping;
5937 struct tg3_napi *tnapi;
5938 struct netdev_queue *txq;
5939 unsigned int last;
5941 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5942 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5943 if (tg3_flag(tp, ENABLE_TSS))
5944 tnapi++;
5946 /* We are running in BH disabled context with netif_tx_lock
5947 * and TX reclaim runs via tp->napi.poll inside of a software
5948 * interrupt. Furthermore, IRQ processing runs lockless so we have
5949 * no IRQ context deadlocks to worry about either. Rejoice!
5951 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5952 if (!netif_tx_queue_stopped(txq)) {
5953 netif_tx_stop_queue(txq);
5955 /* This is a hard error, log it. */
5956 netdev_err(dev,
5957 "BUG! Tx Ring full when queue awake!\n");
5959 return NETDEV_TX_BUSY;
5962 entry = tnapi->tx_prod;
5963 base_flags = 0;
5964 if (skb->ip_summed == CHECKSUM_PARTIAL)
5965 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5967 mss = skb_shinfo(skb)->gso_size;
5968 if (mss) {
5969 struct iphdr *iph;
5970 u32 tcp_opt_len, hdr_len;
5972 if (skb_header_cloned(skb) &&
5973 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5974 dev_kfree_skb(skb);
5975 goto out_unlock;
5978 iph = ip_hdr(skb);
5979 tcp_opt_len = tcp_optlen(skb);
5981 if (skb_is_gso_v6(skb)) {
5982 hdr_len = skb_headlen(skb) - ETH_HLEN;
5983 } else {
5984 u32 ip_tcp_len;
5986 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5987 hdr_len = ip_tcp_len + tcp_opt_len;
5989 iph->check = 0;
5990 iph->tot_len = htons(mss + hdr_len);
5993 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5994 tg3_flag(tp, TSO_BUG))
5995 return tg3_tso_bug(tp, skb);
5997 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5998 TXD_FLAG_CPU_POST_DMA);
6000 if (tg3_flag(tp, HW_TSO_1) ||
6001 tg3_flag(tp, HW_TSO_2) ||
6002 tg3_flag(tp, HW_TSO_3)) {
6003 tcp_hdr(skb)->check = 0;
6004 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6005 } else
6006 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6007 iph->daddr, 0,
6008 IPPROTO_TCP,
6009 0);
6011 if (tg3_flag(tp, HW_TSO_3)) {
6012 mss |= (hdr_len & 0xc) << 12;
6013 if (hdr_len & 0x10)
6014 base_flags |= 0x00000010;
6015 base_flags |= (hdr_len & 0x3e0) << 5;
6016 } else if (tg3_flag(tp, HW_TSO_2))
6017 mss |= hdr_len << 9;
6018 else if (tg3_flag(tp, HW_TSO_1) ||
6019 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6020 if (tcp_opt_len || iph->ihl > 5) {
6021 int tsflags;
6023 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6024 mss |= (tsflags << 11);
6026 } else {
6027 if (tcp_opt_len || iph->ihl > 5) {
6028 int tsflags;
6030 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6031 base_flags |= tsflags << 12;
6036 if (vlan_tx_tag_present(skb))
6037 base_flags |= (TXD_FLAG_VLAN |
6038 (vlan_tx_tag_get(skb) << 16));
6040 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6041 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6042 base_flags |= TXD_FLAG_JMB_PKT;
6044 len = skb_headlen(skb);
6046 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6047 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6048 dev_kfree_skb(skb);
6049 goto out_unlock;
6052 tnapi->tx_buffers[entry].skb = skb;
6053 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6055 would_hit_hwbug = 0;
6057 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6058 would_hit_hwbug = 1;
6060 if (tg3_4g_overflow_test(mapping, len))
6061 would_hit_hwbug = 1;
6063 if (tg3_40bit_overflow_test(tp, mapping, len))
6064 would_hit_hwbug = 1;
6066 if (tg3_flag(tp, 5701_DMA_BUG))
6067 would_hit_hwbug = 1;
6069 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6070 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6072 entry = NEXT_TX(entry);
6074 /* Now loop through additional data fragments, and queue them. */
6075 if (skb_shinfo(skb)->nr_frags > 0) {
6076 last = skb_shinfo(skb)->nr_frags - 1;
6077 for (i = 0; i <= last; i++) {
6078 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6080 len = frag->size;
6081 mapping = pci_map_page(tp->pdev,
6082 frag->page,
6083 frag->page_offset,
6084 len, PCI_DMA_TODEVICE);
6086 tnapi->tx_buffers[entry].skb = NULL;
6087 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6088 mapping);
6089 if (pci_dma_mapping_error(tp->pdev, mapping))
6090 goto dma_error;
6092 if (tg3_flag(tp, SHORT_DMA_BUG) &&
6093 len <= 8)
6094 would_hit_hwbug = 1;
6096 if (tg3_4g_overflow_test(mapping, len))
6097 would_hit_hwbug = 1;
6099 if (tg3_40bit_overflow_test(tp, mapping, len))
6100 would_hit_hwbug = 1;
6102 if (tg3_flag(tp, HW_TSO_1) ||
6103 tg3_flag(tp, HW_TSO_2) ||
6104 tg3_flag(tp, HW_TSO_3))
6105 tg3_set_txd(tnapi, entry, mapping, len,
6106 base_flags, (i == last)|(mss << 1));
6107 else
6108 tg3_set_txd(tnapi, entry, mapping, len,
6109 base_flags, (i == last));
6111 entry = NEXT_TX(entry);
6115 if (would_hit_hwbug) {
6116 tg3_skb_error_unmap(tnapi, skb, i);
6118 /* If the workaround fails due to memory/mapping
6119 * failure, silently drop this packet.
6121 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6122 goto out_unlock;
6124 entry = NEXT_TX(tnapi->tx_prod);
6127 skb_tx_timestamp(skb);
6129 /* Packets are ready, update Tx producer idx local and on card. */
6130 tw32_tx_mbox(tnapi->prodmbox, entry);
6132 tnapi->tx_prod = entry;
6133 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6134 netif_tx_stop_queue(txq);
6136 /* netif_tx_stop_queue() must be done before checking
6137 * tx index in tg3_tx_avail() below, because in
6138 * tg3_tx(), we update tx index before checking for
6139 * netif_tx_queue_stopped().
6141 smp_mb();
6142 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6143 netif_tx_wake_queue(txq);
6146 out_unlock:
6147 mmiowb();
6149 return NETDEV_TX_OK;
6151 dma_error:
6152 tg3_skb_error_unmap(tnapi, skb, i);
6153 dev_kfree_skb(skb);
6154 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6155 return NETDEV_TX_OK;
6158 static void tg3_set_loopback(struct net_device *dev, u32 features)
6160 struct tg3 *tp = netdev_priv(dev);
6162 if (features & NETIF_F_LOOPBACK) {
6163 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6164 return;
6166 /*
6167 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6168 * loopback mode if Half-Duplex mode was negotiated earlier.
6169 */
6170 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6172 /* Enable internal MAC loopback mode */
6173 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6174 spin_lock_bh(&tp->lock);
6175 tw32(MAC_MODE, tp->mac_mode);
6176 netif_carrier_on(tp->dev);
6177 spin_unlock_bh(&tp->lock);
6178 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6179 } else {
6180 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6181 return;
6183 /* Disable internal MAC loopback mode */
6184 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6185 spin_lock_bh(&tp->lock);
6186 tw32(MAC_MODE, tp->mac_mode);
6187 /* Force link status check */
6188 tg3_setup_phy(tp, 1);
6189 spin_unlock_bh(&tp->lock);
6190 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6194 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6196 struct tg3 *tp = netdev_priv(dev);
6198 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6199 features &= ~NETIF_F_ALL_TSO;
6201 return features;
6204 static int tg3_set_features(struct net_device *dev, u32 features)
6206 u32 changed = dev->features ^ features;
6208 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6209 tg3_set_loopback(dev, features);
6211 return 0;
6214 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6215 int new_mtu)
6217 dev->mtu = new_mtu;
6219 if (new_mtu > ETH_DATA_LEN) {
6220 if (tg3_flag(tp, 5780_CLASS)) {
6221 netdev_update_features(dev);
6222 tg3_flag_clear(tp, TSO_CAPABLE);
6223 } else {
6224 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6226 } else {
6227 if (tg3_flag(tp, 5780_CLASS)) {
6228 tg3_flag_set(tp, TSO_CAPABLE);
6229 netdev_update_features(dev);
6231 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6235 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6237 struct tg3 *tp = netdev_priv(dev);
6238 int err;
6240 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6241 return -EINVAL;
6243 if (!netif_running(dev)) {
6244 /* We'll just catch it later when the
6245 * device is brought up.
6246 */
6247 tg3_set_mtu(dev, tp, new_mtu);
6248 return 0;
6251 tg3_phy_stop(tp);
6253 tg3_netif_stop(tp);
6255 tg3_full_lock(tp, 1);
6257 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6259 tg3_set_mtu(dev, tp, new_mtu);
6261 err = tg3_restart_hw(tp, 0);
6263 if (!err)
6264 tg3_netif_start(tp);
6266 tg3_full_unlock(tp);
6268 if (!err)
6269 tg3_phy_start(tp);
6271 return err;
6274 static void tg3_rx_prodring_free(struct tg3 *tp,
6275 struct tg3_rx_prodring_set *tpr)
6277 int i;
6279 if (tpr != &tp->napi[0].prodring) {
6280 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6281 i = (i + 1) & tp->rx_std_ring_mask)
6282 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6283 tp->rx_pkt_map_sz);
6285 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6286 for (i = tpr->rx_jmb_cons_idx;
6287 i != tpr->rx_jmb_prod_idx;
6288 i = (i + 1) & tp->rx_jmb_ring_mask) {
6289 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6290 TG3_RX_JMB_MAP_SZ);
6294 return;
6297 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6298 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6299 tp->rx_pkt_map_sz);
6301 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6302 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6303 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6304 TG3_RX_JMB_MAP_SZ);
6308 /* Initialize rx rings for packet processing.
6310 * The chip has been shut down and the driver detached from
6311 * the networking, so no interrupts or new tx packets will
6312 * end up in the driver. tp->{tx,}lock are held and thus
6313 * we may not sleep.
6314 */
6315 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6316 struct tg3_rx_prodring_set *tpr)
6318 u32 i, rx_pkt_dma_sz;
6320 tpr->rx_std_cons_idx = 0;
6321 tpr->rx_std_prod_idx = 0;
6322 tpr->rx_jmb_cons_idx = 0;
6323 tpr->rx_jmb_prod_idx = 0;
6325 if (tpr != &tp->napi[0].prodring) {
6326 memset(&tpr->rx_std_buffers[0], 0,
6327 TG3_RX_STD_BUFF_RING_SIZE(tp));
6328 if (tpr->rx_jmb_buffers)
6329 memset(&tpr->rx_jmb_buffers[0], 0,
6330 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6331 goto done;
6334 /* Zero out all descriptors. */
6335 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6337 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6338 if (tg3_flag(tp, 5780_CLASS) &&
6339 tp->dev->mtu > ETH_DATA_LEN)
6340 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6341 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6343 /* Initialize invariants of the rings; we only set this
6344 * stuff once. This works because the card does not
6345 * write into the rx buffer posting rings.
6346 */
6347 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6348 struct tg3_rx_buffer_desc *rxd;
6350 rxd = &tpr->rx_std[i];
6351 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6352 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6353 rxd->opaque = (RXD_OPAQUE_RING_STD |
6354 (i << RXD_OPAQUE_INDEX_SHIFT));
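/* Editor's note (worked example, values per the macros above): for slot
 * i = 5 of the standard ring the cookie is RXD_OPAQUE_RING_STD |
 * (5 << RXD_OPAQUE_INDEX_SHIFT), so the rx completion path can recover
 * both the ring type and the buffer index from the descriptor alone.
 */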
6357 /* Now allocate fresh SKBs for each rx ring. */
6358 for (i = 0; i < tp->rx_pending; i++) {
6359 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6360 netdev_warn(tp->dev,
6361 "Using a smaller RX standard ring. Only "
6362 "%d out of %d buffers were allocated "
6363 "successfully\n", i, tp->rx_pending);
6364 if (i == 0)
6365 goto initfail;
6366 tp->rx_pending = i;
6367 break;
6371 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6372 goto done;
6374 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6376 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6377 goto done;
6379 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6380 struct tg3_rx_buffer_desc *rxd;
6382 rxd = &tpr->rx_jmb[i].std;
6383 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6384 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6385 RXD_FLAG_JUMBO;
6386 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6387 (i << RXD_OPAQUE_INDEX_SHIFT));
6390 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6391 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6392 netdev_warn(tp->dev,
6393 "Using a smaller RX jumbo ring. Only %d "
6394 "out of %d buffers were allocated "
6395 "successfully\n", i, tp->rx_jumbo_pending);
6396 if (i == 0)
6397 goto initfail;
6398 tp->rx_jumbo_pending = i;
6399 break;
6403 done:
6404 return 0;
6406 initfail:
6407 tg3_rx_prodring_free(tp, tpr);
6408 return -ENOMEM;
6411 static void tg3_rx_prodring_fini(struct tg3 *tp,
6412 struct tg3_rx_prodring_set *tpr)
6414 kfree(tpr->rx_std_buffers);
6415 tpr->rx_std_buffers = NULL;
6416 kfree(tpr->rx_jmb_buffers);
6417 tpr->rx_jmb_buffers = NULL;
6418 if (tpr->rx_std) {
6419 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6420 tpr->rx_std, tpr->rx_std_mapping);
6421 tpr->rx_std = NULL;
6423 if (tpr->rx_jmb) {
6424 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6425 tpr->rx_jmb, tpr->rx_jmb_mapping);
6426 tpr->rx_jmb = NULL;
6430 static int tg3_rx_prodring_init(struct tg3 *tp,
6431 struct tg3_rx_prodring_set *tpr)
6433 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6434 GFP_KERNEL);
6435 if (!tpr->rx_std_buffers)
6436 return -ENOMEM;
6438 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6439 TG3_RX_STD_RING_BYTES(tp),
6440 &tpr->rx_std_mapping,
6441 GFP_KERNEL);
6442 if (!tpr->rx_std)
6443 goto err_out;
6445 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6446 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6447 GFP_KERNEL);
6448 if (!tpr->rx_jmb_buffers)
6449 goto err_out;
6451 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6452 TG3_RX_JMB_RING_BYTES(tp),
6453 &tpr->rx_jmb_mapping,
6454 GFP_KERNEL);
6455 if (!tpr->rx_jmb)
6456 goto err_out;
6459 return 0;
6461 err_out:
6462 tg3_rx_prodring_fini(tp, tpr);
6463 return -ENOMEM;
6466 /* Free up pending packets in all rx/tx rings.
6468 * The chip has been shut down and the driver detached from
6469 * the networking, so no interrupts or new tx packets will
6470 * end up in the driver. tp->{tx,}lock is not held and we are not
6471 * in an interrupt context and thus may sleep.
6472 */
6473 static void tg3_free_rings(struct tg3 *tp)
6475 int i, j;
6477 for (j = 0; j < tp->irq_cnt; j++) {
6478 struct tg3_napi *tnapi = &tp->napi[j];
6480 tg3_rx_prodring_free(tp, &tnapi->prodring);
6482 if (!tnapi->tx_buffers)
6483 continue;
6485 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6486 struct ring_info *txp;
6487 struct sk_buff *skb;
6488 unsigned int k;
6490 txp = &tnapi->tx_buffers[i];
6491 skb = txp->skb;
6493 if (skb == NULL) {
6494 i++;
6495 continue;
6498 pci_unmap_single(tp->pdev,
6499 dma_unmap_addr(txp, mapping),
6500 skb_headlen(skb),
6501 PCI_DMA_TODEVICE);
6502 txp->skb = NULL;
6504 i++;
6506 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6507 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6508 pci_unmap_page(tp->pdev,
6509 dma_unmap_addr(txp, mapping),
6510 skb_shinfo(skb)->frags[k].size,
6511 PCI_DMA_TODEVICE);
6512 i++;
6515 dev_kfree_skb_any(skb);
6520 /* Initialize tx/rx rings for packet processing.
6522 * The chip has been shut down and the driver detached from
6523 * the networking, so no interrupts or new tx packets will
6524 * end up in the driver. tp->{tx,}lock are held and thus
6525 * we may not sleep.
6526 */
6527 static int tg3_init_rings(struct tg3 *tp)
6529 int i;
6531 /* Free up all the SKBs. */
6532 tg3_free_rings(tp);
6534 for (i = 0; i < tp->irq_cnt; i++) {
6535 struct tg3_napi *tnapi = &tp->napi[i];
6537 tnapi->last_tag = 0;
6538 tnapi->last_irq_tag = 0;
6539 tnapi->hw_status->status = 0;
6540 tnapi->hw_status->status_tag = 0;
6541 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6543 tnapi->tx_prod = 0;
6544 tnapi->tx_cons = 0;
6545 if (tnapi->tx_ring)
6546 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6548 tnapi->rx_rcb_ptr = 0;
6549 if (tnapi->rx_rcb)
6550 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6552 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6553 tg3_free_rings(tp);
6554 return -ENOMEM;
6558 return 0;
6561 /*
6562 * Must not be invoked with interrupt sources disabled and
6563 * the hardware shut down.
6564 */
6565 static void tg3_free_consistent(struct tg3 *tp)
6567 int i;
6569 for (i = 0; i < tp->irq_cnt; i++) {
6570 struct tg3_napi *tnapi = &tp->napi[i];
6572 if (tnapi->tx_ring) {
6573 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6574 tnapi->tx_ring, tnapi->tx_desc_mapping);
6575 tnapi->tx_ring = NULL;
6578 kfree(tnapi->tx_buffers);
6579 tnapi->tx_buffers = NULL;
6581 if (tnapi->rx_rcb) {
6582 dma_free_coherent(&tp->pdev->dev,
6583 TG3_RX_RCB_RING_BYTES(tp),
6584 tnapi->rx_rcb,
6585 tnapi->rx_rcb_mapping);
6586 tnapi->rx_rcb = NULL;
6589 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6591 if (tnapi->hw_status) {
6592 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6593 tnapi->hw_status,
6594 tnapi->status_mapping);
6595 tnapi->hw_status = NULL;
6599 if (tp->hw_stats) {
6600 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6601 tp->hw_stats, tp->stats_mapping);
6602 tp->hw_stats = NULL;
6606 /*
6607 * Must not be invoked with interrupt sources disabled and
6608 * the hardware shut down. Can sleep.
6609 */
6610 static int tg3_alloc_consistent(struct tg3 *tp)
6612 int i;
6614 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6615 sizeof(struct tg3_hw_stats),
6616 &tp->stats_mapping,
6617 GFP_KERNEL);
6618 if (!tp->hw_stats)
6619 goto err_out;
6621 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6623 for (i = 0; i < tp->irq_cnt; i++) {
6624 struct tg3_napi *tnapi = &tp->napi[i];
6625 struct tg3_hw_status *sblk;
6627 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6628 TG3_HW_STATUS_SIZE,
6629 &tnapi->status_mapping,
6630 GFP_KERNEL);
6631 if (!tnapi->hw_status)
6632 goto err_out;
6634 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6635 sblk = tnapi->hw_status;
6637 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6638 goto err_out;
6640 /* If multivector TSS is enabled, vector 0 does not handle
6641 * tx interrupts. Don't allocate any resources for it.
6642 */
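/* Editor's note: the condition below reduces to a small truth table --
 * with TSS off, only vector 0 gets tx resources; with TSS on, every
 * vector except vector 0 does.
 */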
6643 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6644 (i && tg3_flag(tp, ENABLE_TSS))) {
6645 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6646 TG3_TX_RING_SIZE,
6647 GFP_KERNEL);
6648 if (!tnapi->tx_buffers)
6649 goto err_out;
6651 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6652 TG3_TX_RING_BYTES,
6653 &tnapi->tx_desc_mapping,
6654 GFP_KERNEL);
6655 if (!tnapi->tx_ring)
6656 goto err_out;
6659 /*
6660 * When RSS is enabled, the status block format changes
6661 * slightly. The "rx_jumbo_consumer", "reserved",
6662 * and "rx_mini_consumer" members get mapped to the
6663 * other three rx return ring producer indexes.
6664 */
6665 switch (i) {
6666 default:
6667 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6668 break;
6669 case 2:
6670 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6671 break;
6672 case 3:
6673 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6674 break;
6675 case 4:
6676 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6677 break;
6680 /*
6681 * If multivector RSS is enabled, vector 0 does not handle
6682 * rx or tx interrupts. Don't allocate any resources for it.
6683 */
6684 if (!i && tg3_flag(tp, ENABLE_RSS))
6685 continue;
6687 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6688 TG3_RX_RCB_RING_BYTES(tp),
6689 &tnapi->rx_rcb_mapping,
6690 GFP_KERNEL);
6691 if (!tnapi->rx_rcb)
6692 goto err_out;
6694 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6697 return 0;
6699 err_out:
6700 tg3_free_consistent(tp);
6701 return -ENOMEM;
6704 #define MAX_WAIT_CNT 1000
6706 /* To stop a block, clear the enable bit and poll till it
6707 * clears. tp->lock is held.
6708 */
6709 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6711 unsigned int i;
6712 u32 val;
6714 if (tg3_flag(tp, 5705_PLUS)) {
6715 switch (ofs) {
6716 case RCVLSC_MODE:
6717 case DMAC_MODE:
6718 case MBFREE_MODE:
6719 case BUFMGR_MODE:
6720 case MEMARB_MODE:
6721 /* We can't enable/disable these bits of the
6722 * 5705/5750, just say success.
6723 */
6724 return 0;
6726 default:
6727 break;
6731 val = tr32(ofs);
6732 val &= ~enable_bit;
6733 tw32_f(ofs, val);
6735 for (i = 0; i < MAX_WAIT_CNT; i++) {
6736 udelay(100);
6737 val = tr32(ofs);
6738 if ((val & enable_bit) == 0)
6739 break;
6742 if (i == MAX_WAIT_CNT && !silent) {
6743 dev_err(&tp->pdev->dev,
6744 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6745 ofs, enable_bit);
6746 return -ENODEV;
6749 return 0;
6752 /* tp->lock is held. */
6753 static int tg3_abort_hw(struct tg3 *tp, int silent)
6755 int i, err;
6757 tg3_disable_ints(tp);
6759 tp->rx_mode &= ~RX_MODE_ENABLE;
6760 tw32_f(MAC_RX_MODE, tp->rx_mode);
6761 udelay(10);
6763 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6764 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6765 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6766 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6767 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6768 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6770 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6771 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6772 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6773 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6774 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6775 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6776 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6778 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6779 tw32_f(MAC_MODE, tp->mac_mode);
6780 udelay(40);
6782 tp->tx_mode &= ~TX_MODE_ENABLE;
6783 tw32_f(MAC_TX_MODE, tp->tx_mode);
6785 for (i = 0; i < MAX_WAIT_CNT; i++) {
6786 udelay(100);
6787 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6788 break;
6790 if (i >= MAX_WAIT_CNT) {
6791 dev_err(&tp->pdev->dev,
6792 "%s timed out, TX_MODE_ENABLE will not clear "
6793 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6794 err |= -ENODEV;
6797 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6798 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6799 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6801 tw32(FTQ_RESET, 0xffffffff);
6802 tw32(FTQ_RESET, 0x00000000);
6804 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6805 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6807 for (i = 0; i < tp->irq_cnt; i++) {
6808 struct tg3_napi *tnapi = &tp->napi[i];
6809 if (tnapi->hw_status)
6810 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6812 if (tp->hw_stats)
6813 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6815 return err;
6818 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6820 int i;
6821 u32 apedata;
6823 /* NCSI does not support APE events */
6824 if (tg3_flag(tp, APE_HAS_NCSI))
6825 return;
6827 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6828 if (apedata != APE_SEG_SIG_MAGIC)
6829 return;
6831 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6832 if (!(apedata & APE_FW_STATUS_READY))
6833 return;
6835 /* Wait for up to 1 millisecond for APE to service previous event. */
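/* Editor's note: 10 iterations x udelay(100) below is where the 1 ms
 * budget in the comment above comes from.
 */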
6836 for (i = 0; i < 10; i++) {
6837 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6838 return;
6840 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6842 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6843 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6844 event | APE_EVENT_STATUS_EVENT_PENDING);
6846 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6848 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6849 break;
6851 udelay(100);
6854 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6855 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6858 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6860 u32 event;
6861 u32 apedata;
6863 if (!tg3_flag(tp, ENABLE_APE))
6864 return;
6866 switch (kind) {
6867 case RESET_KIND_INIT:
6868 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6869 APE_HOST_SEG_SIG_MAGIC);
6870 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6871 APE_HOST_SEG_LEN_MAGIC);
6872 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6873 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6874 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6875 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6876 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6877 APE_HOST_BEHAV_NO_PHYLOCK);
6878 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6879 TG3_APE_HOST_DRVR_STATE_START);
6881 event = APE_EVENT_STATUS_STATE_START;
6882 break;
6883 case RESET_KIND_SHUTDOWN:
6884 /* With the interface we are currently using,
6885 * APE does not track driver state. Wiping
6886 * out the HOST SEGMENT SIGNATURE forces
6887 * the APE to assume OS absent status.
6888 */
6889 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6891 if (device_may_wakeup(&tp->pdev->dev) &&
6892 tg3_flag(tp, WOL_ENABLE)) {
6893 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6894 TG3_APE_HOST_WOL_SPEED_AUTO);
6895 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6896 } else
6897 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6899 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6901 event = APE_EVENT_STATUS_STATE_UNLOAD;
6902 break;
6903 case RESET_KIND_SUSPEND:
6904 event = APE_EVENT_STATUS_STATE_SUSPEND;
6905 break;
6906 default:
6907 return;
6910 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6912 tg3_ape_send_event(tp, event);
6915 /* tp->lock is held. */
6916 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6918 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6919 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6921 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6922 switch (kind) {
6923 case RESET_KIND_INIT:
6924 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6925 DRV_STATE_START);
6926 break;
6928 case RESET_KIND_SHUTDOWN:
6929 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6930 DRV_STATE_UNLOAD);
6931 break;
6933 case RESET_KIND_SUSPEND:
6934 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6935 DRV_STATE_SUSPEND);
6936 break;
6938 default:
6939 break;
6943 if (kind == RESET_KIND_INIT ||
6944 kind == RESET_KIND_SUSPEND)
6945 tg3_ape_driver_state_change(tp, kind);
6948 /* tp->lock is held. */
6949 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6951 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6952 switch (kind) {
6953 case RESET_KIND_INIT:
6954 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6955 DRV_STATE_START_DONE);
6956 break;
6958 case RESET_KIND_SHUTDOWN:
6959 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6960 DRV_STATE_UNLOAD_DONE);
6961 break;
6963 default:
6964 break;
6968 if (kind == RESET_KIND_SHUTDOWN)
6969 tg3_ape_driver_state_change(tp, kind);
6972 /* tp->lock is held. */
6973 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6975 if (tg3_flag(tp, ENABLE_ASF)) {
6976 switch (kind) {
6977 case RESET_KIND_INIT:
6978 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6979 DRV_STATE_START);
6980 break;
6982 case RESET_KIND_SHUTDOWN:
6983 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6984 DRV_STATE_UNLOAD);
6985 break;
6987 case RESET_KIND_SUSPEND:
6988 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6989 DRV_STATE_SUSPEND);
6990 break;
6992 default:
6993 break;
6998 static int tg3_poll_fw(struct tg3 *tp)
7000 int i;
7001 u32 val;
7003 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7004 /* Wait up to 20ms for init done. */
7005 for (i = 0; i < 200; i++) {
7006 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7007 return 0;
7008 udelay(100);
7010 return -ENODEV;
7013 /* Wait for firmware initialization to complete. */
7014 for (i = 0; i < 100000; i++) {
7015 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7016 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7017 break;
7018 udelay(10);
7021 /* Chip might not be fitted with firmware. Some Sun onboard
7022 * parts are configured like that. So don't signal the timeout
7023 * of the above loop as an error, but do report the lack of
7024 * running firmware once.
7025 */
7026 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7027 tg3_flag_set(tp, NO_FWARE_REPORTED);
7029 netdev_info(tp->dev, "No firmware running\n");
7032 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7033 /* The 57765 A0 needs a little more
7034 * time to do some important work.
7035 */
7036 mdelay(10);
7039 return 0;
7042 /* Save PCI command register before chip reset */
7043 static void tg3_save_pci_state(struct tg3 *tp)
7045 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7048 /* Restore PCI state after chip reset */
7049 static void tg3_restore_pci_state(struct tg3 *tp)
7051 u32 val;
7053 /* Re-enable indirect register accesses. */
7054 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7055 tp->misc_host_ctrl);
7057 /* Set MAX PCI retry to zero. */
7058 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7059 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7060 tg3_flag(tp, PCIX_MODE))
7061 val |= PCISTATE_RETRY_SAME_DMA;
7062 /* Allow reads and writes to the APE register and memory space. */
7063 if (tg3_flag(tp, ENABLE_APE))
7064 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7065 PCISTATE_ALLOW_APE_SHMEM_WR |
7066 PCISTATE_ALLOW_APE_PSPACE_WR;
7067 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7069 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7071 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7072 if (tg3_flag(tp, PCI_EXPRESS))
7073 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7074 else {
7075 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7076 tp->pci_cacheline_sz);
7077 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7078 tp->pci_lat_timer);
7082 /* Make sure PCI-X relaxed ordering bit is clear. */
7083 if (tg3_flag(tp, PCIX_MODE)) {
7084 u16 pcix_cmd;
7086 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7087 &pcix_cmd);
7088 pcix_cmd &= ~PCI_X_CMD_ERO;
7089 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7090 pcix_cmd);
7093 if (tg3_flag(tp, 5780_CLASS)) {
7095 /* Chip reset on 5780 will reset MSI enable bit,
7096 * so we need to restore it.
7097 */
7098 if (tg3_flag(tp, USING_MSI)) {
7099 u16 ctrl;
7101 pci_read_config_word(tp->pdev,
7102 tp->msi_cap + PCI_MSI_FLAGS,
7103 &ctrl);
7104 pci_write_config_word(tp->pdev,
7105 tp->msi_cap + PCI_MSI_FLAGS,
7106 ctrl | PCI_MSI_FLAGS_ENABLE);
7107 val = tr32(MSGINT_MODE);
7108 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7113 static void tg3_stop_fw(struct tg3 *);
7115 /* tp->lock is held. */
7116 static int tg3_chip_reset(struct tg3 *tp)
7118 u32 val;
7119 void (*write_op)(struct tg3 *, u32, u32);
7120 int i, err;
7122 tg3_nvram_lock(tp);
7124 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7126 /* No matching tg3_nvram_unlock() after this because
7127 * chip reset below will undo the nvram lock.
7128 */
7129 tp->nvram_lock_cnt = 0;
7131 /* GRC_MISC_CFG core clock reset will clear the memory
7132 * enable bit in PCI register 4 and the MSI enable bit
7133 * on some chips, so we save relevant registers here.
7134 */
7135 tg3_save_pci_state(tp);
7137 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7138 tg3_flag(tp, 5755_PLUS))
7139 tw32(GRC_FASTBOOT_PC, 0);
7141 /*
7142 * We must avoid the readl() that normally takes place.
7143 * It locks machines, causes machine checks, and other
7144 * fun things. So, temporarily disable the 5701
7145 * hardware workaround, while we do the reset.
7146 */
7147 write_op = tp->write32;
7148 if (write_op == tg3_write_flush_reg32)
7149 tp->write32 = tg3_write32;
7151 /* Prevent the irq handler from reading or writing PCI registers
7152 * during chip reset when the memory enable bit in the PCI command
7153 * register may be cleared. The chip does not generate interrupts
7154 * at this time, but the irq handler may still be called due to irq
7155 * sharing or irqpoll.
7156 */
7157 tg3_flag_set(tp, CHIP_RESETTING);
7158 for (i = 0; i < tp->irq_cnt; i++) {
7159 struct tg3_napi *tnapi = &tp->napi[i];
7160 if (tnapi->hw_status) {
7161 tnapi->hw_status->status = 0;
7162 tnapi->hw_status->status_tag = 0;
7164 tnapi->last_tag = 0;
7165 tnapi->last_irq_tag = 0;
7167 smp_mb();
7169 for (i = 0; i < tp->irq_cnt; i++)
7170 synchronize_irq(tp->napi[i].irq_vec);
7172 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7173 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7174 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7177 /* do the reset */
7178 val = GRC_MISC_CFG_CORECLK_RESET;
7180 if (tg3_flag(tp, PCI_EXPRESS)) {
7181 /* Force PCIe 1.0a mode */
7182 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7183 !tg3_flag(tp, 57765_PLUS) &&
7184 tr32(TG3_PCIE_PHY_TSTCTL) ==
7185 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7186 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7188 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7189 tw32(GRC_MISC_CFG, (1 << 29));
7190 val |= (1 << 29);
7194 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7195 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7196 tw32(GRC_VCPU_EXT_CTRL,
7197 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7200 /* Manage gphy power for all CPMU absent PCIe devices. */
7201 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7202 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7204 tw32(GRC_MISC_CFG, val);
7206 /* restore 5701 hardware bug workaround write method */
7207 tp->write32 = write_op;
7209 /* Unfortunately, we have to delay before the PCI read back.
7210 * Some 575X chips will not even respond to a PCI cfg access
7211 * when the reset command is given to the chip.
7213 * How do these hardware designers expect things to work
7214 * properly if the PCI write is posted for a long period
7215 * of time? It is always necessary to have some method by
7216 * which a register read back can occur to push the write
7217 * out which does the reset.
7219 * For most tg3 variants the trick below has worked.
7220 * Ho hum...
7221 */
7222 udelay(120);
7224 /* Flush PCI posted writes. The normal MMIO registers
7225 * are inaccessible at this time so this is the only
7226 * way to do this reliably (actually, this is no longer
7227 * the case, see above). I tried to use indirect
7228 * register read/write but this upset some 5701 variants.
7229 */
7230 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7232 udelay(120);
7234 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7235 u16 val16;
7237 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7238 int i;
7239 u32 cfg_val;
7241 /* Wait for link training to complete. */
7242 for (i = 0; i < 5000; i++)
7243 udelay(100);
7245 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7246 pci_write_config_dword(tp->pdev, 0xc4,
7247 cfg_val | (1 << 15));
7250 /* Clear the "no snoop" and "relaxed ordering" bits. */
7251 pci_read_config_word(tp->pdev,
7252 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7253 &val16);
7254 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7255 PCI_EXP_DEVCTL_NOSNOOP_EN);
7256 /*
7257 * Older PCIe devices only support the 128 byte
7258 * MPS setting. Enforce the restriction.
7259 */
7260 if (!tg3_flag(tp, CPMU_PRESENT))
7261 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7262 pci_write_config_word(tp->pdev,
7263 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7264 val16);
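/* Editor's note: clearing the DEVCTL Max_Payload_Size field (bits 7:5)
 * selects encoding 0, which the PCIe spec defines as 128 bytes.
 */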
7266 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7268 /* Clear error status */
7269 pci_write_config_word(tp->pdev,
7270 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7271 PCI_EXP_DEVSTA_CED |
7272 PCI_EXP_DEVSTA_NFED |
7273 PCI_EXP_DEVSTA_FED |
7274 PCI_EXP_DEVSTA_URD);
7277 tg3_restore_pci_state(tp);
7279 tg3_flag_clear(tp, CHIP_RESETTING);
7280 tg3_flag_clear(tp, ERROR_PROCESSED);
7282 val = 0;
7283 if (tg3_flag(tp, 5780_CLASS))
7284 val = tr32(MEMARB_MODE);
7285 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7287 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7288 tg3_stop_fw(tp);
7289 tw32(0x5000, 0x400);
7292 tw32(GRC_MODE, tp->grc_mode);
7294 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7295 val = tr32(0xc4);
7297 tw32(0xc4, val | (1 << 15));
7300 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7301 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7302 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7303 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7304 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7305 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7308 if (tg3_flag(tp, ENABLE_APE))
7309 tp->mac_mode = MAC_MODE_APE_TX_EN |
7310 MAC_MODE_APE_RX_EN |
7311 MAC_MODE_TDE_ENABLE;
7313 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7314 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7315 val = tp->mac_mode;
7316 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7317 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7318 val = tp->mac_mode;
7319 } else
7320 val = 0;
7322 tw32_f(MAC_MODE, val);
7323 udelay(40);
7325 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7327 err = tg3_poll_fw(tp);
7328 if (err)
7329 return err;
7331 tg3_mdio_start(tp);
7333 if (tg3_flag(tp, PCI_EXPRESS) &&
7334 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7335 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7336 !tg3_flag(tp, 57765_PLUS)) {
7337 val = tr32(0x7c00);
7339 tw32(0x7c00, val | (1 << 25));
7342 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7343 val = tr32(TG3_CPMU_CLCK_ORIDE);
7344 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7347 /* Reprobe ASF enable state. */
7348 tg3_flag_clear(tp, ENABLE_ASF);
7349 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7350 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7351 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7352 u32 nic_cfg;
7354 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7355 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7356 tg3_flag_set(tp, ENABLE_ASF);
7357 tp->last_event_jiffies = jiffies;
7358 if (tg3_flag(tp, 5750_PLUS))
7359 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7363 return 0;
7366 /* tp->lock is held. */
7367 static void tg3_stop_fw(struct tg3 *tp)
7369 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7370 /* Wait for RX cpu to ACK the previous event. */
7371 tg3_wait_for_event_ack(tp);
7373 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7375 tg3_generate_fw_event(tp);
7377 /* Wait for RX cpu to ACK this event. */
7378 tg3_wait_for_event_ack(tp);
7382 /* tp->lock is held. */
7383 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7385 int err;
7387 tg3_stop_fw(tp);
7389 tg3_write_sig_pre_reset(tp, kind);
7391 tg3_abort_hw(tp, silent);
7392 err = tg3_chip_reset(tp);
7394 __tg3_set_mac_addr(tp, 0);
7396 tg3_write_sig_legacy(tp, kind);
7397 tg3_write_sig_post_reset(tp, kind);
7399 if (err)
7400 return err;
7402 return 0;
7405 #define RX_CPU_SCRATCH_BASE 0x30000
7406 #define RX_CPU_SCRATCH_SIZE 0x04000
7407 #define TX_CPU_SCRATCH_BASE 0x34000
7408 #define TX_CPU_SCRATCH_SIZE 0x04000
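/* Editor's note (derived from the constants above): the implied scratch
 * memory map is
 *     0x30000 - 0x33fff   RX CPU scratch (16 KiB)
 *     0x34000 - 0x37fff   TX CPU scratch (16 KiB)
 */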
7410 /* tp->lock is held. */
7411 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7413 int i;
7415 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7417 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7418 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7420 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7421 return 0;
7423 if (offset == RX_CPU_BASE) {
7424 for (i = 0; i < 10000; i++) {
7425 tw32(offset + CPU_STATE, 0xffffffff);
7426 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7427 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7428 break;
7431 tw32(offset + CPU_STATE, 0xffffffff);
7432 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7433 udelay(10);
7434 } else {
7435 for (i = 0; i < 10000; i++) {
7436 tw32(offset + CPU_STATE, 0xffffffff);
7437 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7438 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7439 break;
7443 if (i >= 10000) {
7444 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7445 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7446 return -ENODEV;
7449 /* Clear firmware's nvram arbitration. */
7450 if (tg3_flag(tp, NVRAM))
7451 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7452 return 0;
7455 struct fw_info {
7456 unsigned int fw_base;
7457 unsigned int fw_len;
7458 const __be32 *fw_data;
7461 /* tp->lock is held. */
7462 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7463 int cpu_scratch_size, struct fw_info *info)
7465 int err, lock_err, i;
7466 void (*write_op)(struct tg3 *, u32, u32);
7468 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7469 netdev_err(tp->dev,
7470 "%s: trying to load TX cpu firmware on a 5705-class chip\n",
7471 __func__);
7472 return -EINVAL;
7475 if (tg3_flag(tp, 5705_PLUS))
7476 write_op = tg3_write_mem;
7477 else
7478 write_op = tg3_write_indirect_reg32;
7480 /* It is possible that bootcode is still loading at this point.
7481 * Get the nvram lock first before halting the cpu.
7482 */
7483 lock_err = tg3_nvram_lock(tp);
7484 err = tg3_halt_cpu(tp, cpu_base);
7485 if (!lock_err)
7486 tg3_nvram_unlock(tp);
7487 if (err)
7488 goto out;
7490 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7491 write_op(tp, cpu_scratch_base + i, 0);
7492 tw32(cpu_base + CPU_STATE, 0xffffffff);
7493 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7494 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7495 write_op(tp, (cpu_scratch_base +
7496 (info->fw_base & 0xffff) +
7497 (i * sizeof(u32))),
7498 be32_to_cpu(info->fw_data[i]));
7500 err = 0;
7502 out:
7503 return err;
7506 /* tp->lock is held. */
7507 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7509 struct fw_info info;
7510 const __be32 *fw_data;
7511 int err, i;
7513 fw_data = (void *)tp->fw->data;
7515 /* Firmware blob starts with version numbers, followed by
7516 * start address and length. We are setting complete length.
7517 * length = end_address_of_bss - start_address_of_text.
7518 * Remainder is the blob to be loaded contiguously
7519 * from start address. */
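/* Editor's sketch of the layout the comment above describes (struct and
 * field names are hypothetical; the indexing matches the code below):
 *
 *     struct tg3_fw_blob {
 *             __be32 version;    // fw_data[0]
 *             __be32 base_addr;  // fw_data[1] -> info.fw_base
 *             __be32 len;        // fw_data[2]; payload is
 *                                //   tp->fw->size - 12 header bytes
 *             __be32 text[];     // fw_data[3] onward -> info.fw_data
 *     };
 */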
7521 info.fw_base = be32_to_cpu(fw_data[1]);
7522 info.fw_len = tp->fw->size - 12;
7523 info.fw_data = &fw_data[3];
7525 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7526 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7527 &info);
7528 if (err)
7529 return err;
7531 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7532 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7533 &info);
7534 if (err)
7535 return err;
7537 /* Now start up only the RX cpu. */
7538 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7539 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7541 for (i = 0; i < 5; i++) {
7542 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7543 break;
7544 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7545 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7546 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7547 udelay(1000);
7549 if (i >= 5) {
7550 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7551 "should be %08x\n", __func__,
7552 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7553 return -ENODEV;
7555 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7556 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7558 return 0;
7561 /* tp->lock is held. */
7562 static int tg3_load_tso_firmware(struct tg3 *tp)
7564 struct fw_info info;
7565 const __be32 *fw_data;
7566 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7567 int err, i;
7569 if (tg3_flag(tp, HW_TSO_1) ||
7570 tg3_flag(tp, HW_TSO_2) ||
7571 tg3_flag(tp, HW_TSO_3))
7572 return 0;
7574 fw_data = (void *)tp->fw->data;
7576 /* Firmware blob starts with version numbers, followed by
7577 * start address and length. We are setting complete length.
7578 * length = end_address_of_bss - start_address_of_text.
7579 * Remainder is the blob to be loaded contiguously
7580 * from start address. */
7582 info.fw_base = be32_to_cpu(fw_data[1]);
7583 cpu_scratch_size = tp->fw_len;
7584 info.fw_len = tp->fw->size - 12;
7585 info.fw_data = &fw_data[3];
7587 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7588 cpu_base = RX_CPU_BASE;
7589 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7590 } else {
7591 cpu_base = TX_CPU_BASE;
7592 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7593 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7596 err = tg3_load_firmware_cpu(tp, cpu_base,
7597 cpu_scratch_base, cpu_scratch_size,
7598 &info);
7599 if (err)
7600 return err;
7602 /* Now start up the cpu. */
7603 tw32(cpu_base + CPU_STATE, 0xffffffff);
7604 tw32_f(cpu_base + CPU_PC, info.fw_base);
7606 for (i = 0; i < 5; i++) {
7607 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7608 break;
7609 tw32(cpu_base + CPU_STATE, 0xffffffff);
7610 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7611 tw32_f(cpu_base + CPU_PC, info.fw_base);
7612 udelay(1000);
7614 if (i >= 5) {
7615 netdev_err(tp->dev,
7616 "%s fails to set CPU PC, is %08x should be %08x\n",
7617 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7618 return -ENODEV;
7620 tw32(cpu_base + CPU_STATE, 0xffffffff);
7621 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7622 return 0;
7626 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7628 struct tg3 *tp = netdev_priv(dev);
7629 struct sockaddr *addr = p;
7630 int err = 0, skip_mac_1 = 0;
7632 if (!is_valid_ether_addr(addr->sa_data))
7633 return -EINVAL;
7635 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7637 if (!netif_running(dev))
7638 return 0;
7640 if (tg3_flag(tp, ENABLE_ASF)) {
7641 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7643 addr0_high = tr32(MAC_ADDR_0_HIGH);
7644 addr0_low = tr32(MAC_ADDR_0_LOW);
7645 addr1_high = tr32(MAC_ADDR_1_HIGH);
7646 addr1_low = tr32(MAC_ADDR_1_LOW);
7648 /* Skip MAC addr 1 if ASF is using it. */
7649 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7650 !(addr1_high == 0 && addr1_low == 0))
7651 skip_mac_1 = 1;
7653 spin_lock_bh(&tp->lock);
7654 __tg3_set_mac_addr(tp, skip_mac_1);
7655 spin_unlock_bh(&tp->lock);
7657 return err;
7660 /* tp->lock is held. */
7661 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7662 dma_addr_t mapping, u32 maxlen_flags,
7663 u32 nic_addr)
7665 tg3_write_mem(tp,
7666 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7667 ((u64) mapping >> 32));
7668 tg3_write_mem(tp,
7669 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7670 ((u64) mapping & 0xffffffff));
7671 tg3_write_mem(tp,
7672 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7673 maxlen_flags);
7675 if (!tg3_flag(tp, 5705_PLUS))
7676 tg3_write_mem(tp,
7677 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7678 nic_addr);
7681 static void __tg3_set_rx_mode(struct net_device *);
7682 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7684 int i;
7686 if (!tg3_flag(tp, ENABLE_TSS)) {
7687 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7688 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7689 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7690 } else {
7691 tw32(HOSTCC_TXCOL_TICKS, 0);
7692 tw32(HOSTCC_TXMAX_FRAMES, 0);
7693 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7696 if (!tg3_flag(tp, ENABLE_RSS)) {
7697 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7698 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7699 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7700 } else {
7701 tw32(HOSTCC_RXCOL_TICKS, 0);
7702 tw32(HOSTCC_RXMAX_FRAMES, 0);
7703 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7706 if (!tg3_flag(tp, 5705_PLUS)) {
7707 u32 val = ec->stats_block_coalesce_usecs;
7709 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7710 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7712 if (!netif_carrier_ok(tp->dev))
7713 val = 0;
7715 tw32(HOSTCC_STAT_COAL_TICKS, val);
7718 for (i = 0; i < tp->irq_cnt - 1; i++) {
7719 u32 reg;
7721 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7722 tw32(reg, ec->rx_coalesce_usecs);
7723 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7724 tw32(reg, ec->rx_max_coalesced_frames);
7725 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7726 tw32(reg, ec->rx_max_coalesced_frames_irq);
7728 if (tg3_flag(tp, ENABLE_TSS)) {
7729 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7730 tw32(reg, ec->tx_coalesce_usecs);
7731 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7732 tw32(reg, ec->tx_max_coalesced_frames);
7733 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7734 tw32(reg, ec->tx_max_coalesced_frames_irq);
7738 for (; i < tp->irq_max - 1; i++) {
7739 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7740 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7741 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7743 if (tg3_flag(tp, ENABLE_TSS)) {
7744 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7745 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7746 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
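/* Editor's note: each extra interrupt vector owns a 0x18-byte block of
 * coalescing registers, hence the "+ i * 0x18" stride in both loops
 * above.
 */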
7751 /* tp->lock is held. */
7752 static void tg3_rings_reset(struct tg3 *tp)
7754 int i;
7755 u32 stblk, txrcb, rxrcb, limit;
7756 struct tg3_napi *tnapi = &tp->napi[0];
7758 /* Disable all transmit rings but the first. */
7759 if (!tg3_flag(tp, 5705_PLUS))
7760 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7761 else if (tg3_flag(tp, 5717_PLUS))
7762 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7763 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7764 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7765 else
7766 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7768 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7769 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7770 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7771 BDINFO_FLAGS_DISABLED);
7774 /* Disable all receive return rings but the first. */
7775 if (tg3_flag(tp, 5717_PLUS))
7776 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7777 else if (!tg3_flag(tp, 5705_PLUS))
7778 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7779 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7780 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7781 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7782 else
7783 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7785 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7786 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7787 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7788 BDINFO_FLAGS_DISABLED);
7790 /* Disable interrupts */
7791 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7792 tp->napi[0].chk_msi_cnt = 0;
7793 tp->napi[0].last_rx_cons = 0;
7794 tp->napi[0].last_tx_cons = 0;
7796 /* Zero mailbox registers. */
7797 if (tg3_flag(tp, SUPPORT_MSIX)) {
7798 for (i = 1; i < tp->irq_max; i++) {
7799 tp->napi[i].tx_prod = 0;
7800 tp->napi[i].tx_cons = 0;
7801 if (tg3_flag(tp, ENABLE_TSS))
7802 tw32_mailbox(tp->napi[i].prodmbox, 0);
7803 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7804 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7805 tp->napi[i].chk_msi_cnt = 0;
7806 tp->napi[i].last_rx_cons = 0;
7807 tp->napi[i].last_tx_cons = 0;
7809 if (!tg3_flag(tp, ENABLE_TSS))
7810 tw32_mailbox(tp->napi[0].prodmbox, 0);
7811 } else {
7812 tp->napi[0].tx_prod = 0;
7813 tp->napi[0].tx_cons = 0;
7814 tw32_mailbox(tp->napi[0].prodmbox, 0);
7815 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7818 /* Make sure the NIC-based send BD rings are disabled. */
7819 if (!tg3_flag(tp, 5705_PLUS)) {
7820 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7821 for (i = 0; i < 16; i++)
7822 tw32_tx_mbox(mbox + i * 8, 0);
7825 txrcb = NIC_SRAM_SEND_RCB;
7826 rxrcb = NIC_SRAM_RCV_RET_RCB;
7828 /* Clear status block in ram. */
7829 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7831 /* Set status block DMA address */
7832 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7833 ((u64) tnapi->status_mapping >> 32));
7834 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7835 ((u64) tnapi->status_mapping & 0xffffffff));
7837 if (tnapi->tx_ring) {
7838 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7839 (TG3_TX_RING_SIZE <<
7840 BDINFO_FLAGS_MAXLEN_SHIFT),
7841 NIC_SRAM_TX_BUFFER_DESC);
7842 txrcb += TG3_BDINFO_SIZE;
7845 if (tnapi->rx_rcb) {
7846 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7847 (tp->rx_ret_ring_mask + 1) <<
7848 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7849 rxrcb += TG3_BDINFO_SIZE;
7852 stblk = HOSTCC_STATBLCK_RING1;
7854 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7855 u64 mapping = (u64)tnapi->status_mapping;
7856 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7857 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7859 /* Clear status block in ram. */
7860 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7862 if (tnapi->tx_ring) {
7863 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7864 (TG3_TX_RING_SIZE <<
7865 BDINFO_FLAGS_MAXLEN_SHIFT),
7866 NIC_SRAM_TX_BUFFER_DESC);
7867 txrcb += TG3_BDINFO_SIZE;
7870 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7871 ((tp->rx_ret_ring_mask + 1) <<
7872 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7874 stblk += 8;
7875 rxrcb += TG3_BDINFO_SIZE;
7879 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7881 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7883 if (!tg3_flag(tp, 5750_PLUS) ||
7884 tg3_flag(tp, 5780_CLASS) ||
7885 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7886 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7887 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7888 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7889 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7890 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7891 else
7892 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7894 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7895 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7897 val = min(nic_rep_thresh, host_rep_thresh);
7898 tw32(RCVBDI_STD_THRESH, val);
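/* Editor's note (worked example with a hypothetical cache size): with
 * rx_pending = 200 and bdcache_maxcnt = 32, host_rep_thresh =
 * max(200 / 8, 1) = 25 and nic_rep_thresh = min(32 / 2,
 * tp->rx_std_max_post), so the register receives the smaller of the NIC
 * and host replenish limits.
 */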
7900 if (tg3_flag(tp, 57765_PLUS))
7901 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7903 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7904 return;
7906 if (!tg3_flag(tp, 5705_PLUS))
7907 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7908 else
7909 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7911 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7913 val = min(bdcache_maxcnt / 2, host_rep_thresh);
7914 tw32(RCVBDI_JUMBO_THRESH, val);
7916 if (tg3_flag(tp, 57765_PLUS))
7917 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
7920 /* tp->lock is held. */
7921 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7923 u32 val, rdmac_mode;
7924 int i, err, limit;
7925 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7927 tg3_disable_ints(tp);
7929 tg3_stop_fw(tp);
7931 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7933 if (tg3_flag(tp, INIT_COMPLETE))
7934 tg3_abort_hw(tp, 1);
7936 /* Enable MAC control of LPI */
7937 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7938 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7939 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7940 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7942 tw32_f(TG3_CPMU_EEE_CTRL,
7943 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7945 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7946 TG3_CPMU_EEEMD_LPI_IN_TX |
7947 TG3_CPMU_EEEMD_LPI_IN_RX |
7948 TG3_CPMU_EEEMD_EEE_ENABLE;
7950 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7951 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7953 if (tg3_flag(tp, ENABLE_APE))
7954 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7956 tw32_f(TG3_CPMU_EEE_MODE, val);
7958 tw32_f(TG3_CPMU_EEE_DBTMR1,
7959 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7960 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7962 tw32_f(TG3_CPMU_EEE_DBTMR2,
7963 TG3_CPMU_DBTMR2_APE_TX_2047US |
7964 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7967 if (reset_phy)
7968 tg3_phy_reset(tp);
7970 err = tg3_chip_reset(tp);
7971 if (err)
7972 return err;
7974 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7976 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7977 val = tr32(TG3_CPMU_CTRL);
7978 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7979 tw32(TG3_CPMU_CTRL, val);
7981 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7982 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7983 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7984 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7986 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7987 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7988 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7989 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7991 val = tr32(TG3_CPMU_HST_ACC);
7992 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7993 val |= CPMU_HST_ACC_MACCLK_6_25;
7994 tw32(TG3_CPMU_HST_ACC, val);
7997 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7998 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7999 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8000 PCIE_PWR_MGMT_L1_THRESH_4MS;
8001 tw32(PCIE_PWR_MGMT_THRESH, val);
8003 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8004 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8006 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8008 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8009 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8012 if (tg3_flag(tp, L1PLLPD_EN)) {
8013 u32 grc_mode = tr32(GRC_MODE);
8015 /* Access the lower 1K of PL PCIE block registers. */
8016 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8017 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8019 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8020 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8021 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8023 tw32(GRC_MODE, grc_mode);
8026 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8027 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8028 u32 grc_mode = tr32(GRC_MODE);
8030 /* Access the lower 1K of PL PCIE block registers. */
8031 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8032 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8034 val = tr32(TG3_PCIE_TLDLPL_PORT +
8035 TG3_PCIE_PL_LO_PHYCTL5);
8036 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8037 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8039 tw32(GRC_MODE, grc_mode);
8042 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8043 u32 grc_mode = tr32(GRC_MODE);
8045 /* Access the lower 1K of DL PCIE block registers. */
8046 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8047 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8049 val = tr32(TG3_PCIE_TLDLPL_PORT +
8050 TG3_PCIE_DL_LO_FTSMAX);
8051 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8052 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8053 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8055 tw32(GRC_MODE, grc_mode);
8058 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8059 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8060 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8061 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8064 /* This works around an issue with Athlon chipsets on
8065 * B3 tigon3 silicon. This bit has no effect on any
8066 * other revision. But do not set this on PCI Express
8067 * chips and don't even touch the clocks if the CPMU is present.
8068 */
8069 if (!tg3_flag(tp, CPMU_PRESENT)) {
8070 if (!tg3_flag(tp, PCI_EXPRESS))
8071 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8072 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8075 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8076 tg3_flag(tp, PCIX_MODE)) {
8077 val = tr32(TG3PCI_PCISTATE);
8078 val |= PCISTATE_RETRY_SAME_DMA;
8079 tw32(TG3PCI_PCISTATE, val);
8082 if (tg3_flag(tp, ENABLE_APE)) {
8083 /* Allow reads and writes to the
8084 * APE register and memory space.
8085 */
8086 val = tr32(TG3PCI_PCISTATE);
8087 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8088 PCISTATE_ALLOW_APE_SHMEM_WR |
8089 PCISTATE_ALLOW_APE_PSPACE_WR;
8090 tw32(TG3PCI_PCISTATE, val);
8093 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8094 /* Enable some hw fixes. */
8095 val = tr32(TG3PCI_MSI_DATA);
8096 val |= (1 << 26) | (1 << 28) | (1 << 29);
8097 tw32(TG3PCI_MSI_DATA, val);
8100 /* Descriptor ring init may make accesses to the
8101 * NIC SRAM area to set up the TX descriptors, so we
8102 * can only do this after the hardware has been
8103 * successfully reset.
8104 */
8105 err = tg3_init_rings(tp);
8106 if (err)
8107 return err;
8109 if (tg3_flag(tp, 57765_PLUS)) {
8110 val = tr32(TG3PCI_DMA_RW_CTRL) &
8111 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8112 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8113 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8114 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8115 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8116 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8117 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8118 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8119 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8120 /* This value is determined during the probe time DMA
8121 * engine test, tg3_test_dma.
8122 */
8123 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8126 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8127 GRC_MODE_4X_NIC_SEND_RINGS |
8128 GRC_MODE_NO_TX_PHDR_CSUM |
8129 GRC_MODE_NO_RX_PHDR_CSUM);
8130 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8132 /* Pseudo-header checksum is done by hardware logic and not
8133 * the offload processors, so make the chip do the pseudo-
8134 * header checksums on receive. For transmit it is more
8135 * convenient to do the pseudo-header checksum in software
8136 * as Linux does that on transmit for us in all cases.
8137 */
8138 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8140 tw32(GRC_MODE,
8141 tp->grc_mode |
8142 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8144 /* Set up the timer prescaler register. Clock is always 66 MHz. */
8145 val = tr32(GRC_MISC_CFG);
8146 val &= ~0xff;
8147 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8148 tw32(GRC_MISC_CFG, val);
8150 /* Initialize MBUF/DESC pool. */
8151 if (tg3_flag(tp, 5750_PLUS)) {
8152 /* Do nothing. */
8153 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8154 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8155 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8156 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8157 else
8158 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8159 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8160 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8161 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8162 int fw_len;
8164 fw_len = tp->fw_len;
8165 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8166 tw32(BUFMGR_MB_POOL_ADDR,
8167 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8168 tw32(BUFMGR_MB_POOL_SIZE,
8169 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8172 if (tp->dev->mtu <= ETH_DATA_LEN) {
8173 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8174 tp->bufmgr_config.mbuf_read_dma_low_water);
8175 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8176 tp->bufmgr_config.mbuf_mac_rx_low_water);
8177 tw32(BUFMGR_MB_HIGH_WATER,
8178 tp->bufmgr_config.mbuf_high_water);
8179 } else {
8180 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8181 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8182 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8183 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8184 tw32(BUFMGR_MB_HIGH_WATER,
8185 tp->bufmgr_config.mbuf_high_water_jumbo);
8187 tw32(BUFMGR_DMA_LOW_WATER,
8188 tp->bufmgr_config.dma_low_water);
8189 tw32(BUFMGR_DMA_HIGH_WATER,
8190 tp->bufmgr_config.dma_high_water);
8192 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8193 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8194 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8195 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8196 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8197 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8198 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8199 tw32(BUFMGR_MODE, val);
8200 for (i = 0; i < 2000; i++) {
8201 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8202 break;
8203 udelay(10);
8205 if (i >= 2000) {
8206 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8207 return -ENODEV;
8210 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8211 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8213 tg3_setup_rxbd_thresholds(tp);
8215 /* Initialize TG3_BDINFO's at:
8216 * RCVDBDI_STD_BD: standard eth size rx ring
8217 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8218 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8220 * like so:
8221 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8222 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8223 * ring attribute flags
8224 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8226 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8227 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8229 * The size of each ring is fixed in the firmware, but the location is
8230 * configurable.
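/* Illustrative sketch of the TG3_BDINFO layout described above,
 * expressed as a C struct (hypothetical; the driver addresses the
 * fields by register offset instead):
 *
 *	struct tg3_bdinfo_example {
 *		u32 host_addr_high;	// TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH
 *		u32 host_addr_low;	// TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW
 *		u32 maxlen_flags;	// (rx max buffer size << 16) | ring flags
 *		u32 nic_addr;		// descriptor location in NIC SRAM
 *	};
 */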
8232 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8233 ((u64) tpr->rx_std_mapping >> 32));
8234 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8235 ((u64) tpr->rx_std_mapping & 0xffffffff));
8236 if (!tg3_flag(tp, 5717_PLUS))
8237 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8238 NIC_SRAM_RX_BUFFER_DESC);
8240 /* Disable the mini ring */
8241 if (!tg3_flag(tp, 5705_PLUS))
8242 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8243 BDINFO_FLAGS_DISABLED);
8245 /* Program the jumbo buffer descriptor ring control
8246 * blocks on those devices that have them.
8248 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8249 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8251 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8252 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8253 ((u64) tpr->rx_jmb_mapping >> 32));
8254 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8255 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8256 val = TG3_RX_JMB_RING_SIZE(tp) <<
8257 BDINFO_FLAGS_MAXLEN_SHIFT;
8258 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8259 val | BDINFO_FLAGS_USE_EXT_RECV);
8260 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8261 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8262 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8263 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8264 } else {
8265 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8266 BDINFO_FLAGS_DISABLED);
8269 if (tg3_flag(tp, 57765_PLUS)) {
8270 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8271 val = TG3_RX_STD_MAX_SIZE_5700;
8272 else
8273 val = TG3_RX_STD_MAX_SIZE_5717;
8274 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8275 val |= (TG3_RX_STD_DMA_SZ << 2);
8276 } else
8277 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8278 } else
8279 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8281 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8283 tpr->rx_std_prod_idx = tp->rx_pending;
8284 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8286 tpr->rx_jmb_prod_idx =
8287 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8288 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8290 tg3_rings_reset(tp);
8292 /* Initialize MAC address and backoff seed. */
8293 __tg3_set_mac_addr(tp, 0);
8295 /* MTU + Ethernet header + FCS + optional VLAN tag */
8296 tw32(MAC_RX_MTU_SIZE,
8297 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
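/* Worked example for the write above with the standard 1500-byte MTU:
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes.
 */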
8299 /* The slot time is changed by tg3_setup_phy if we
8300 * run at gigabit with half duplex.
8302 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8303 (6 << TX_LENGTHS_IPG_SHIFT) |
8304 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8306 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8307 val |= tr32(MAC_TX_LENGTHS) &
8308 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8309 TX_LENGTHS_CNT_DWN_VAL_MSK);
8311 tw32(MAC_TX_LENGTHS, val);
8313 /* Receive rules. */
8314 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8315 tw32(RCVLPC_CONFIG, 0x0181);
8317 /* Calculate RDMAC_MODE setting early, we need it to determine
8318 * the RCVLPC_STATE_ENABLE mask.
8320 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8321 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8322 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8323 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8324 RDMAC_MODE_LNGREAD_ENAB);
8326 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8327 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8329 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8330 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8331 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8332 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8333 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8334 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8336 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8337 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8338 if (tg3_flag(tp, TSO_CAPABLE) &&
8339 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8340 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8341 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8342 !tg3_flag(tp, IS_5788)) {
8343 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8347 if (tg3_flag(tp, PCI_EXPRESS))
8348 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8350 if (tg3_flag(tp, HW_TSO_1) ||
8351 tg3_flag(tp, HW_TSO_2) ||
8352 tg3_flag(tp, HW_TSO_3))
8353 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8355 if (tg3_flag(tp, 57765_PLUS) ||
8356 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8357 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8358 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8360 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8361 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8363 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8364 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8365 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8366 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8367 tg3_flag(tp, 57765_PLUS)) {
8368 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8369 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8370 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8371 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8372 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8373 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8374 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8375 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8376 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8378 tw32(TG3_RDMA_RSRVCTRL_REG,
8379 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8382 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8383 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8384 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8385 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8386 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8387 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8390 /* Receive/send statistics. */
8391 if (tg3_flag(tp, 5750_PLUS)) {
8392 val = tr32(RCVLPC_STATS_ENABLE);
8393 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8394 tw32(RCVLPC_STATS_ENABLE, val);
8395 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8396 tg3_flag(tp, TSO_CAPABLE)) {
8397 val = tr32(RCVLPC_STATS_ENABLE);
8398 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8399 tw32(RCVLPC_STATS_ENABLE, val);
8400 } else {
8401 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8403 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8404 tw32(SNDDATAI_STATSENAB, 0xffffff);
8405 tw32(SNDDATAI_STATSCTRL,
8406 (SNDDATAI_SCTRL_ENABLE |
8407 SNDDATAI_SCTRL_FASTUPD));
8409 /* Set up the host coalescing engine. */
8410 tw32(HOSTCC_MODE, 0);
8411 for (i = 0; i < 2000; i++) {
8412 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8413 break;
8414 udelay(10);
8417 __tg3_set_coalesce(tp, &tp->coal);
8419 if (!tg3_flag(tp, 5705_PLUS)) {
8420 /* Status/statistics block address. See tg3_timer,
8421 * the tg3_periodic_fetch_stats call there, and
8422 * tg3_get_stats to see how this works for 5705/5750 chips.
8424 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8425 ((u64) tp->stats_mapping >> 32));
8426 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8427 ((u64) tp->stats_mapping & 0xffffffff));
8428 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8430 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8432 /* Clear statistics and status block memory areas */
8433 for (i = NIC_SRAM_STATS_BLK;
8434 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8435 i += sizeof(u32)) {
8436 tg3_write_mem(tp, i, 0);
8437 udelay(40);
8441 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8443 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8444 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8445 if (!tg3_flag(tp, 5705_PLUS))
8446 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8448 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8449 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8450 /* reset to prevent losing 1st rx packet intermittently */
8451 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8452 udelay(10);
8455 if (tg3_flag(tp, ENABLE_APE))
8456 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8457 else
8458 tp->mac_mode = 0;
8459 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8460 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8461 if (!tg3_flag(tp, 5705_PLUS) &&
8462 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8463 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8464 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8465 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8466 udelay(40);
8468 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8469 * If TG3_FLAG_IS_NIC is zero, we should read the
8470 * register to preserve the GPIO settings for LOMs. The GPIOs,
8471 * whether used as inputs or outputs, are set by boot code after
8472 * reset.
8474 if (!tg3_flag(tp, IS_NIC)) {
8475 u32 gpio_mask;
8477 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8478 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8479 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8481 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8482 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8483 GRC_LCLCTRL_GPIO_OUTPUT3;
8485 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8486 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8488 tp->grc_local_ctrl &= ~gpio_mask;
8489 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8491 /* GPIO1 must be driven high for eeprom write protect */
8492 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8493 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8494 GRC_LCLCTRL_GPIO_OUTPUT1);
8496 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8497 udelay(100);
8499 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8500 val = tr32(MSGINT_MODE);
8501 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8502 tw32(MSGINT_MODE, val);
8505 if (!tg3_flag(tp, 5705_PLUS)) {
8506 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8507 udelay(40);
8510 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8511 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8512 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8513 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8514 WDMAC_MODE_LNGREAD_ENAB);
8516 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8517 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8518 if (tg3_flag(tp, TSO_CAPABLE) &&
8519 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8520 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8521 /* nothing */
8522 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8523 !tg3_flag(tp, IS_5788)) {
8524 val |= WDMAC_MODE_RX_ACCEL;
8528 /* Enable host coalescing bug fix */
8529 if (tg3_flag(tp, 5755_PLUS))
8530 val |= WDMAC_MODE_STATUS_TAG_FIX;
8532 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8533 val |= WDMAC_MODE_BURST_ALL_DATA;
8535 tw32_f(WDMAC_MODE, val);
8536 udelay(40);
8538 if (tg3_flag(tp, PCIX_MODE)) {
8539 u16 pcix_cmd;
8541 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8542 &pcix_cmd);
8543 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8544 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8545 pcix_cmd |= PCI_X_CMD_READ_2K;
8546 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8547 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8548 pcix_cmd |= PCI_X_CMD_READ_2K;
8550 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8551 pcix_cmd);
8554 tw32_f(RDMAC_MODE, rdmac_mode);
8555 udelay(40);
8557 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8558 if (!tg3_flag(tp, 5705_PLUS))
8559 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8561 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8562 tw32(SNDDATAC_MODE,
8563 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8564 else
8565 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8567 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8568 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8569 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8570 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8571 val |= RCVDBDI_MODE_LRG_RING_SZ;
8572 tw32(RCVDBDI_MODE, val);
8573 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8574 if (tg3_flag(tp, HW_TSO_1) ||
8575 tg3_flag(tp, HW_TSO_2) ||
8576 tg3_flag(tp, HW_TSO_3))
8577 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8578 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8579 if (tg3_flag(tp, ENABLE_TSS))
8580 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8581 tw32(SNDBDI_MODE, val);
8582 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8584 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8585 err = tg3_load_5701_a0_firmware_fix(tp);
8586 if (err)
8587 return err;
8590 if (tg3_flag(tp, TSO_CAPABLE)) {
8591 err = tg3_load_tso_firmware(tp);
8592 if (err)
8593 return err;
8596 tp->tx_mode = TX_MODE_ENABLE;
8598 if (tg3_flag(tp, 5755_PLUS) ||
8599 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8600 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8602 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8603 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8604 tp->tx_mode &= ~val;
8605 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8608 tw32_f(MAC_TX_MODE, tp->tx_mode);
8609 udelay(100);
8611 if (tg3_flag(tp, ENABLE_RSS)) {
8612 u32 reg = MAC_RSS_INDIR_TBL_0;
8613 u8 *ent = (u8 *)&val;
8615 /* Set up the indirection table */
8616 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8617 int idx = i % sizeof(val);
8619 ent[idx] = i % (tp->irq_cnt - 1);
8620 if (idx == sizeof(val) - 1) {
8621 tw32(reg, val);
8622 reg += 4;
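/* Example of the packing above: the table holds one queue index per
 * byte, four to a 32-bit register.  With tp->irq_cnt == 5 (one vector
 * for link events plus four rx vectors), the entries cycle
 * 0, 1, 2, 3, 0, 1, ... so hashed rx traffic is spread evenly over
 * the four rx rings.
 */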
8626 /* Set up the "secret" hash key. */
8627 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8628 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8629 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8630 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8631 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8632 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8633 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8634 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8635 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8636 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8639 tp->rx_mode = RX_MODE_ENABLE;
8640 if (tg3_flag(tp, 5755_PLUS))
8641 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8643 if (tg3_flag(tp, ENABLE_RSS))
8644 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8645 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8646 RX_MODE_RSS_IPV6_HASH_EN |
8647 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8648 RX_MODE_RSS_IPV4_HASH_EN |
8649 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8651 tw32_f(MAC_RX_MODE, tp->rx_mode);
8652 udelay(10);
8654 tw32(MAC_LED_CTRL, tp->led_ctrl);
8656 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8657 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8658 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8659 udelay(10);
8661 tw32_f(MAC_RX_MODE, tp->rx_mode);
8662 udelay(10);
8664 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8665 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8666 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8667 /* Set drive transmission level to 1.2V */
8668 /* only if the signal pre-emphasis bit is not set */
8669 val = tr32(MAC_SERDES_CFG);
8670 val &= 0xfffff000;
8671 val |= 0x880;
8672 tw32(MAC_SERDES_CFG, val);
8674 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8675 tw32(MAC_SERDES_CFG, 0x616000);
8678 /* Prevent chip from dropping frames when flow control
8679 * is enabled.
8681 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8682 val = 1;
8683 else
8684 val = 2;
8685 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8687 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8688 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8689 /* Use hardware link auto-negotiation */
8690 tg3_flag_set(tp, HW_AUTONEG);
8693 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8694 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8695 u32 tmp;
8697 tmp = tr32(SERDES_RX_CTRL);
8698 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8699 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8700 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8701 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8704 if (!tg3_flag(tp, USE_PHYLIB)) {
8705 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8706 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8707 tp->link_config.speed = tp->link_config.orig_speed;
8708 tp->link_config.duplex = tp->link_config.orig_duplex;
8709 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8712 err = tg3_setup_phy(tp, 0);
8713 if (err)
8714 return err;
8716 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8717 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8718 u32 tmp;
8720 /* Clear CRC stats. */
8721 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8722 tg3_writephy(tp, MII_TG3_TEST1,
8723 tmp | MII_TG3_TEST1_CRC_EN);
8724 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8729 __tg3_set_rx_mode(tp->dev);
8731 /* Initialize receive rules. */
8732 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8733 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8734 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8735 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8737 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8738 limit = 8;
8739 else
8740 limit = 16;
8741 if (tg3_flag(tp, ENABLE_ASF))
8742 limit -= 4;
8743 switch (limit) {
8744 case 16:
8745 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8746 case 15:
8747 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8748 case 14:
8749 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8750 case 13:
8751 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8752 case 12:
8753 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8754 case 11:
8755 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8756 case 10:
8757 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8758 case 9:
8759 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8760 case 8:
8761 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8762 case 7:
8763 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8764 case 6:
8765 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8766 case 5:
8767 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8768 case 4:
8769 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8770 case 3:
8771 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8772 case 2:
8773 case 1:
8775 default:
8776 break;
8779 if (tg3_flag(tp, ENABLE_APE))
8780 /* Write our heartbeat update interval to APE. */
8781 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8782 APE_HOST_HEARTBEAT_INT_DISABLE);
8784 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8786 return 0;
8789 /* Called at device open time to get the chip ready for
8790 * packet processing. Invoked with tp->lock held.
8792 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8794 tg3_switch_clocks(tp);
8796 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8798 return tg3_reset_hw(tp, reset_phy);
8801 #define TG3_STAT_ADD32(PSTAT, REG) \
8802 do { u32 __val = tr32(REG); \
8803 (PSTAT)->low += __val; \
8804 if ((PSTAT)->low < __val) \
8805 (PSTAT)->high += 1; \
8806 } while (0)
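/* How the macro above widens the 32-bit hardware counters to 64 bits:
 * after low += __val, the unsigned addition can only have wrapped if
 * the result is smaller than the value just added, in which case a
 * carry is propagated into the high word.  Example: low = 0xffffffff,
 * __val = 2 gives low = 1; since 1 < 2, high is incremented.
 */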
8808 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8810 struct tg3_hw_stats *sp = tp->hw_stats;
8812 if (!netif_carrier_ok(tp->dev))
8813 return;
8815 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8816 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8817 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8818 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8819 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8820 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8821 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8822 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8823 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8824 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8825 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8826 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8827 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8829 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8830 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8831 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8832 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8833 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8834 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8835 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8836 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8837 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8838 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8839 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8840 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8841 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8842 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8844 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8845 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8846 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8847 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8848 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8849 } else {
8850 u32 val = tr32(HOSTCC_FLOW_ATTN);
8851 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8852 if (val) {
8853 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8854 sp->rx_discards.low += val;
8855 if (sp->rx_discards.low < val)
8856 sp->rx_discards.high += 1;
8858 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8860 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8863 static void tg3_chk_missed_msi(struct tg3 *tp)
8865 u32 i;
8867 for (i = 0; i < tp->irq_cnt; i++) {
8868 struct tg3_napi *tnapi = &tp->napi[i];
8870 if (tg3_has_work(tnapi)) {
8871 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
8872 tnapi->last_tx_cons == tnapi->tx_cons) {
8873 if (tnapi->chk_msi_cnt < 1) {
8874 tnapi->chk_msi_cnt++;
8875 return;
8877 tw32_mailbox(tnapi->int_mbox,
8878 tnapi->last_tag << 24);
8881 tnapi->chk_msi_cnt = 0;
8882 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
8883 tnapi->last_tx_cons = tnapi->tx_cons;
8887 static void tg3_timer(unsigned long __opaque)
8889 struct tg3 *tp = (struct tg3 *) __opaque;
8891 if (tp->irq_sync)
8892 goto restart_timer;
8894 spin_lock(&tp->lock);
8896 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8897 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8898 tg3_chk_missed_msi(tp);
8900 if (!tg3_flag(tp, TAGGED_STATUS)) {
8901 /* All of this garbage is because, when using non-tagged
8902 * IRQ status, the mailbox/status_block protocol the chip
8903 * uses with the CPU is race prone.
8905 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8906 tw32(GRC_LOCAL_CTRL,
8907 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8908 } else {
8909 tw32(HOSTCC_MODE, tp->coalesce_mode |
8910 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8913 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8914 tg3_flag_set(tp, RESTART_TIMER);
8915 spin_unlock(&tp->lock);
8916 schedule_work(&tp->reset_task);
8917 return;
8921 /* This part only runs once per second. */
8922 if (!--tp->timer_counter) {
8923 if (tg3_flag(tp, 5705_PLUS))
8924 tg3_periodic_fetch_stats(tp);
8926 if (tp->setlpicnt && !--tp->setlpicnt)
8927 tg3_phy_eee_enable(tp);
8929 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8930 u32 mac_stat;
8931 int phy_event;
8933 mac_stat = tr32(MAC_STATUS);
8935 phy_event = 0;
8936 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8937 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8938 phy_event = 1;
8939 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8940 phy_event = 1;
8942 if (phy_event)
8943 tg3_setup_phy(tp, 0);
8944 } else if (tg3_flag(tp, POLL_SERDES)) {
8945 u32 mac_stat = tr32(MAC_STATUS);
8946 int need_setup = 0;
8948 if (netif_carrier_ok(tp->dev) &&
8949 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8950 need_setup = 1;
8952 if (!netif_carrier_ok(tp->dev) &&
8953 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8954 MAC_STATUS_SIGNAL_DET))) {
8955 need_setup = 1;
8957 if (need_setup) {
8958 if (!tp->serdes_counter) {
8959 tw32_f(MAC_MODE,
8960 (tp->mac_mode &
8961 ~MAC_MODE_PORT_MODE_MASK));
8962 udelay(40);
8963 tw32_f(MAC_MODE, tp->mac_mode);
8964 udelay(40);
8966 tg3_setup_phy(tp, 0);
8968 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8969 tg3_flag(tp, 5780_CLASS)) {
8970 tg3_serdes_parallel_detect(tp);
8973 tp->timer_counter = tp->timer_multiplier;
8976 /* Heartbeat is only sent once every 2 seconds.
8978 * The heartbeat is to tell the ASF firmware that the host
8979 * driver is still alive. In the event that the OS crashes,
8980 * ASF needs to reset the hardware to free up the FIFO space
8981 * that may be filled with rx packets destined for the host.
8982 * If the FIFO is full, ASF will no longer function properly.
8984 * Unintended resets have been reported on real-time kernels,
8985 * where the timer doesn't run on time. Netpoll will also have
8986 * the same problem.
8988 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8989 * to check the ring condition when the heartbeat is expiring
8990 * before doing the reset. This will prevent most unintended
8991 * resets.
8993 if (!--tp->asf_counter) {
8994 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8995 tg3_wait_for_event_ack(tp);
8997 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8998 FWCMD_NICDRV_ALIVE3);
8999 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9000 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9001 TG3_FW_UPDATE_TIMEOUT_SEC);
9003 tg3_generate_fw_event(tp);
9005 tp->asf_counter = tp->asf_multiplier;
9008 spin_unlock(&tp->lock);
9010 restart_timer:
9011 tp->timer.expires = jiffies + tp->timer_offset;
9012 add_timer(&tp->timer);
9015 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9017 irq_handler_t fn;
9018 unsigned long flags;
9019 char *name;
9020 struct tg3_napi *tnapi = &tp->napi[irq_num];
9022 if (tp->irq_cnt == 1)
9023 name = tp->dev->name;
9024 else {
9025 name = &tnapi->irq_lbl[0];
9026 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9027 name[IFNAMSIZ-1] = 0;
9030 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9031 fn = tg3_msi;
9032 if (tg3_flag(tp, 1SHOT_MSI))
9033 fn = tg3_msi_1shot;
9034 flags = 0;
9035 } else {
9036 fn = tg3_interrupt;
9037 if (tg3_flag(tp, TAGGED_STATUS))
9038 fn = tg3_interrupt_tagged;
9039 flags = IRQF_SHARED;
9042 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9045 static int tg3_test_interrupt(struct tg3 *tp)
9047 struct tg3_napi *tnapi = &tp->napi[0];
9048 struct net_device *dev = tp->dev;
9049 int err, i, intr_ok = 0;
9050 u32 val;
9052 if (!netif_running(dev))
9053 return -ENODEV;
9055 tg3_disable_ints(tp);
9057 free_irq(tnapi->irq_vec, tnapi);
9060 * Turn off MSI one shot mode. Otherwise this test has no
9061 * observable way to know whether the interrupt was delivered.
9063 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9064 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9065 tw32(MSGINT_MODE, val);
9068 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9069 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9070 if (err)
9071 return err;
9073 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9074 tg3_enable_ints(tp);
9076 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9077 tnapi->coal_now);
9079 for (i = 0; i < 5; i++) {
9080 u32 int_mbox, misc_host_ctrl;
9082 int_mbox = tr32_mailbox(tnapi->int_mbox);
9083 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9085 if ((int_mbox != 0) ||
9086 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9087 intr_ok = 1;
9088 break;
9091 msleep(10);
9094 tg3_disable_ints(tp);
9096 free_irq(tnapi->irq_vec, tnapi);
9098 err = tg3_request_irq(tp, 0);
9100 if (err)
9101 return err;
9103 if (intr_ok) {
9104 /* Reenable MSI one shot mode. */
9105 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9106 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9107 tw32(MSGINT_MODE, val);
9109 return 0;
9112 return -EIO;
9115 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
9116 * is successfully restored.
9118 static int tg3_test_msi(struct tg3 *tp)
9120 int err;
9121 u16 pci_cmd;
9123 if (!tg3_flag(tp, USING_MSI))
9124 return 0;
9126 /* Turn off SERR reporting in case MSI terminates with Master
9127 * Abort.
9129 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9130 pci_write_config_word(tp->pdev, PCI_COMMAND,
9131 pci_cmd & ~PCI_COMMAND_SERR);
9133 err = tg3_test_interrupt(tp);
9135 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9137 if (!err)
9138 return 0;
9140 /* other failures */
9141 if (err != -EIO)
9142 return err;
9144 /* MSI test failed, go back to INTx mode */
9145 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9146 "to INTx mode. Please report this failure to the PCI "
9147 "maintainer and include system chipset information\n");
9149 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9151 pci_disable_msi(tp->pdev);
9153 tg3_flag_clear(tp, USING_MSI);
9154 tp->napi[0].irq_vec = tp->pdev->irq;
9156 err = tg3_request_irq(tp, 0);
9157 if (err)
9158 return err;
9160 /* Need to reset the chip because the MSI cycle may have terminated
9161 * with Master Abort.
9163 tg3_full_lock(tp, 1);
9165 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9166 err = tg3_init_hw(tp, 1);
9168 tg3_full_unlock(tp);
9170 if (err)
9171 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9173 return err;
9176 static int tg3_request_firmware(struct tg3 *tp)
9178 const __be32 *fw_data;
9180 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9181 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9182 tp->fw_needed);
9183 return -ENOENT;
9186 fw_data = (void *)tp->fw->data;
9188 /* Firmware blob starts with version numbers, followed by
9189 * start address and _full_ length including BSS sections
9190 * (which must be longer than the actual data, of course).
9193 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9194 if (tp->fw_len < (tp->fw->size - 12)) {
9195 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9196 tp->fw_len, tp->fw_needed);
9197 release_firmware(tp->fw);
9198 tp->fw = NULL;
9199 return -EINVAL;
9202 /* We no longer need firmware; we have it. */
9203 tp->fw_needed = NULL;
9204 return 0;
9207 static bool tg3_enable_msix(struct tg3 *tp)
9209 int i, rc, cpus = num_online_cpus();
9210 struct msix_entry msix_ent[tp->irq_max];
9212 if (cpus == 1)
9213 /* Just fall back to the simpler MSI mode. */
9214 return false;
9217 * We want as many rx rings enabled as there are CPUs.
9218 * The first MSI-X vector only deals with link interrupts, etc.,
9219 * so we add one to the number of vectors we are requesting.
9221 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
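/* Example of the sizing above: on a 4-CPU system with tp->irq_max >= 5,
 * tp->irq_cnt becomes 5: one vector for link interrupts plus four rx
 * vectors.  (A single-CPU system already fell back to MSI above.)
 */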
9223 for (i = 0; i < tp->irq_max; i++) {
9224 msix_ent[i].entry = i;
9225 msix_ent[i].vector = 0;
9228 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9229 if (rc < 0) {
9230 return false;
9231 } else if (rc != 0) {
9232 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9233 return false;
9234 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9235 tp->irq_cnt, rc);
9236 tp->irq_cnt = rc;
9239 for (i = 0; i < tp->irq_max; i++)
9240 tp->napi[i].irq_vec = msix_ent[i].vector;
9242 netif_set_real_num_tx_queues(tp->dev, 1);
9243 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9244 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9245 pci_disable_msix(tp->pdev);
9246 return false;
9249 if (tp->irq_cnt > 1) {
9250 tg3_flag_set(tp, ENABLE_RSS);
9252 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9253 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9254 tg3_flag_set(tp, ENABLE_TSS);
9255 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9259 return true;
9262 static void tg3_ints_init(struct tg3 *tp)
9264 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9265 !tg3_flag(tp, TAGGED_STATUS)) {
9266 /* All MSI-supporting chips should support tagged
9267 * status. Assert that this is the case.
9269 netdev_warn(tp->dev,
9270 "MSI without TAGGED_STATUS? Not using MSI\n");
9271 goto defcfg;
9274 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9275 tg3_flag_set(tp, USING_MSIX);
9276 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9277 tg3_flag_set(tp, USING_MSI);
9279 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9280 u32 msi_mode = tr32(MSGINT_MODE);
9281 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9282 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9283 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9285 defcfg:
9286 if (!tg3_flag(tp, USING_MSIX)) {
9287 tp->irq_cnt = 1;
9288 tp->napi[0].irq_vec = tp->pdev->irq;
9289 netif_set_real_num_tx_queues(tp->dev, 1);
9290 netif_set_real_num_rx_queues(tp->dev, 1);
9294 static void tg3_ints_fini(struct tg3 *tp)
9296 if (tg3_flag(tp, USING_MSIX))
9297 pci_disable_msix(tp->pdev);
9298 else if (tg3_flag(tp, USING_MSI))
9299 pci_disable_msi(tp->pdev);
9300 tg3_flag_clear(tp, USING_MSI);
9301 tg3_flag_clear(tp, USING_MSIX);
9302 tg3_flag_clear(tp, ENABLE_RSS);
9303 tg3_flag_clear(tp, ENABLE_TSS);
9306 static int tg3_open(struct net_device *dev)
9308 struct tg3 *tp = netdev_priv(dev);
9309 int i, err;
9311 if (tp->fw_needed) {
9312 err = tg3_request_firmware(tp);
9313 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9314 if (err)
9315 return err;
9316 } else if (err) {
9317 netdev_warn(tp->dev, "TSO capability disabled\n");
9318 tg3_flag_clear(tp, TSO_CAPABLE);
9319 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9320 netdev_notice(tp->dev, "TSO capability restored\n");
9321 tg3_flag_set(tp, TSO_CAPABLE);
9325 netif_carrier_off(tp->dev);
9327 err = tg3_power_up(tp);
9328 if (err)
9329 return err;
9331 tg3_full_lock(tp, 0);
9333 tg3_disable_ints(tp);
9334 tg3_flag_clear(tp, INIT_COMPLETE);
9336 tg3_full_unlock(tp);
9339 * Set up interrupts first so we know how
9340 * many NAPI resources to allocate
9342 tg3_ints_init(tp);
9344 /* The placement of this call is tied
9345 * to the setup and use of Host TX descriptors.
9347 err = tg3_alloc_consistent(tp);
9348 if (err)
9349 goto err_out1;
9351 tg3_napi_init(tp);
9353 tg3_napi_enable(tp);
9355 for (i = 0; i < tp->irq_cnt; i++) {
9356 struct tg3_napi *tnapi = &tp->napi[i];
9357 err = tg3_request_irq(tp, i);
9358 if (err) {
9359 for (i--; i >= 0; i--)
9360 free_irq(tnapi->irq_vec, tnapi);
9361 break;
9365 if (err)
9366 goto err_out2;
9368 tg3_full_lock(tp, 0);
9370 err = tg3_init_hw(tp, 1);
9371 if (err) {
9372 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9373 tg3_free_rings(tp);
9374 } else {
9375 if (tg3_flag(tp, TAGGED_STATUS) &&
9376 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9377 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9378 tp->timer_offset = HZ;
9379 else
9380 tp->timer_offset = HZ / 10;
9382 BUG_ON(tp->timer_offset > HZ);
9383 tp->timer_counter = tp->timer_multiplier =
9384 (HZ / tp->timer_offset);
9385 tp->asf_counter = tp->asf_multiplier =
9386 ((HZ / tp->timer_offset) * 2);
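/* Example of the bookkeeping above with HZ = 1000 and tagged status:
 * timer_offset = HZ (one timer firing per second), timer_counter = 1,
 * so the once-per-second work in tg3_timer() runs on every firing,
 * and asf_counter = 2 yields the 2-second heartbeat described there.
 */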
9388 init_timer(&tp->timer);
9389 tp->timer.expires = jiffies + tp->timer_offset;
9390 tp->timer.data = (unsigned long) tp;
9391 tp->timer.function = tg3_timer;
9394 tg3_full_unlock(tp);
9396 if (err)
9397 goto err_out3;
9399 if (tg3_flag(tp, USING_MSI)) {
9400 err = tg3_test_msi(tp);
9402 if (err) {
9403 tg3_full_lock(tp, 0);
9404 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9405 tg3_free_rings(tp);
9406 tg3_full_unlock(tp);
9408 goto err_out2;
9411 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9412 u32 val = tr32(PCIE_TRANSACTION_CFG);
9414 tw32(PCIE_TRANSACTION_CFG,
9415 val | PCIE_TRANS_CFG_1SHOT_MSI);
9419 tg3_phy_start(tp);
9421 tg3_full_lock(tp, 0);
9423 add_timer(&tp->timer);
9424 tg3_flag_set(tp, INIT_COMPLETE);
9425 tg3_enable_ints(tp);
9427 tg3_full_unlock(tp);
9429 netif_tx_start_all_queues(dev);
9432 * Reset the loopback feature if it was turned on while the device
9433 * was down, to make sure that it's installed properly now.
9435 if (dev->features & NETIF_F_LOOPBACK)
9436 tg3_set_loopback(dev, dev->features);
9438 return 0;
9440 err_out3:
9441 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9442 struct tg3_napi *tnapi = &tp->napi[i];
9443 free_irq(tnapi->irq_vec, tnapi);
9446 err_out2:
9447 tg3_napi_disable(tp);
9448 tg3_napi_fini(tp);
9449 tg3_free_consistent(tp);
9451 err_out1:
9452 tg3_ints_fini(tp);
9453 return err;
9456 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9457 struct rtnl_link_stats64 *);
9458 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9460 static int tg3_close(struct net_device *dev)
9462 int i;
9463 struct tg3 *tp = netdev_priv(dev);
9465 tg3_napi_disable(tp);
9466 cancel_work_sync(&tp->reset_task);
9468 netif_tx_stop_all_queues(dev);
9470 del_timer_sync(&tp->timer);
9472 tg3_phy_stop(tp);
9474 tg3_full_lock(tp, 1);
9476 tg3_disable_ints(tp);
9478 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9479 tg3_free_rings(tp);
9480 tg3_flag_clear(tp, INIT_COMPLETE);
9482 tg3_full_unlock(tp);
9484 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9485 struct tg3_napi *tnapi = &tp->napi[i];
9486 free_irq(tnapi->irq_vec, tnapi);
9489 tg3_ints_fini(tp);
9491 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9493 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9494 sizeof(tp->estats_prev));
9496 tg3_napi_fini(tp);
9498 tg3_free_consistent(tp);
9500 tg3_power_down(tp);
9502 netif_carrier_off(tp->dev);
9504 return 0;
9507 static inline u64 get_stat64(tg3_stat64_t *val)
9509 return ((u64)val->high << 32) | ((u64)val->low);
9512 static u64 calc_crc_errors(struct tg3 *tp)
9514 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9516 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9517 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9518 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9519 u32 val;
9521 spin_lock_bh(&tp->lock);
9522 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9523 tg3_writephy(tp, MII_TG3_TEST1,
9524 val | MII_TG3_TEST1_CRC_EN);
9525 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9526 } else
9527 val = 0;
9528 spin_unlock_bh(&tp->lock);
9530 tp->phy_crc_errors += val;
9532 return tp->phy_crc_errors;
9535 return get_stat64(&hw_stats->rx_fcs_errors);
9538 #define ESTAT_ADD(member) \
9539 estats->member = old_estats->member + \
9540 get_stat64(&hw_stats->member)
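/* ESTAT_ADD() above folds the live 64-bit hardware counter into the
 * totals snapshotted into tp->estats_prev at the last tg3_close(), so
 * ethtool statistics keep accumulating across down/up cycles.
 */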
9542 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9544 struct tg3_ethtool_stats *estats = &tp->estats;
9545 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9546 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9548 if (!hw_stats)
9549 return old_estats;
9551 ESTAT_ADD(rx_octets);
9552 ESTAT_ADD(rx_fragments);
9553 ESTAT_ADD(rx_ucast_packets);
9554 ESTAT_ADD(rx_mcast_packets);
9555 ESTAT_ADD(rx_bcast_packets);
9556 ESTAT_ADD(rx_fcs_errors);
9557 ESTAT_ADD(rx_align_errors);
9558 ESTAT_ADD(rx_xon_pause_rcvd);
9559 ESTAT_ADD(rx_xoff_pause_rcvd);
9560 ESTAT_ADD(rx_mac_ctrl_rcvd);
9561 ESTAT_ADD(rx_xoff_entered);
9562 ESTAT_ADD(rx_frame_too_long_errors);
9563 ESTAT_ADD(rx_jabbers);
9564 ESTAT_ADD(rx_undersize_packets);
9565 ESTAT_ADD(rx_in_length_errors);
9566 ESTAT_ADD(rx_out_length_errors);
9567 ESTAT_ADD(rx_64_or_less_octet_packets);
9568 ESTAT_ADD(rx_65_to_127_octet_packets);
9569 ESTAT_ADD(rx_128_to_255_octet_packets);
9570 ESTAT_ADD(rx_256_to_511_octet_packets);
9571 ESTAT_ADD(rx_512_to_1023_octet_packets);
9572 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9573 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9574 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9575 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9576 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9578 ESTAT_ADD(tx_octets);
9579 ESTAT_ADD(tx_collisions);
9580 ESTAT_ADD(tx_xon_sent);
9581 ESTAT_ADD(tx_xoff_sent);
9582 ESTAT_ADD(tx_flow_control);
9583 ESTAT_ADD(tx_mac_errors);
9584 ESTAT_ADD(tx_single_collisions);
9585 ESTAT_ADD(tx_mult_collisions);
9586 ESTAT_ADD(tx_deferred);
9587 ESTAT_ADD(tx_excessive_collisions);
9588 ESTAT_ADD(tx_late_collisions);
9589 ESTAT_ADD(tx_collide_2times);
9590 ESTAT_ADD(tx_collide_3times);
9591 ESTAT_ADD(tx_collide_4times);
9592 ESTAT_ADD(tx_collide_5times);
9593 ESTAT_ADD(tx_collide_6times);
9594 ESTAT_ADD(tx_collide_7times);
9595 ESTAT_ADD(tx_collide_8times);
9596 ESTAT_ADD(tx_collide_9times);
9597 ESTAT_ADD(tx_collide_10times);
9598 ESTAT_ADD(tx_collide_11times);
9599 ESTAT_ADD(tx_collide_12times);
9600 ESTAT_ADD(tx_collide_13times);
9601 ESTAT_ADD(tx_collide_14times);
9602 ESTAT_ADD(tx_collide_15times);
9603 ESTAT_ADD(tx_ucast_packets);
9604 ESTAT_ADD(tx_mcast_packets);
9605 ESTAT_ADD(tx_bcast_packets);
9606 ESTAT_ADD(tx_carrier_sense_errors);
9607 ESTAT_ADD(tx_discards);
9608 ESTAT_ADD(tx_errors);
9610 ESTAT_ADD(dma_writeq_full);
9611 ESTAT_ADD(dma_write_prioq_full);
9612 ESTAT_ADD(rxbds_empty);
9613 ESTAT_ADD(rx_discards);
9614 ESTAT_ADD(rx_errors);
9615 ESTAT_ADD(rx_threshold_hit);
9617 ESTAT_ADD(dma_readq_full);
9618 ESTAT_ADD(dma_read_prioq_full);
9619 ESTAT_ADD(tx_comp_queue_full);
9621 ESTAT_ADD(ring_set_send_prod_index);
9622 ESTAT_ADD(ring_status_update);
9623 ESTAT_ADD(nic_irqs);
9624 ESTAT_ADD(nic_avoided_irqs);
9625 ESTAT_ADD(nic_tx_threshold_hit);
9627 ESTAT_ADD(mbuf_lwm_thresh_hit);
9629 return estats;
9632 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9633 struct rtnl_link_stats64 *stats)
9635 struct tg3 *tp = netdev_priv(dev);
9636 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9637 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9639 if (!hw_stats)
9640 return old_stats;
9642 stats->rx_packets = old_stats->rx_packets +
9643 get_stat64(&hw_stats->rx_ucast_packets) +
9644 get_stat64(&hw_stats->rx_mcast_packets) +
9645 get_stat64(&hw_stats->rx_bcast_packets);
9647 stats->tx_packets = old_stats->tx_packets +
9648 get_stat64(&hw_stats->tx_ucast_packets) +
9649 get_stat64(&hw_stats->tx_mcast_packets) +
9650 get_stat64(&hw_stats->tx_bcast_packets);
9652 stats->rx_bytes = old_stats->rx_bytes +
9653 get_stat64(&hw_stats->rx_octets);
9654 stats->tx_bytes = old_stats->tx_bytes +
9655 get_stat64(&hw_stats->tx_octets);
9657 stats->rx_errors = old_stats->rx_errors +
9658 get_stat64(&hw_stats->rx_errors);
9659 stats->tx_errors = old_stats->tx_errors +
9660 get_stat64(&hw_stats->tx_errors) +
9661 get_stat64(&hw_stats->tx_mac_errors) +
9662 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9663 get_stat64(&hw_stats->tx_discards);
9665 stats->multicast = old_stats->multicast +
9666 get_stat64(&hw_stats->rx_mcast_packets);
9667 stats->collisions = old_stats->collisions +
9668 get_stat64(&hw_stats->tx_collisions);
9670 stats->rx_length_errors = old_stats->rx_length_errors +
9671 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9672 get_stat64(&hw_stats->rx_undersize_packets);
9674 stats->rx_over_errors = old_stats->rx_over_errors +
9675 get_stat64(&hw_stats->rxbds_empty);
9676 stats->rx_frame_errors = old_stats->rx_frame_errors +
9677 get_stat64(&hw_stats->rx_align_errors);
9678 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9679 get_stat64(&hw_stats->tx_discards);
9680 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9681 get_stat64(&hw_stats->tx_carrier_sense_errors);
9683 stats->rx_crc_errors = old_stats->rx_crc_errors +
9684 calc_crc_errors(tp);
9686 stats->rx_missed_errors = old_stats->rx_missed_errors +
9687 get_stat64(&hw_stats->rx_discards);
9689 stats->rx_dropped = tp->rx_dropped;
9691 return stats;
9694 static inline u32 calc_crc(unsigned char *buf, int len)
9696 u32 reg;
9697 u32 tmp;
9698 int j, k;
9700 reg = 0xffffffff;
9702 for (j = 0; j < len; j++) {
9703 reg ^= buf[j];
9705 for (k = 0; k < 8; k++) {
9706 tmp = reg & 0x01;
9708 reg >>= 1;
9710 if (tmp)
9711 reg ^= 0xedb88320;
9715 return ~reg;
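/* calc_crc() above is the standard bit-reflected Ethernet CRC-32
 * (polynomial 0x04c11db7, which is 0xedb88320 in reflected form),
 * processed one bit per inner-loop iteration.  __tg3_set_rx_mode()
 * below runs each multicast MAC address through it to select one of
 * the 128 hash-filter bits.
 */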
9718 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9720 /* accept or reject all multicast frames */
9721 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9722 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9723 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9724 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9727 static void __tg3_set_rx_mode(struct net_device *dev)
9729 struct tg3 *tp = netdev_priv(dev);
9730 u32 rx_mode;
9732 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9733 RX_MODE_KEEP_VLAN_TAG);
9735 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9736 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9737 * flag clear.
9739 if (!tg3_flag(tp, ENABLE_ASF))
9740 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9741 #endif
9743 if (dev->flags & IFF_PROMISC) {
9744 /* Promiscuous mode. */
9745 rx_mode |= RX_MODE_PROMISC;
9746 } else if (dev->flags & IFF_ALLMULTI) {
9747 /* Accept all multicast. */
9748 tg3_set_multi(tp, 1);
9749 } else if (netdev_mc_empty(dev)) {
9750 /* Reject all multicast. */
9751 tg3_set_multi(tp, 0);
9752 } else {
9753 /* Accept one or more multicast(s). */
9754 struct netdev_hw_addr *ha;
9755 u32 mc_filter[4] = { 0, };
9756 u32 regidx;
9757 u32 bit;
9758 u32 crc;
9760 netdev_for_each_mc_addr(ha, dev) {
9761 crc = calc_crc(ha->addr, ETH_ALEN);
9762 bit = ~crc & 0x7f;
9763 regidx = (bit & 0x60) >> 5;
9764 bit &= 0x1f;
9765 mc_filter[regidx] |= (1 << bit);
9768 tw32(MAC_HASH_REG_0, mc_filter[0]);
9769 tw32(MAC_HASH_REG_1, mc_filter[1]);
9770 tw32(MAC_HASH_REG_2, mc_filter[2]);
9771 tw32(MAC_HASH_REG_3, mc_filter[3]);
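/* Breakdown of the mapping above: the complemented CRC's low seven
 * bits select one of 128 filter bits; bits 6:5 of that value pick one
 * of the four 32-bit MAC_HASH_REG_n registers and bits 4:0 pick the
 * bit within it.  E.g. bit 0x43 lands in register 2 (0x43 >> 5), bit
 * position 3 (0x43 & 0x1f).
 */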
9774 if (rx_mode != tp->rx_mode) {
9775 tp->rx_mode = rx_mode;
9776 tw32_f(MAC_RX_MODE, rx_mode);
9777 udelay(10);
9781 static void tg3_set_rx_mode(struct net_device *dev)
9783 struct tg3 *tp = netdev_priv(dev);
9785 if (!netif_running(dev))
9786 return;
9788 tg3_full_lock(tp, 0);
9789 __tg3_set_rx_mode(dev);
9790 tg3_full_unlock(tp);
9793 static int tg3_get_regs_len(struct net_device *dev)
9795 return TG3_REG_BLK_SIZE;
9798 static void tg3_get_regs(struct net_device *dev,
9799 struct ethtool_regs *regs, void *_p)
9801 struct tg3 *tp = netdev_priv(dev);
9803 regs->version = 0;
9805 memset(_p, 0, TG3_REG_BLK_SIZE);
9807 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9808 return;
9810 tg3_full_lock(tp, 0);
9812 tg3_dump_legacy_regs(tp, (u32 *)_p);
9814 tg3_full_unlock(tp);
9817 static int tg3_get_eeprom_len(struct net_device *dev)
9819 struct tg3 *tp = netdev_priv(dev);
9821 return tp->nvram_size;
9824 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9826 struct tg3 *tp = netdev_priv(dev);
9827 int ret;
9828 u8 *pd;
9829 u32 i, offset, len, b_offset, b_count;
9830 __be32 val;
9832 if (tg3_flag(tp, NO_NVRAM))
9833 return -EINVAL;
9835 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9836 return -EAGAIN;
9838 offset = eeprom->offset;
9839 len = eeprom->len;
9840 eeprom->len = 0;
9842 eeprom->magic = TG3_EEPROM_MAGIC;
9844 if (offset & 3) {
9845 /* adjustments to start on required 4 byte boundary */
9846 b_offset = offset & 3;
9847 b_count = 4 - b_offset;
9848 if (b_count > len) {
9849 /* i.e. offset=1 len=2 */
9850 b_count = len;
9852 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9853 if (ret)
9854 return ret;
9855 memcpy(data, ((char *)&val) + b_offset, b_count);
9856 len -= b_count;
9857 offset += b_count;
9858 eeprom->len += b_count;
9861 /* read bytes up to the last 4 byte boundary */
9862 pd = &data[eeprom->len];
9863 for (i = 0; i < (len - (len & 3)); i += 4) {
9864 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9865 if (ret) {
9866 eeprom->len += i;
9867 return ret;
9869 memcpy(pd + i, &val, 4);
9871 eeprom->len += i;
9873 if (len & 3) {
9874 /* read last bytes not ending on 4 byte boundary */
9875 pd = &data[eeprom->len];
9876 b_count = len & 3;
9877 b_offset = offset + len - b_count;
9878 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9879 if (ret)
9880 return ret;
9881 memcpy(pd, &val, b_count);
9882 eeprom->len += b_count;
9884 return 0;
9887 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9889 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9891 struct tg3 *tp = netdev_priv(dev);
9892 int ret;
9893 u32 offset, len, b_offset, odd_len;
9894 u8 *buf;
9895 __be32 start, end;
9897 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9898 return -EAGAIN;
9900 if (tg3_flag(tp, NO_NVRAM) ||
9901 eeprom->magic != TG3_EEPROM_MAGIC)
9902 return -EINVAL;
9904 offset = eeprom->offset;
9905 len = eeprom->len;
9907 if ((b_offset = (offset & 3))) {
9908 /* adjustments to start on required 4 byte boundary */
9909 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9910 if (ret)
9911 return ret;
9912 len += b_offset;
9913 offset &= ~3;
9914 if (len < 4)
9915 len = 4;
9918 odd_len = 0;
9919 if (len & 3) {
9920 /* adjustments to end on required 4 byte boundary */
9921 odd_len = 1;
9922 len = (len + 3) & ~3;
9923 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9924 if (ret)
9925 return ret;
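/* Worked example of the alignment fixups above: for offset = 5,
 * len = 6, b_offset = 1 and the word at offset 4 is fetched as
 * 'start'; len grows to 7, odd_len rounds it up to 8 and the word at
 * offset 8 is fetched as 'end'.  The scratch buffer written back at
 * offset 4 is then start | caller data | end.
 */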
9928 buf = data;
9929 if (b_offset || odd_len) {
9930 buf = kmalloc(len, GFP_KERNEL);
9931 if (!buf)
9932 return -ENOMEM;
9933 if (b_offset)
9934 memcpy(buf, &start, 4);
9935 if (odd_len)
9936 memcpy(buf+len-4, &end, 4);
9937 memcpy(buf + b_offset, data, eeprom->len);
9940 ret = tg3_nvram_write_block(tp, offset, len, buf);
9942 if (buf != data)
9943 kfree(buf);
9945 return ret;
9948 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9950 struct tg3 *tp = netdev_priv(dev);
9952 if (tg3_flag(tp, USE_PHYLIB)) {
9953 struct phy_device *phydev;
9954 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9955 return -EAGAIN;
9956 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9957 return phy_ethtool_gset(phydev, cmd);
9960 cmd->supported = (SUPPORTED_Autoneg);
9962 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9963 cmd->supported |= (SUPPORTED_1000baseT_Half |
9964 SUPPORTED_1000baseT_Full);
9966 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9967 cmd->supported |= (SUPPORTED_100baseT_Half |
9968 SUPPORTED_100baseT_Full |
9969 SUPPORTED_10baseT_Half |
9970 SUPPORTED_10baseT_Full |
9971 SUPPORTED_TP);
9972 cmd->port = PORT_TP;
9973 } else {
9974 cmd->supported |= SUPPORTED_FIBRE;
9975 cmd->port = PORT_FIBRE;
9978 cmd->advertising = tp->link_config.advertising;
9979 if (tg3_flag(tp, PAUSE_AUTONEG)) {
9980 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
9981 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
9982 cmd->advertising |= ADVERTISED_Pause;
9983 } else {
9984 cmd->advertising |= ADVERTISED_Pause |
9985 ADVERTISED_Asym_Pause;
9987 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
9988 cmd->advertising |= ADVERTISED_Asym_Pause;
9991 if (netif_running(dev)) {
9992 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9993 cmd->duplex = tp->link_config.active_duplex;
9994 } else {
9995 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9996 cmd->duplex = DUPLEX_INVALID;
9998 cmd->phy_address = tp->phy_addr;
9999 cmd->transceiver = XCVR_INTERNAL;
10000 cmd->autoneg = tp->link_config.autoneg;
10001 cmd->maxtxpkt = 0;
10002 cmd->maxrxpkt = 0;
10003 return 0;
10006 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10008 struct tg3 *tp = netdev_priv(dev);
10009 u32 speed = ethtool_cmd_speed(cmd);
10011 if (tg3_flag(tp, USE_PHYLIB)) {
10012 struct phy_device *phydev;
10013 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10014 return -EAGAIN;
10015 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10016 return phy_ethtool_sset(phydev, cmd);
10019 if (cmd->autoneg != AUTONEG_ENABLE &&
10020 cmd->autoneg != AUTONEG_DISABLE)
10021 return -EINVAL;
10023 if (cmd->autoneg == AUTONEG_DISABLE &&
10024 cmd->duplex != DUPLEX_FULL &&
10025 cmd->duplex != DUPLEX_HALF)
10026 return -EINVAL;
10028 if (cmd->autoneg == AUTONEG_ENABLE) {
10029 u32 mask = ADVERTISED_Autoneg |
10030 ADVERTISED_Pause |
10031 ADVERTISED_Asym_Pause;
10033 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10034 mask |= ADVERTISED_1000baseT_Half |
10035 ADVERTISED_1000baseT_Full;
10037 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10038 mask |= ADVERTISED_100baseT_Half |
10039 ADVERTISED_100baseT_Full |
10040 ADVERTISED_10baseT_Half |
10041 ADVERTISED_10baseT_Full |
10042 ADVERTISED_TP;
10043 else
10044 mask |= ADVERTISED_FIBRE;
10046 if (cmd->advertising & ~mask)
10047 return -EINVAL;
10049 mask &= (ADVERTISED_1000baseT_Half |
10050 ADVERTISED_1000baseT_Full |
10051 ADVERTISED_100baseT_Half |
10052 ADVERTISED_100baseT_Full |
10053 ADVERTISED_10baseT_Half |
10054 ADVERTISED_10baseT_Full);
10056 cmd->advertising &= mask;
10057 } else {
10058 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10059 if (speed != SPEED_1000)
10060 return -EINVAL;
10062 if (cmd->duplex != DUPLEX_FULL)
10063 return -EINVAL;
10064 } else {
10065 if (speed != SPEED_100 &&
10066 speed != SPEED_10)
10067 return -EINVAL;
10071 tg3_full_lock(tp, 0);
10073 tp->link_config.autoneg = cmd->autoneg;
10074 if (cmd->autoneg == AUTONEG_ENABLE) {
10075 tp->link_config.advertising = (cmd->advertising |
10076 ADVERTISED_Autoneg);
10077 tp->link_config.speed = SPEED_INVALID;
10078 tp->link_config.duplex = DUPLEX_INVALID;
10079 } else {
10080 tp->link_config.advertising = 0;
10081 tp->link_config.speed = speed;
10082 tp->link_config.duplex = cmd->duplex;
10085 tp->link_config.orig_speed = tp->link_config.speed;
10086 tp->link_config.orig_duplex = tp->link_config.duplex;
10087 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10089 if (netif_running(dev))
10090 tg3_setup_phy(tp, 1);
10092 tg3_full_unlock(tp);
10094 return 0;
10097 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10099 struct tg3 *tp = netdev_priv(dev);
10101 strcpy(info->driver, DRV_MODULE_NAME);
10102 strcpy(info->version, DRV_MODULE_VERSION);
10103 strcpy(info->fw_version, tp->fw_ver);
10104 strcpy(info->bus_info, pci_name(tp->pdev));
10107 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10109 struct tg3 *tp = netdev_priv(dev);
10111 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10112 wol->supported = WAKE_MAGIC;
10113 else
10114 wol->supported = 0;
10115 wol->wolopts = 0;
10116 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10117 wol->wolopts = WAKE_MAGIC;
10118 memset(&wol->sopass, 0, sizeof(wol->sopass));
10121 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10123 struct tg3 *tp = netdev_priv(dev);
10124 struct device *dp = &tp->pdev->dev;
10126 if (wol->wolopts & ~WAKE_MAGIC)
10127 return -EINVAL;
10128 if ((wol->wolopts & WAKE_MAGIC) &&
10129 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10130 return -EINVAL;
10132 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10134 spin_lock_bh(&tp->lock);
10135 if (device_may_wakeup(dp))
10136 tg3_flag_set(tp, WOL_ENABLE);
10137 else
10138 tg3_flag_clear(tp, WOL_ENABLE);
10139 spin_unlock_bh(&tp->lock);
10141 return 0;
10144 static u32 tg3_get_msglevel(struct net_device *dev)
10146 struct tg3 *tp = netdev_priv(dev);
10147 return tp->msg_enable;
10150 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10152 struct tg3 *tp = netdev_priv(dev);
10153 tp->msg_enable = value;
10156 static int tg3_nway_reset(struct net_device *dev)
10158 struct tg3 *tp = netdev_priv(dev);
10159 int r;
10161 if (!netif_running(dev))
10162 return -EAGAIN;
10164 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10165 return -EINVAL;
10167 if (tg3_flag(tp, USE_PHYLIB)) {
10168 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10169 return -EAGAIN;
10170 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10171 } else {
10172 u32 bmcr;
10174 spin_lock_bh(&tp->lock);
10175 r = -EINVAL;
10176 tg3_readphy(tp, MII_BMCR, &bmcr);
10177 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10178 ((bmcr & BMCR_ANENABLE) ||
10179 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10180 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10181 BMCR_ANENABLE);
10182 r = 0;
10184 spin_unlock_bh(&tp->lock);
10187 return r;
10190 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10192 struct tg3 *tp = netdev_priv(dev);
10194 ering->rx_max_pending = tp->rx_std_ring_mask;
10195 ering->rx_mini_max_pending = 0;
10196 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10197 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10198 else
10199 ering->rx_jumbo_max_pending = 0;
10201 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10203 ering->rx_pending = tp->rx_pending;
10204 ering->rx_mini_pending = 0;
10205 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10206 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10207 else
10208 ering->rx_jumbo_pending = 0;
10210 ering->tx_pending = tp->napi[0].tx_pending;
10213 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10215 struct tg3 *tp = netdev_priv(dev);
10216 int i, irq_sync = 0, err = 0;
10218 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10219 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10220 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10221 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10222 (tg3_flag(tp, TSO_BUG) &&
10223 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10224 return -EINVAL;
10226 if (netif_running(dev)) {
10227 tg3_phy_stop(tp);
10228 tg3_netif_stop(tp);
10229 irq_sync = 1;
10232 tg3_full_lock(tp, irq_sync);
10234 tp->rx_pending = ering->rx_pending;
10236 if (tg3_flag(tp, MAX_RXPEND_64) &&
10237 tp->rx_pending > 63)
10238 tp->rx_pending = 63;
10239 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10241 for (i = 0; i < tp->irq_max; i++)
10242 tp->napi[i].tx_pending = ering->tx_pending;
10244 if (netif_running(dev)) {
10245 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10246 err = tg3_restart_hw(tp, 1);
10247 if (!err)
10248 tg3_netif_start(tp);
10251 tg3_full_unlock(tp);
10253 if (irq_sync && !err)
10254 tg3_phy_start(tp);
10256 return err;
10259 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10261 struct tg3 *tp = netdev_priv(dev);
10263 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10265 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10266 epause->rx_pause = 1;
10267 else
10268 epause->rx_pause = 0;
10270 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10271 epause->tx_pause = 1;
10272 else
10273 epause->tx_pause = 0;
10276 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10278 struct tg3 *tp = netdev_priv(dev);
10279 int err = 0;
10281 if (tg3_flag(tp, USE_PHYLIB)) {
10282 u32 newadv;
10283 struct phy_device *phydev;
10285 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10287 if (!(phydev->supported & SUPPORTED_Pause) ||
10288 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10289 (epause->rx_pause != epause->tx_pause)))
10290 return -EINVAL;
10292 tp->link_config.flowctrl = 0;
10293 if (epause->rx_pause) {
10294 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10296 if (epause->tx_pause) {
10297 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10298 newadv = ADVERTISED_Pause;
10299 } else
10300 newadv = ADVERTISED_Pause |
10301 ADVERTISED_Asym_Pause;
10302 } else if (epause->tx_pause) {
10303 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10304 newadv = ADVERTISED_Asym_Pause;
10305 } else
10306 newadv = 0;
10308 if (epause->autoneg)
10309 tg3_flag_set(tp, PAUSE_AUTONEG);
10310 else
10311 tg3_flag_clear(tp, PAUSE_AUTONEG);
10313 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10314 u32 oldadv = phydev->advertising &
10315 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10316 if (oldadv != newadv) {
10317 phydev->advertising &=
10318 ~(ADVERTISED_Pause |
10319 ADVERTISED_Asym_Pause);
10320 phydev->advertising |= newadv;
10321 if (phydev->autoneg) {
10322 /*
10323 * Always renegotiate the link to
10324 * inform our link partner of our
10325 * flow control settings, even if the
10326 * flow control is forced. Let
10327 * tg3_adjust_link() do the final
10328 * flow control setup.
10329 */
10330 return phy_start_aneg(phydev);
10334 if (!epause->autoneg)
10335 tg3_setup_flow_control(tp, 0, 0);
10336 } else {
10337 tp->link_config.orig_advertising &=
10338 ~(ADVERTISED_Pause |
10339 ADVERTISED_Asym_Pause);
10340 tp->link_config.orig_advertising |= newadv;
10342 } else {
10343 int irq_sync = 0;
10345 if (netif_running(dev)) {
10346 tg3_netif_stop(tp);
10347 irq_sync = 1;
10350 tg3_full_lock(tp, irq_sync);
10352 if (epause->autoneg)
10353 tg3_flag_set(tp, PAUSE_AUTONEG);
10354 else
10355 tg3_flag_clear(tp, PAUSE_AUTONEG);
10356 if (epause->rx_pause)
10357 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10358 else
10359 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10360 if (epause->tx_pause)
10361 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10362 else
10363 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10365 if (netif_running(dev)) {
10366 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10367 err = tg3_restart_hw(tp, 1);
10368 if (!err)
10369 tg3_netif_start(tp);
10372 tg3_full_unlock(tp);
10375 return err;
10378 static int tg3_get_sset_count(struct net_device *dev, int sset)
10380 switch (sset) {
10381 case ETH_SS_TEST:
10382 return TG3_NUM_TEST;
10383 case ETH_SS_STATS:
10384 return TG3_NUM_STATS;
10385 default:
10386 return -EOPNOTSUPP;
10390 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10392 switch (stringset) {
10393 case ETH_SS_STATS:
10394 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10395 break;
10396 case ETH_SS_TEST:
10397 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10398 break;
10399 default:
10400 WARN_ON(1); /* we need a WARN() */
10401 break;
10405 static int tg3_set_phys_id(struct net_device *dev,
10406 enum ethtool_phys_id_state state)
10408 struct tg3 *tp = netdev_priv(dev);
10410 if (!netif_running(tp->dev))
10411 return -EAGAIN;
10413 switch (state) {
10414 case ETHTOOL_ID_ACTIVE:
10415 return 1; /* cycle on/off once per second */
10417 case ETHTOOL_ID_ON:
10418 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10419 LED_CTRL_1000MBPS_ON |
10420 LED_CTRL_100MBPS_ON |
10421 LED_CTRL_10MBPS_ON |
10422 LED_CTRL_TRAFFIC_OVERRIDE |
10423 LED_CTRL_TRAFFIC_BLINK |
10424 LED_CTRL_TRAFFIC_LED);
10425 break;
10427 case ETHTOOL_ID_OFF:
10428 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10429 LED_CTRL_TRAFFIC_OVERRIDE);
10430 break;
10432 case ETHTOOL_ID_INACTIVE:
10433 tw32(MAC_LED_CTRL, tp->led_ctrl);
10434 break;
10437 return 0;
10440 static void tg3_get_ethtool_stats(struct net_device *dev,
10441 struct ethtool_stats *estats, u64 *tmp_stats)
10443 struct tg3 *tp = netdev_priv(dev);
10444 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10447 static __be32 * tg3_vpd_readblock(struct tg3 *tp)
10449 int i;
10450 __be32 *buf;
10451 u32 offset = 0, len = 0;
10452 u32 magic, val;
10454 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10455 return NULL;
10457 if (magic == TG3_EEPROM_MAGIC) {
10458 for (offset = TG3_NVM_DIR_START;
10459 offset < TG3_NVM_DIR_END;
10460 offset += TG3_NVM_DIRENT_SIZE) {
10461 if (tg3_nvram_read(tp, offset, &val))
10462 return NULL;
10464 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10465 TG3_NVM_DIRTYPE_EXTVPD)
10466 break;
10469 if (offset != TG3_NVM_DIR_END) {
10470 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10471 if (tg3_nvram_read(tp, offset + 4, &offset))
10472 return NULL;
10474 offset = tg3_nvram_logical_addr(tp, offset);
10478 if (!offset || !len) {
10479 offset = TG3_NVM_VPD_OFF;
10480 len = TG3_NVM_VPD_LEN;
10483 buf = kmalloc(len, GFP_KERNEL);
10484 if (buf == NULL)
10485 return NULL;
10487 if (magic == TG3_EEPROM_MAGIC) {
10488 for (i = 0; i < len; i += 4) {
10489 /* The data is in little-endian format in NVRAM.
10490 * Use the big-endian read routines to preserve
10491 * the byte order as it exists in NVRAM.
10492 */
10493 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10494 goto error;
10496 } else {
10497 u8 *ptr;
10498 ssize_t cnt;
10499 unsigned int pos = 0;
10501 ptr = (u8 *)&buf[0];
10502 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10503 cnt = pci_read_vpd(tp->pdev, pos,
10504 len - pos, ptr);
10505 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10506 cnt = 0;
10507 else if (cnt < 0)
10508 goto error;
10510 if (pos != len)
10511 goto error;
10514 return buf;
10516 error:
10517 kfree(buf);
10518 return NULL;
10521 #define NVRAM_TEST_SIZE 0x100
10522 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10523 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10524 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10525 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
10526 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
10527 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x4c
10528 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10529 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10531 static int tg3_test_nvram(struct tg3 *tp)
10533 u32 csum, magic;
10534 __be32 *buf;
10535 int i, j, k, err = 0, size;
10537 if (tg3_flag(tp, NO_NVRAM))
10538 return 0;
10540 if (tg3_nvram_read(tp, 0, &magic) != 0)
10541 return -EIO;
10543 if (magic == TG3_EEPROM_MAGIC)
10544 size = NVRAM_TEST_SIZE;
10545 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10546 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10547 TG3_EEPROM_SB_FORMAT_1) {
10548 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10549 case TG3_EEPROM_SB_REVISION_0:
10550 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10551 break;
10552 case TG3_EEPROM_SB_REVISION_2:
10553 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10554 break;
10555 case TG3_EEPROM_SB_REVISION_3:
10556 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10557 break;
10558 case TG3_EEPROM_SB_REVISION_4:
10559 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10560 break;
10561 case TG3_EEPROM_SB_REVISION_5:
10562 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10563 break;
10564 case TG3_EEPROM_SB_REVISION_6:
10565 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10566 break;
10567 default:
10568 return -EIO;
10570 } else
10571 return 0;
10572 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10573 size = NVRAM_SELFBOOT_HW_SIZE;
10574 else
10575 return -EIO;
10577 buf = kmalloc(size, GFP_KERNEL);
10578 if (buf == NULL)
10579 return -ENOMEM;
10581 err = -EIO;
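/* Pull the whole checksummed region into buf, one 32-bit word at a time. */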
10582 for (i = 0, j = 0; i < size; i += 4, j++) {
10583 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10584 if (err)
10585 break;
10587 if (i < size)
10588 goto out;
10590 /* Selfboot format */
10591 magic = be32_to_cpu(buf[0]);
10592 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10593 TG3_EEPROM_MAGIC_FW) {
10594 u8 *buf8 = (u8 *) buf, csum8 = 0;
10596 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10597 TG3_EEPROM_SB_REVISION_2) {
10598 /* For rev 2, the csum doesn't include the MBA. */
10599 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10600 csum8 += buf8[i];
10601 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10602 csum8 += buf8[i];
10603 } else {
10604 for (i = 0; i < size; i++)
10605 csum8 += buf8[i];
10608 if (csum8 == 0) {
10609 err = 0;
10610 goto out;
10613 err = -EIO;
10614 goto out;
10617 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10618 TG3_EEPROM_MAGIC_HW) {
10619 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10620 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10621 u8 *buf8 = (u8 *) buf;
10623 /* Separate the parity bits and the data bytes. */
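/* Bytes 0 and 8 each hold 7 parity bits, and bytes 16-17 hold the
 * remaining 6 + 8, covering the 28 (0x1c) data bytes in between.
 */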
10624 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10625 if ((i == 0) || (i == 8)) {
10626 int l;
10627 u8 msk;
10629 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10630 parity[k++] = buf8[i] & msk;
10631 i++;
10632 } else if (i == 16) {
10633 int l;
10634 u8 msk;
10636 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10637 parity[k++] = buf8[i] & msk;
10638 i++;
10640 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10641 parity[k++] = buf8[i] & msk;
10642 i++;
10644 data[j++] = buf8[i];
10647 err = -EIO;
10648 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10649 u8 hw8 = hweight8(data[i]);
10651 if ((hw8 & 0x1) && parity[i])
10652 goto out;
10653 else if (!(hw8 & 0x1) && !parity[i])
10654 goto out;
10656 err = 0;
10657 goto out;
10660 err = -EIO;
10662 /* Bootstrap checksum at offset 0x10 */
10663 csum = calc_crc((unsigned char *) buf, 0x10);
10664 if (csum != le32_to_cpu(buf[0x10/4]))
10665 goto out;
10667 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10668 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10669 if (csum != le32_to_cpu(buf[0xfc/4]))
10670 goto out;
10672 kfree(buf);
10674 buf = tg3_vpd_readblock(tp);
10675 if (!buf)
10676 return -ENOMEM;
10678 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10679 PCI_VPD_LRDT_RO_DATA);
10680 if (i > 0) {
10681 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10682 if (j < 0)
10683 goto out;
10685 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10686 goto out;
10688 i += PCI_VPD_LRDT_TAG_SIZE;
10689 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10690 PCI_VPD_RO_KEYWORD_CHKSUM);
10691 if (j > 0) {
10692 u8 csum8 = 0;
10694 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10696 for (i = 0; i <= j; i++)
10697 csum8 += ((u8 *)buf)[i];
10699 if (csum8)
10700 goto out;
10704 err = 0;
10706 out:
10707 kfree(buf);
10708 return err;
10711 #define TG3_SERDES_TIMEOUT_SEC 2
10712 #define TG3_COPPER_TIMEOUT_SEC 6
10714 static int tg3_test_link(struct tg3 *tp)
10716 int i, max;
10718 if (!netif_running(tp->dev))
10719 return -ENODEV;
10721 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10722 max = TG3_SERDES_TIMEOUT_SEC;
10723 else
10724 max = TG3_COPPER_TIMEOUT_SEC;
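/* Poll for carrier once a second, giving up early if interrupted. */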
10726 for (i = 0; i < max; i++) {
10727 if (netif_carrier_ok(tp->dev))
10728 return 0;
10730 if (msleep_interruptible(1000))
10731 break;
10734 return -EIO;
10737 /* Only test the commonly used registers */
10738 static int tg3_test_registers(struct tg3 *tp)
10740 int i, is_5705, is_5750;
10741 u32 offset, read_mask, write_mask, val, save_val, read_val;
10742 static struct {
10743 u16 offset;
10744 u16 flags;
10745 #define TG3_FL_5705 0x1
10746 #define TG3_FL_NOT_5705 0x2
10747 #define TG3_FL_NOT_5788 0x4
10748 #define TG3_FL_NOT_5750 0x8
10749 u32 read_mask;
10750 u32 write_mask;
10751 } reg_tbl[] = {
10752 /* MAC Control Registers */
10753 { MAC_MODE, TG3_FL_NOT_5705,
10754 0x00000000, 0x00ef6f8c },
10755 { MAC_MODE, TG3_FL_5705,
10756 0x00000000, 0x01ef6b8c },
10757 { MAC_STATUS, TG3_FL_NOT_5705,
10758 0x03800107, 0x00000000 },
10759 { MAC_STATUS, TG3_FL_5705,
10760 0x03800100, 0x00000000 },
10761 { MAC_ADDR_0_HIGH, 0x0000,
10762 0x00000000, 0x0000ffff },
10763 { MAC_ADDR_0_LOW, 0x0000,
10764 0x00000000, 0xffffffff },
10765 { MAC_RX_MTU_SIZE, 0x0000,
10766 0x00000000, 0x0000ffff },
10767 { MAC_TX_MODE, 0x0000,
10768 0x00000000, 0x00000070 },
10769 { MAC_TX_LENGTHS, 0x0000,
10770 0x00000000, 0x00003fff },
10771 { MAC_RX_MODE, TG3_FL_NOT_5705,
10772 0x00000000, 0x000007fc },
10773 { MAC_RX_MODE, TG3_FL_5705,
10774 0x00000000, 0x000007dc },
10775 { MAC_HASH_REG_0, 0x0000,
10776 0x00000000, 0xffffffff },
10777 { MAC_HASH_REG_1, 0x0000,
10778 0x00000000, 0xffffffff },
10779 { MAC_HASH_REG_2, 0x0000,
10780 0x00000000, 0xffffffff },
10781 { MAC_HASH_REG_3, 0x0000,
10782 0x00000000, 0xffffffff },
10784 /* Receive Data and Receive BD Initiator Control Registers. */
10785 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10786 0x00000000, 0xffffffff },
10787 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10788 0x00000000, 0xffffffff },
10789 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10790 0x00000000, 0x00000003 },
10791 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10792 0x00000000, 0xffffffff },
10793 { RCVDBDI_STD_BD+0, 0x0000,
10794 0x00000000, 0xffffffff },
10795 { RCVDBDI_STD_BD+4, 0x0000,
10796 0x00000000, 0xffffffff },
10797 { RCVDBDI_STD_BD+8, 0x0000,
10798 0x00000000, 0xffff0002 },
10799 { RCVDBDI_STD_BD+0xc, 0x0000,
10800 0x00000000, 0xffffffff },
10802 /* Receive BD Initiator Control Registers. */
10803 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10804 0x00000000, 0xffffffff },
10805 { RCVBDI_STD_THRESH, TG3_FL_5705,
10806 0x00000000, 0x000003ff },
10807 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10808 0x00000000, 0xffffffff },
10810 /* Host Coalescing Control Registers. */
10811 { HOSTCC_MODE, TG3_FL_NOT_5705,
10812 0x00000000, 0x00000004 },
10813 { HOSTCC_MODE, TG3_FL_5705,
10814 0x00000000, 0x000000f6 },
10815 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10816 0x00000000, 0xffffffff },
10817 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10818 0x00000000, 0x000003ff },
10819 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10820 0x00000000, 0xffffffff },
10821 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10822 0x00000000, 0x000003ff },
10823 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10824 0x00000000, 0xffffffff },
10825 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10826 0x00000000, 0x000000ff },
10827 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10828 0x00000000, 0xffffffff },
10829 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10830 0x00000000, 0x000000ff },
10831 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10832 0x00000000, 0xffffffff },
10833 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10834 0x00000000, 0xffffffff },
10835 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10836 0x00000000, 0xffffffff },
10837 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10838 0x00000000, 0x000000ff },
10839 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10840 0x00000000, 0xffffffff },
10841 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10842 0x00000000, 0x000000ff },
10843 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10844 0x00000000, 0xffffffff },
10845 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10846 0x00000000, 0xffffffff },
10847 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10848 0x00000000, 0xffffffff },
10849 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10850 0x00000000, 0xffffffff },
10851 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10852 0x00000000, 0xffffffff },
10853 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10854 0xffffffff, 0x00000000 },
10855 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10856 0xffffffff, 0x00000000 },
10858 /* Buffer Manager Control Registers. */
10859 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10860 0x00000000, 0x007fff80 },
10861 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10862 0x00000000, 0x007fffff },
10863 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10864 0x00000000, 0x0000003f },
10865 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10866 0x00000000, 0x000001ff },
10867 { BUFMGR_MB_HIGH_WATER, 0x0000,
10868 0x00000000, 0x000001ff },
10869 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10870 0xffffffff, 0x00000000 },
10871 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10872 0xffffffff, 0x00000000 },
10874 /* Mailbox Registers */
10875 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10876 0x00000000, 0x000001ff },
10877 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10878 0x00000000, 0x000001ff },
10879 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10880 0x00000000, 0x000007ff },
10881 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10882 0x00000000, 0x000001ff },
10884 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10887 is_5705 = is_5750 = 0;
10888 if (tg3_flag(tp, 5705_PLUS)) {
10889 is_5705 = 1;
10890 if (tg3_flag(tp, 5750_PLUS))
10891 is_5750 = 1;
10894 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10895 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10896 continue;
10898 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10899 continue;
10901 if (tg3_flag(tp, IS_5788) &&
10902 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10903 continue;
10905 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10906 continue;
10908 offset = (u32) reg_tbl[i].offset;
10909 read_mask = reg_tbl[i].read_mask;
10910 write_mask = reg_tbl[i].write_mask;
10912 /* Save the original register content */
10913 save_val = tr32(offset);
10915 /* Determine the read-only value. */
10916 read_val = save_val & read_mask;
10918 /* Write zero to the register, then make sure the read-only bits
10919 * are not changed and the read/write bits are all zeros.
10920 */
10921 tw32(offset, 0);
10923 val = tr32(offset);
10925 /* Test the read-only and read/write bits. */
10926 if (((val & read_mask) != read_val) || (val & write_mask))
10927 goto out;
10929 /* Write ones to all the bits defined by RdMask and WrMask, then
10930 * make sure the read-only bits are not changed and the
10931 * read/write bits are all ones.
10932 */
10933 tw32(offset, read_mask | write_mask);
10935 val = tr32(offset);
10937 /* Test the read-only bits. */
10938 if ((val & read_mask) != read_val)
10939 goto out;
10941 /* Test the read/write bits. */
10942 if ((val & write_mask) != write_mask)
10943 goto out;
10945 tw32(offset, save_val);
10948 return 0;
10950 out:
10951 if (netif_msg_hw(tp))
10952 netdev_err(tp->dev,
10953 "Register test failed at offset %x\n", offset);
10954 tw32(offset, save_val);
10955 return -EIO;
10958 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10960 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10961 int i;
10962 u32 j;
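/* Write each test pattern to every 32-bit word in the window, reading
 * each word back immediately to verify it sticks.
 */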
10964 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10965 for (j = 0; j < len; j += 4) {
10966 u32 val;
10968 tg3_write_mem(tp, offset + j, test_pattern[i]);
10969 tg3_read_mem(tp, offset + j, &val);
10970 if (val != test_pattern[i])
10971 return -EIO;
10974 return 0;
10977 static int tg3_test_memory(struct tg3 *tp)
10979 static struct mem_entry {
10980 u32 offset;
10981 u32 len;
10982 } mem_tbl_570x[] = {
10983 { 0x00000000, 0x00b50},
10984 { 0x00002000, 0x1c000},
10985 { 0xffffffff, 0x00000}
10986 }, mem_tbl_5705[] = {
10987 { 0x00000100, 0x0000c},
10988 { 0x00000200, 0x00008},
10989 { 0x00004000, 0x00800},
10990 { 0x00006000, 0x01000},
10991 { 0x00008000, 0x02000},
10992 { 0x00010000, 0x0e000},
10993 { 0xffffffff, 0x00000}
10994 }, mem_tbl_5755[] = {
10995 { 0x00000200, 0x00008},
10996 { 0x00004000, 0x00800},
10997 { 0x00006000, 0x00800},
10998 { 0x00008000, 0x02000},
10999 { 0x00010000, 0x0c000},
11000 { 0xffffffff, 0x00000}
11001 }, mem_tbl_5906[] = {
11002 { 0x00000200, 0x00008},
11003 { 0x00004000, 0x00400},
11004 { 0x00006000, 0x00400},
11005 { 0x00008000, 0x01000},
11006 { 0x00010000, 0x01000},
11007 { 0xffffffff, 0x00000}
11008 }, mem_tbl_5717[] = {
11009 { 0x00000200, 0x00008},
11010 { 0x00010000, 0x0a000},
11011 { 0x00020000, 0x13c00},
11012 { 0xffffffff, 0x00000}
11013 }, mem_tbl_57765[] = {
11014 { 0x00000200, 0x00008},
11015 { 0x00004000, 0x00800},
11016 { 0x00006000, 0x09800},
11017 { 0x00010000, 0x0a000},
11018 { 0xffffffff, 0x00000}
11020 struct mem_entry *mem_tbl;
11021 int err = 0;
11022 int i;
11024 if (tg3_flag(tp, 5717_PLUS))
11025 mem_tbl = mem_tbl_5717;
11026 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11027 mem_tbl = mem_tbl_57765;
11028 else if (tg3_flag(tp, 5755_PLUS))
11029 mem_tbl = mem_tbl_5755;
11030 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11031 mem_tbl = mem_tbl_5906;
11032 else if (tg3_flag(tp, 5705_PLUS))
11033 mem_tbl = mem_tbl_5705;
11034 else
11035 mem_tbl = mem_tbl_570x;
11037 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11038 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11039 if (err)
11040 break;
11043 return err;
11046 #define TG3_MAC_LOOPBACK 0
11047 #define TG3_PHY_LOOPBACK 1
11048 #define TG3_TSO_LOOPBACK 2
11050 #define TG3_TSO_MSS 500
11052 #define TG3_TSO_IP_HDR_LEN 20
11053 #define TG3_TSO_TCP_HDR_LEN 20
11054 #define TG3_TSO_TCP_OPT_LEN 12
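/* Template for the TSO loopback packet: Ethertype 0x0800, a 20-byte
 * IPv4 header, then a 32-byte TCP header (20 bytes plus 12 bytes of
 * timestamp options); length and checksum fields are filled in later.
 */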
11056 static const u8 tg3_tso_header[] = {
11057 0x08, 0x00,
11058 0x45, 0x00, 0x00, 0x00,
11059 0x00, 0x00, 0x40, 0x00,
11060 0x40, 0x06, 0x00, 0x00,
11061 0x0a, 0x00, 0x00, 0x01,
11062 0x0a, 0x00, 0x00, 0x02,
11063 0x0d, 0x00, 0xe0, 0x00,
11064 0x00, 0x00, 0x01, 0x00,
11065 0x00, 0x00, 0x02, 0x00,
11066 0x80, 0x10, 0x10, 0x00,
11067 0x14, 0x09, 0x00, 0x00,
11068 0x01, 0x01, 0x08, 0x0a,
11069 0x11, 0x11, 0x11, 0x11,
11070 0x11, 0x11, 0x11, 0x11,
11073 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11075 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11076 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11077 struct sk_buff *skb, *rx_skb;
11078 u8 *tx_data;
11079 dma_addr_t map;
11080 int num_pkts, tx_len, rx_len, i, err;
11081 struct tg3_rx_buffer_desc *desc;
11082 struct tg3_napi *tnapi, *rnapi;
11083 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11085 tnapi = &tp->napi[0];
11086 rnapi = &tp->napi[0];
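/* When RSS is enabled, loopback receives appear to complete on the
 * first RSS return ring (napi[1]); likewise TSS moves transmit
 * completions to napi[1].
 */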
11087 if (tp->irq_cnt > 1) {
11088 if (tg3_flag(tp, ENABLE_RSS))
11089 rnapi = &tp->napi[1];
11090 if (tg3_flag(tp, ENABLE_TSS))
11091 tnapi = &tp->napi[1];
11093 coal_now = tnapi->coal_now | rnapi->coal_now;
11095 if (loopback_mode == TG3_MAC_LOOPBACK) {
11096 /* HW errata - mac loopback fails in some cases on 5780.
11097 * Normal traffic and PHY loopback are not affected by
11098 * errata. Also, the MAC loopback test is deprecated for
11099 * all newer ASIC revisions.
11100 */
11101 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11102 tg3_flag(tp, CPMU_PRESENT))
11103 return 0;
11105 mac_mode = tp->mac_mode &
11106 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11107 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11108 if (!tg3_flag(tp, 5705_PLUS))
11109 mac_mode |= MAC_MODE_LINK_POLARITY;
11110 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11111 mac_mode |= MAC_MODE_PORT_MODE_MII;
11112 else
11113 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11114 tw32(MAC_MODE, mac_mode);
11115 } else {
11116 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11117 tg3_phy_fet_toggle_apd(tp, false);
11118 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11119 } else
11120 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11122 tg3_phy_toggle_automdix(tp, 0);
11124 tg3_writephy(tp, MII_BMCR, val);
11125 udelay(40);
11127 mac_mode = tp->mac_mode &
11128 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11129 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11130 tg3_writephy(tp, MII_TG3_FET_PTEST,
11131 MII_TG3_FET_PTEST_FRC_TX_LINK |
11132 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11133 /* The write needs to be flushed for the AC131 */
11134 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11135 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11136 mac_mode |= MAC_MODE_PORT_MODE_MII;
11137 } else
11138 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11140 /* reset to prevent losing 1st rx packet intermittently */
11141 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11142 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11143 udelay(10);
11144 tw32_f(MAC_RX_MODE, tp->rx_mode);
11146 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11147 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11148 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11149 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11150 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11151 mac_mode |= MAC_MODE_LINK_POLARITY;
11152 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11153 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11155 tw32(MAC_MODE, mac_mode);
11157 /* Wait for link */
11158 for (i = 0; i < 100; i++) {
11159 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11160 break;
11161 mdelay(1);
11165 err = -EIO;
11167 tx_len = pktsz;
11168 skb = netdev_alloc_skb(tp->dev, tx_len);
11169 if (!skb)
11170 return -ENOMEM;
11172 tx_data = skb_put(skb, tx_len);
11173 memcpy(tx_data, tp->dev->dev_addr, 6);
11174 memset(tx_data + 6, 0x0, 8);
11176 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11178 if (loopback_mode == TG3_TSO_LOOPBACK) {
11179 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11181 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11182 TG3_TSO_TCP_OPT_LEN;
11184 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11185 sizeof(tg3_tso_header));
11186 mss = TG3_TSO_MSS;
11188 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11189 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11191 /* Set the total length field in the IP header */
11192 iph->tot_len = htons((u16)(mss + hdr_len));
11194 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11195 TXD_FLAG_CPU_POST_DMA);
11197 if (tg3_flag(tp, HW_TSO_1) ||
11198 tg3_flag(tp, HW_TSO_2) ||
11199 tg3_flag(tp, HW_TSO_3)) {
11200 struct tcphdr *th;
11201 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11202 th = (struct tcphdr *)&tx_data[val];
11203 th->check = 0;
11204 } else
11205 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11207 if (tg3_flag(tp, HW_TSO_3)) {
11208 mss |= (hdr_len & 0xc) << 12;
11209 if (hdr_len & 0x10)
11210 base_flags |= 0x00000010;
11211 base_flags |= (hdr_len & 0x3e0) << 5;
11212 } else if (tg3_flag(tp, HW_TSO_2))
11213 mss |= hdr_len << 9;
11214 else if (tg3_flag(tp, HW_TSO_1) ||
11215 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11216 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11217 } else {
11218 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11221 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11222 } else {
11223 num_pkts = 1;
11224 data_off = ETH_HLEN;
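/* Fill the payload with an incrementing byte pattern that the
 * receive path verifies after the loopback completes.
 */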
11227 for (i = data_off; i < tx_len; i++)
11228 tx_data[i] = (u8) (i & 0xff);
11230 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11231 if (pci_dma_mapping_error(tp->pdev, map)) {
11232 dev_kfree_skb(skb);
11233 return -EIO;
11236 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11237 rnapi->coal_now);
11239 udelay(10);
11241 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11243 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11244 base_flags, (mss << 1) | 1);
11246 tnapi->tx_prod++;
11248 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11249 tr32_mailbox(tnapi->prodmbox);
11251 udelay(10);
11253 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11254 for (i = 0; i < 35; i++) {
11255 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11256 coal_now);
11258 udelay(10);
11260 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11261 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11262 if ((tx_idx == tnapi->tx_prod) &&
11263 (rx_idx == (rx_start_idx + num_pkts)))
11264 break;
11267 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11268 dev_kfree_skb(skb);
11270 if (tx_idx != tnapi->tx_prod)
11271 goto out;
11273 if (rx_idx != rx_start_idx + num_pkts)
11274 goto out;
11276 val = data_off;
11277 while (rx_idx != rx_start_idx) {
11278 desc = &rnapi->rx_rcb[rx_start_idx++];
11279 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11280 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11282 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11283 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11284 goto out;
11286 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11287 - ETH_FCS_LEN;
11289 if (loopback_mode != TG3_TSO_LOOPBACK) {
11290 if (rx_len != tx_len)
11291 goto out;
11293 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11294 if (opaque_key != RXD_OPAQUE_RING_STD)
11295 goto out;
11296 } else {
11297 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11298 goto out;
11300 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11301 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11302 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11303 goto out;
11306 if (opaque_key == RXD_OPAQUE_RING_STD) {
11307 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11308 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11309 mapping);
11310 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11311 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11312 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11313 mapping);
11314 } else
11315 goto out;
11317 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11318 PCI_DMA_FROMDEVICE);
11320 for (i = data_off; i < rx_len; i++, val++) {
11321 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11322 goto out;
11326 err = 0;
11328 /* tg3_free_rings will unmap and free the rx_skb */
11329 out:
11330 return err;
11333 #define TG3_STD_LOOPBACK_FAILED 1
11334 #define TG3_JMB_LOOPBACK_FAILED 2
11335 #define TG3_TSO_LOOPBACK_FAILED 4
11337 #define TG3_MAC_LOOPBACK_SHIFT 0
11338 #define TG3_PHY_LOOPBACK_SHIFT 4
11339 #define TG3_LOOPBACK_FAILED 0x00000077
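/* The loopback result is a bitmap: bits 0-2 flag std/jumbo/TSO failures
 * for MAC loopback, bits 4-6 the same for PHY loopback; 0x77 marks
 * everything failed.
 */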
11341 static int tg3_test_loopback(struct tg3 *tp)
11343 int err = 0;
11344 u32 eee_cap, cpmuctrl = 0;
11346 if (!netif_running(tp->dev))
11347 return TG3_LOOPBACK_FAILED;
11349 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11350 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11352 err = tg3_reset_hw(tp, 1);
11353 if (err) {
11354 err = TG3_LOOPBACK_FAILED;
11355 goto done;
11358 if (tg3_flag(tp, ENABLE_RSS)) {
11359 int i;
11361 /* Reroute all rx packets to the 1st queue */
11362 for (i = MAC_RSS_INDIR_TBL_0;
11363 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11364 tw32(i, 0x0);
11367 /* Turn off gphy autopowerdown. */
11368 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11369 tg3_phy_toggle_apd(tp, false);
11371 if (tg3_flag(tp, CPMU_PRESENT)) {
11372 int i;
11373 u32 status;
11375 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11377 /* Wait for up to 40 microseconds to acquire lock. */
11378 for (i = 0; i < 4; i++) {
11379 status = tr32(TG3_CPMU_MUTEX_GNT);
11380 if (status == CPMU_MUTEX_GNT_DRIVER)
11381 break;
11382 udelay(10);
11385 if (status != CPMU_MUTEX_GNT_DRIVER) {
11386 err = TG3_LOOPBACK_FAILED;
11387 goto done;
11390 /* Turn off link-based power management. */
11391 cpmuctrl = tr32(TG3_CPMU_CTRL);
11392 tw32(TG3_CPMU_CTRL,
11393 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11394 CPMU_CTRL_LINK_AWARE_MODE));
11397 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11398 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11400 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11401 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11402 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11404 if (tg3_flag(tp, CPMU_PRESENT)) {
11405 tw32(TG3_CPMU_CTRL, cpmuctrl);
11407 /* Release the mutex */
11408 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11411 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11412 !tg3_flag(tp, USE_PHYLIB)) {
11413 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11414 err |= TG3_STD_LOOPBACK_FAILED <<
11415 TG3_PHY_LOOPBACK_SHIFT;
11416 if (tg3_flag(tp, TSO_CAPABLE) &&
11417 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11418 err |= TG3_TSO_LOOPBACK_FAILED <<
11419 TG3_PHY_LOOPBACK_SHIFT;
11420 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11421 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11422 err |= TG3_JMB_LOOPBACK_FAILED <<
11423 TG3_PHY_LOOPBACK_SHIFT;
11426 /* Re-enable gphy autopowerdown. */
11427 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11428 tg3_phy_toggle_apd(tp, true);
11430 done:
11431 tp->phy_flags |= eee_cap;
11433 return err;
11436 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11437 u64 *data)
11439 struct tg3 *tp = netdev_priv(dev);
11441 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11442 tg3_power_up(tp)) {
11443 etest->flags |= ETH_TEST_FL_FAILED;
11444 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11445 return;
11448 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11450 if (tg3_test_nvram(tp) != 0) {
11451 etest->flags |= ETH_TEST_FL_FAILED;
11452 data[0] = 1;
11454 if (tg3_test_link(tp) != 0) {
11455 etest->flags |= ETH_TEST_FL_FAILED;
11456 data[1] = 1;
11458 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11459 int err, err2 = 0, irq_sync = 0;
11461 if (netif_running(dev)) {
11462 tg3_phy_stop(tp);
11463 tg3_netif_stop(tp);
11464 irq_sync = 1;
11467 tg3_full_lock(tp, irq_sync);
11469 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11470 err = tg3_nvram_lock(tp);
11471 tg3_halt_cpu(tp, RX_CPU_BASE);
11472 if (!tg3_flag(tp, 5705_PLUS))
11473 tg3_halt_cpu(tp, TX_CPU_BASE);
11474 if (!err)
11475 tg3_nvram_unlock(tp);
11477 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11478 tg3_phy_reset(tp);
11480 if (tg3_test_registers(tp) != 0) {
11481 etest->flags |= ETH_TEST_FL_FAILED;
11482 data[2] = 1;
11484 if (tg3_test_memory(tp) != 0) {
11485 etest->flags |= ETH_TEST_FL_FAILED;
11486 data[3] = 1;
11488 if ((data[4] = tg3_test_loopback(tp)) != 0)
11489 etest->flags |= ETH_TEST_FL_FAILED;
11491 tg3_full_unlock(tp);
11493 if (tg3_test_interrupt(tp) != 0) {
11494 etest->flags |= ETH_TEST_FL_FAILED;
11495 data[5] = 1;
11498 tg3_full_lock(tp, 0);
11500 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11501 if (netif_running(dev)) {
11502 tg3_flag_set(tp, INIT_COMPLETE);
11503 err2 = tg3_restart_hw(tp, 1);
11504 if (!err2)
11505 tg3_netif_start(tp);
11508 tg3_full_unlock(tp);
11510 if (irq_sync && !err2)
11511 tg3_phy_start(tp);
11513 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11514 tg3_power_down(tp);
11518 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11520 struct mii_ioctl_data *data = if_mii(ifr);
11521 struct tg3 *tp = netdev_priv(dev);
11522 int err;
11524 if (tg3_flag(tp, USE_PHYLIB)) {
11525 struct phy_device *phydev;
11526 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11527 return -EAGAIN;
11528 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11529 return phy_mii_ioctl(phydev, ifr, cmd);
11532 switch (cmd) {
11533 case SIOCGMIIPHY:
11534 data->phy_id = tp->phy_addr;
11536 /* fallthru */
11537 case SIOCGMIIREG: {
11538 u32 mii_regval;
11540 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11541 break; /* We have no PHY */
11543 if (!netif_running(dev))
11544 return -EAGAIN;
11546 spin_lock_bh(&tp->lock);
11547 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11548 spin_unlock_bh(&tp->lock);
11550 data->val_out = mii_regval;
11552 return err;
11555 case SIOCSMIIREG:
11556 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11557 break; /* We have no PHY */
11559 if (!netif_running(dev))
11560 return -EAGAIN;
11562 spin_lock_bh(&tp->lock);
11563 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11564 spin_unlock_bh(&tp->lock);
11566 return err;
11568 default:
11569 /* do nothing */
11570 break;
11572 return -EOPNOTSUPP;
11575 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11577 struct tg3 *tp = netdev_priv(dev);
11579 memcpy(ec, &tp->coal, sizeof(*ec));
11580 return 0;
11583 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11585 struct tg3 *tp = netdev_priv(dev);
11586 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11587 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
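/* Only pre-5705 chips support the irq-level and statistics coalescing
 * knobs; on 5705+ parts these limits stay zero, so any nonzero request
 * for them is rejected by the range checks below.
 */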
11589 if (!tg3_flag(tp, 5705_PLUS)) {
11590 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11591 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11592 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11593 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11596 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11597 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11598 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11599 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11600 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11601 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11602 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11603 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11604 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11605 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11606 return -EINVAL;
11608 /* No rx interrupts will be generated if both are zero */
11609 if ((ec->rx_coalesce_usecs == 0) &&
11610 (ec->rx_max_coalesced_frames == 0))
11611 return -EINVAL;
11613 /* No tx interrupts will be generated if both are zero */
11614 if ((ec->tx_coalesce_usecs == 0) &&
11615 (ec->tx_max_coalesced_frames == 0))
11616 return -EINVAL;
11618 /* Only copy relevant parameters, ignore all others. */
11619 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11620 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11621 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11622 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11623 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11624 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11625 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11626 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11627 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11629 if (netif_running(dev)) {
11630 tg3_full_lock(tp, 0);
11631 __tg3_set_coalesce(tp, &tp->coal);
11632 tg3_full_unlock(tp);
11634 return 0;
11637 static const struct ethtool_ops tg3_ethtool_ops = {
11638 .get_settings = tg3_get_settings,
11639 .set_settings = tg3_set_settings,
11640 .get_drvinfo = tg3_get_drvinfo,
11641 .get_regs_len = tg3_get_regs_len,
11642 .get_regs = tg3_get_regs,
11643 .get_wol = tg3_get_wol,
11644 .set_wol = tg3_set_wol,
11645 .get_msglevel = tg3_get_msglevel,
11646 .set_msglevel = tg3_set_msglevel,
11647 .nway_reset = tg3_nway_reset,
11648 .get_link = ethtool_op_get_link,
11649 .get_eeprom_len = tg3_get_eeprom_len,
11650 .get_eeprom = tg3_get_eeprom,
11651 .set_eeprom = tg3_set_eeprom,
11652 .get_ringparam = tg3_get_ringparam,
11653 .set_ringparam = tg3_set_ringparam,
11654 .get_pauseparam = tg3_get_pauseparam,
11655 .set_pauseparam = tg3_set_pauseparam,
11656 .self_test = tg3_self_test,
11657 .get_strings = tg3_get_strings,
11658 .set_phys_id = tg3_set_phys_id,
11659 .get_ethtool_stats = tg3_get_ethtool_stats,
11660 .get_coalesce = tg3_get_coalesce,
11661 .set_coalesce = tg3_set_coalesce,
11662 .get_sset_count = tg3_get_sset_count,
11665 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11667 u32 cursize, val, magic;
11669 tp->nvram_size = EEPROM_CHIP_SIZE;
11671 if (tg3_nvram_read(tp, 0, &magic) != 0)
11672 return;
11674 if ((magic != TG3_EEPROM_MAGIC) &&
11675 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11676 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11677 return;
11679 /*
11680 * Size the chip by reading offsets at increasing powers of two.
11681 * When we encounter our validation signature, we know the addressing
11682 * has wrapped around, and thus have our chip size.
11683 */
11684 cursize = 0x10;
11686 while (cursize < tp->nvram_size) {
11687 if (tg3_nvram_read(tp, cursize, &val) != 0)
11688 return;
11690 if (val == magic)
11691 break;
11693 cursize <<= 1;
11696 tp->nvram_size = cursize;
11699 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11701 u32 val;
11703 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11704 return;
11706 /* Selfboot format */
11707 if (val != TG3_EEPROM_MAGIC) {
11708 tg3_get_eeprom_size(tp);
11709 return;
11712 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11713 if (val != 0) {
11714 /* This is confusing. We want to operate on the
11715 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11716 * call will read from NVRAM and byteswap the data
11717 * according to the byteswapping settings for all
11718 * other register accesses. This ensures the data we
11719 * want will always reside in the lower 16-bits.
11720 * However, the data in NVRAM is in LE format, which
11721 * means the data from the NVRAM read will always be
11722 * opposite the endianness of the CPU. The 16-bit
11723 * byteswap then brings the data to CPU endianness.
11724 */
11725 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11726 return;
11729 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11732 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11734 u32 nvcfg1;
11736 nvcfg1 = tr32(NVRAM_CFG1);
11737 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11738 tg3_flag_set(tp, FLASH);
11739 } else {
11740 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11741 tw32(NVRAM_CFG1, nvcfg1);
11744 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11745 tg3_flag(tp, 5780_CLASS)) {
11746 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11747 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11748 tp->nvram_jedecnum = JEDEC_ATMEL;
11749 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11750 tg3_flag_set(tp, NVRAM_BUFFERED);
11751 break;
11752 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11753 tp->nvram_jedecnum = JEDEC_ATMEL;
11754 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11755 break;
11756 case FLASH_VENDOR_ATMEL_EEPROM:
11757 tp->nvram_jedecnum = JEDEC_ATMEL;
11758 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11759 tg3_flag_set(tp, NVRAM_BUFFERED);
11760 break;
11761 case FLASH_VENDOR_ST:
11762 tp->nvram_jedecnum = JEDEC_ST;
11763 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11764 tg3_flag_set(tp, NVRAM_BUFFERED);
11765 break;
11766 case FLASH_VENDOR_SAIFUN:
11767 tp->nvram_jedecnum = JEDEC_SAIFUN;
11768 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11769 break;
11770 case FLASH_VENDOR_SST_SMALL:
11771 case FLASH_VENDOR_SST_LARGE:
11772 tp->nvram_jedecnum = JEDEC_SST;
11773 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11774 break;
11776 } else {
11777 tp->nvram_jedecnum = JEDEC_ATMEL;
11778 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11779 tg3_flag_set(tp, NVRAM_BUFFERED);
11783 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11785 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11786 case FLASH_5752PAGE_SIZE_256:
11787 tp->nvram_pagesize = 256;
11788 break;
11789 case FLASH_5752PAGE_SIZE_512:
11790 tp->nvram_pagesize = 512;
11791 break;
11792 case FLASH_5752PAGE_SIZE_1K:
11793 tp->nvram_pagesize = 1024;
11794 break;
11795 case FLASH_5752PAGE_SIZE_2K:
11796 tp->nvram_pagesize = 2048;
11797 break;
11798 case FLASH_5752PAGE_SIZE_4K:
11799 tp->nvram_pagesize = 4096;
11800 break;
11801 case FLASH_5752PAGE_SIZE_264:
11802 tp->nvram_pagesize = 264;
11803 break;
11804 case FLASH_5752PAGE_SIZE_528:
11805 tp->nvram_pagesize = 528;
11806 break;
11810 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11812 u32 nvcfg1;
11814 nvcfg1 = tr32(NVRAM_CFG1);
11816 /* NVRAM protection for TPM */
11817 if (nvcfg1 & (1 << 27))
11818 tg3_flag_set(tp, PROTECTED_NVRAM);
11820 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11821 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11822 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11823 tp->nvram_jedecnum = JEDEC_ATMEL;
11824 tg3_flag_set(tp, NVRAM_BUFFERED);
11825 break;
11826 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11827 tp->nvram_jedecnum = JEDEC_ATMEL;
11828 tg3_flag_set(tp, NVRAM_BUFFERED);
11829 tg3_flag_set(tp, FLASH);
11830 break;
11831 case FLASH_5752VENDOR_ST_M45PE10:
11832 case FLASH_5752VENDOR_ST_M45PE20:
11833 case FLASH_5752VENDOR_ST_M45PE40:
11834 tp->nvram_jedecnum = JEDEC_ST;
11835 tg3_flag_set(tp, NVRAM_BUFFERED);
11836 tg3_flag_set(tp, FLASH);
11837 break;
11840 if (tg3_flag(tp, FLASH)) {
11841 tg3_nvram_get_pagesize(tp, nvcfg1);
11842 } else {
11843 /* For eeprom, set pagesize to maximum eeprom size */
11844 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11846 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11847 tw32(NVRAM_CFG1, nvcfg1);
11851 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11853 u32 nvcfg1, protect = 0;
11855 nvcfg1 = tr32(NVRAM_CFG1);
11857 /* NVRAM protection for TPM */
11858 if (nvcfg1 & (1 << 27)) {
11859 tg3_flag_set(tp, PROTECTED_NVRAM);
11860 protect = 1;
11863 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11864 switch (nvcfg1) {
11865 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11866 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11867 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11868 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11869 tp->nvram_jedecnum = JEDEC_ATMEL;
11870 tg3_flag_set(tp, NVRAM_BUFFERED);
11871 tg3_flag_set(tp, FLASH);
11872 tp->nvram_pagesize = 264;
11873 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11874 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11875 tp->nvram_size = (protect ? 0x3e200 :
11876 TG3_NVRAM_SIZE_512KB);
11877 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11878 tp->nvram_size = (protect ? 0x1f200 :
11879 TG3_NVRAM_SIZE_256KB);
11880 else
11881 tp->nvram_size = (protect ? 0x1f200 :
11882 TG3_NVRAM_SIZE_128KB);
11883 break;
11884 case FLASH_5752VENDOR_ST_M45PE10:
11885 case FLASH_5752VENDOR_ST_M45PE20:
11886 case FLASH_5752VENDOR_ST_M45PE40:
11887 tp->nvram_jedecnum = JEDEC_ST;
11888 tg3_flag_set(tp, NVRAM_BUFFERED);
11889 tg3_flag_set(tp, FLASH);
11890 tp->nvram_pagesize = 256;
11891 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11892 tp->nvram_size = (protect ?
11893 TG3_NVRAM_SIZE_64KB :
11894 TG3_NVRAM_SIZE_128KB);
11895 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11896 tp->nvram_size = (protect ?
11897 TG3_NVRAM_SIZE_64KB :
11898 TG3_NVRAM_SIZE_256KB);
11899 else
11900 tp->nvram_size = (protect ?
11901 TG3_NVRAM_SIZE_128KB :
11902 TG3_NVRAM_SIZE_512KB);
11903 break;
11907 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11909 u32 nvcfg1;
11911 nvcfg1 = tr32(NVRAM_CFG1);
11913 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11914 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11915 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11916 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11917 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11918 tp->nvram_jedecnum = JEDEC_ATMEL;
11919 tg3_flag_set(tp, NVRAM_BUFFERED);
11920 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11922 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11923 tw32(NVRAM_CFG1, nvcfg1);
11924 break;
11925 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11926 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11927 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11928 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11929 tp->nvram_jedecnum = JEDEC_ATMEL;
11930 tg3_flag_set(tp, NVRAM_BUFFERED);
11931 tg3_flag_set(tp, FLASH);
11932 tp->nvram_pagesize = 264;
11933 break;
11934 case FLASH_5752VENDOR_ST_M45PE10:
11935 case FLASH_5752VENDOR_ST_M45PE20:
11936 case FLASH_5752VENDOR_ST_M45PE40:
11937 tp->nvram_jedecnum = JEDEC_ST;
11938 tg3_flag_set(tp, NVRAM_BUFFERED);
11939 tg3_flag_set(tp, FLASH);
11940 tp->nvram_pagesize = 256;
11941 break;
11945 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11947 u32 nvcfg1, protect = 0;
11949 nvcfg1 = tr32(NVRAM_CFG1);
11951 /* NVRAM protection for TPM */
11952 if (nvcfg1 & (1 << 27)) {
11953 tg3_flag_set(tp, PROTECTED_NVRAM);
11954 protect = 1;
11957 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11958 switch (nvcfg1) {
11959 case FLASH_5761VENDOR_ATMEL_ADB021D:
11960 case FLASH_5761VENDOR_ATMEL_ADB041D:
11961 case FLASH_5761VENDOR_ATMEL_ADB081D:
11962 case FLASH_5761VENDOR_ATMEL_ADB161D:
11963 case FLASH_5761VENDOR_ATMEL_MDB021D:
11964 case FLASH_5761VENDOR_ATMEL_MDB041D:
11965 case FLASH_5761VENDOR_ATMEL_MDB081D:
11966 case FLASH_5761VENDOR_ATMEL_MDB161D:
11967 tp->nvram_jedecnum = JEDEC_ATMEL;
11968 tg3_flag_set(tp, NVRAM_BUFFERED);
11969 tg3_flag_set(tp, FLASH);
11970 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11971 tp->nvram_pagesize = 256;
11972 break;
11973 case FLASH_5761VENDOR_ST_A_M45PE20:
11974 case FLASH_5761VENDOR_ST_A_M45PE40:
11975 case FLASH_5761VENDOR_ST_A_M45PE80:
11976 case FLASH_5761VENDOR_ST_A_M45PE16:
11977 case FLASH_5761VENDOR_ST_M_M45PE20:
11978 case FLASH_5761VENDOR_ST_M_M45PE40:
11979 case FLASH_5761VENDOR_ST_M_M45PE80:
11980 case FLASH_5761VENDOR_ST_M_M45PE16:
11981 tp->nvram_jedecnum = JEDEC_ST;
11982 tg3_flag_set(tp, NVRAM_BUFFERED);
11983 tg3_flag_set(tp, FLASH);
11984 tp->nvram_pagesize = 256;
11985 break;
11988 if (protect) {
11989 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11990 } else {
11991 switch (nvcfg1) {
11992 case FLASH_5761VENDOR_ATMEL_ADB161D:
11993 case FLASH_5761VENDOR_ATMEL_MDB161D:
11994 case FLASH_5761VENDOR_ST_A_M45PE16:
11995 case FLASH_5761VENDOR_ST_M_M45PE16:
11996 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11997 break;
11998 case FLASH_5761VENDOR_ATMEL_ADB081D:
11999 case FLASH_5761VENDOR_ATMEL_MDB081D:
12000 case FLASH_5761VENDOR_ST_A_M45PE80:
12001 case FLASH_5761VENDOR_ST_M_M45PE80:
12002 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12003 break;
12004 case FLASH_5761VENDOR_ATMEL_ADB041D:
12005 case FLASH_5761VENDOR_ATMEL_MDB041D:
12006 case FLASH_5761VENDOR_ST_A_M45PE40:
12007 case FLASH_5761VENDOR_ST_M_M45PE40:
12008 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12009 break;
12010 case FLASH_5761VENDOR_ATMEL_ADB021D:
12011 case FLASH_5761VENDOR_ATMEL_MDB021D:
12012 case FLASH_5761VENDOR_ST_A_M45PE20:
12013 case FLASH_5761VENDOR_ST_M_M45PE20:
12014 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12015 break;
12020 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12022 tp->nvram_jedecnum = JEDEC_ATMEL;
12023 tg3_flag_set(tp, NVRAM_BUFFERED);
12024 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12027 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12029 u32 nvcfg1;
12031 nvcfg1 = tr32(NVRAM_CFG1);
12033 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12034 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12035 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12036 tp->nvram_jedecnum = JEDEC_ATMEL;
12037 tg3_flag_set(tp, NVRAM_BUFFERED);
12038 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12040 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12041 tw32(NVRAM_CFG1, nvcfg1);
12042 return;
12043 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12044 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12045 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12046 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12047 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12048 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12049 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12050 tp->nvram_jedecnum = JEDEC_ATMEL;
12051 tg3_flag_set(tp, NVRAM_BUFFERED);
12052 tg3_flag_set(tp, FLASH);
12054 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12055 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12056 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12057 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12058 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12059 break;
12060 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12061 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12062 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12063 break;
12064 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12065 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12066 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12067 break;
12069 break;
12070 case FLASH_5752VENDOR_ST_M45PE10:
12071 case FLASH_5752VENDOR_ST_M45PE20:
12072 case FLASH_5752VENDOR_ST_M45PE40:
12073 tp->nvram_jedecnum = JEDEC_ST;
12074 tg3_flag_set(tp, NVRAM_BUFFERED);
12075 tg3_flag_set(tp, FLASH);
12077 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12078 case FLASH_5752VENDOR_ST_M45PE10:
12079 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12080 break;
12081 case FLASH_5752VENDOR_ST_M45PE20:
12082 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12083 break;
12084 case FLASH_5752VENDOR_ST_M45PE40:
12085 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12086 break;
12088 break;
12089 default:
12090 tg3_flag_set(tp, NO_NVRAM);
12091 return;
12094 tg3_nvram_get_pagesize(tp, nvcfg1);
12095 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12096 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12100 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12102 u32 nvcfg1;
12104 nvcfg1 = tr32(NVRAM_CFG1);
12106 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12107 case FLASH_5717VENDOR_ATMEL_EEPROM:
12108 case FLASH_5717VENDOR_MICRO_EEPROM:
12109 tp->nvram_jedecnum = JEDEC_ATMEL;
12110 tg3_flag_set(tp, NVRAM_BUFFERED);
12111 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12113 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12114 tw32(NVRAM_CFG1, nvcfg1);
12115 return;
12116 case FLASH_5717VENDOR_ATMEL_MDB011D:
12117 case FLASH_5717VENDOR_ATMEL_ADB011B:
12118 case FLASH_5717VENDOR_ATMEL_ADB011D:
12119 case FLASH_5717VENDOR_ATMEL_MDB021D:
12120 case FLASH_5717VENDOR_ATMEL_ADB021B:
12121 case FLASH_5717VENDOR_ATMEL_ADB021D:
12122 case FLASH_5717VENDOR_ATMEL_45USPT:
12123 tp->nvram_jedecnum = JEDEC_ATMEL;
12124 tg3_flag_set(tp, NVRAM_BUFFERED);
12125 tg3_flag_set(tp, FLASH);
12127 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12128 case FLASH_5717VENDOR_ATMEL_MDB021D:
12129 /* Detect size with tg3_nvram_get_size() */
12130 break;
12131 case FLASH_5717VENDOR_ATMEL_ADB021B:
12132 case FLASH_5717VENDOR_ATMEL_ADB021D:
12133 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12134 break;
12135 default:
12136 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12137 break;
12139 break;
12140 case FLASH_5717VENDOR_ST_M_M25PE10:
12141 case FLASH_5717VENDOR_ST_A_M25PE10:
12142 case FLASH_5717VENDOR_ST_M_M45PE10:
12143 case FLASH_5717VENDOR_ST_A_M45PE10:
12144 case FLASH_5717VENDOR_ST_M_M25PE20:
12145 case FLASH_5717VENDOR_ST_A_M25PE20:
12146 case FLASH_5717VENDOR_ST_M_M45PE20:
12147 case FLASH_5717VENDOR_ST_A_M45PE20:
12148 case FLASH_5717VENDOR_ST_25USPT:
12149 case FLASH_5717VENDOR_ST_45USPT:
12150 tp->nvram_jedecnum = JEDEC_ST;
12151 tg3_flag_set(tp, NVRAM_BUFFERED);
12152 tg3_flag_set(tp, FLASH);
12154 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12155 case FLASH_5717VENDOR_ST_M_M25PE20:
12156 case FLASH_5717VENDOR_ST_M_M45PE20:
12157 /* Detect size with tg3_nvram_get_size() */
12158 break;
12159 case FLASH_5717VENDOR_ST_A_M25PE20:
12160 case FLASH_5717VENDOR_ST_A_M45PE20:
12161 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12162 break;
12163 default:
12164 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12165 break;
12167 break;
12168 default:
12169 tg3_flag_set(tp, NO_NVRAM);
12170 return;
12173 tg3_nvram_get_pagesize(tp, nvcfg1);
12174 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12175 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12178 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12180 u32 nvcfg1, nvmpinstrp;
12182 nvcfg1 = tr32(NVRAM_CFG1);
12183 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12185 switch (nvmpinstrp) {
12186 case FLASH_5720_EEPROM_HD:
12187 case FLASH_5720_EEPROM_LD:
12188 tp->nvram_jedecnum = JEDEC_ATMEL;
12189 tg3_flag_set(tp, NVRAM_BUFFERED);
12191 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12192 tw32(NVRAM_CFG1, nvcfg1);
12193 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12194 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12195 else
12196 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12197 return;
12198 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12199 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12200 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12201 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12202 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12203 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12204 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12205 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12206 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12207 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12208 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12209 case FLASH_5720VENDOR_ATMEL_45USPT:
12210 tp->nvram_jedecnum = JEDEC_ATMEL;
12211 tg3_flag_set(tp, NVRAM_BUFFERED);
12212 tg3_flag_set(tp, FLASH);
12214 switch (nvmpinstrp) {
12215 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12216 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12217 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12218 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12219 break;
12220 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12221 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12222 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12223 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12224 break;
12225 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12226 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12227 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12228 break;
12229 default:
12230 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12231 break;
12232 }
12233 break;
12234 case FLASH_5720VENDOR_M_ST_M25PE10:
12235 case FLASH_5720VENDOR_M_ST_M45PE10:
12236 case FLASH_5720VENDOR_A_ST_M25PE10:
12237 case FLASH_5720VENDOR_A_ST_M45PE10:
12238 case FLASH_5720VENDOR_M_ST_M25PE20:
12239 case FLASH_5720VENDOR_M_ST_M45PE20:
12240 case FLASH_5720VENDOR_A_ST_M25PE20:
12241 case FLASH_5720VENDOR_A_ST_M45PE20:
12242 case FLASH_5720VENDOR_M_ST_M25PE40:
12243 case FLASH_5720VENDOR_M_ST_M45PE40:
12244 case FLASH_5720VENDOR_A_ST_M25PE40:
12245 case FLASH_5720VENDOR_A_ST_M45PE40:
12246 case FLASH_5720VENDOR_M_ST_M25PE80:
12247 case FLASH_5720VENDOR_M_ST_M45PE80:
12248 case FLASH_5720VENDOR_A_ST_M25PE80:
12249 case FLASH_5720VENDOR_A_ST_M45PE80:
12250 case FLASH_5720VENDOR_ST_25USPT:
12251 case FLASH_5720VENDOR_ST_45USPT:
12252 tp->nvram_jedecnum = JEDEC_ST;
12253 tg3_flag_set(tp, NVRAM_BUFFERED);
12254 tg3_flag_set(tp, FLASH);
12256 switch (nvmpinstrp) {
12257 case FLASH_5720VENDOR_M_ST_M25PE20:
12258 case FLASH_5720VENDOR_M_ST_M45PE20:
12259 case FLASH_5720VENDOR_A_ST_M25PE20:
12260 case FLASH_5720VENDOR_A_ST_M45PE20:
12261 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12262 break;
12263 case FLASH_5720VENDOR_M_ST_M25PE40:
12264 case FLASH_5720VENDOR_M_ST_M45PE40:
12265 case FLASH_5720VENDOR_A_ST_M25PE40:
12266 case FLASH_5720VENDOR_A_ST_M45PE40:
12267 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12268 break;
12269 case FLASH_5720VENDOR_M_ST_M25PE80:
12270 case FLASH_5720VENDOR_M_ST_M45PE80:
12271 case FLASH_5720VENDOR_A_ST_M25PE80:
12272 case FLASH_5720VENDOR_A_ST_M45PE80:
12273 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12274 break;
12275 default:
12276 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12277 break;
12278 }
12279 break;
12280 default:
12281 tg3_flag_set(tp, NO_NVRAM);
12282 return;
12283 }
12285 tg3_nvram_get_pagesize(tp, nvcfg1);
12286 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12287 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12290 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12291 static void __devinit tg3_nvram_init(struct tg3 *tp)
12293 tw32_f(GRC_EEPROM_ADDR,
12294 (EEPROM_ADDR_FSM_RESET |
12295 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12296 EEPROM_ADDR_CLKPERD_SHIFT)));
12298 msleep(1);
12300 /* Enable seeprom accesses. */
12301 tw32_f(GRC_LOCAL_CTRL,
12302 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12303 udelay(100);
12305 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12306 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12307 tg3_flag_set(tp, NVRAM);
12309 if (tg3_nvram_lock(tp)) {
12310 netdev_warn(tp->dev,
12311 "Cannot get nvram lock, %s failed\n",
12312 __func__);
12313 return;
12315 tg3_enable_nvram_access(tp);
12317 tp->nvram_size = 0;
12319 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12320 tg3_get_5752_nvram_info(tp);
12321 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12322 tg3_get_5755_nvram_info(tp);
12323 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12324 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12325 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12326 tg3_get_5787_nvram_info(tp);
12327 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12328 tg3_get_5761_nvram_info(tp);
12329 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12330 tg3_get_5906_nvram_info(tp);
12331 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12332 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12333 tg3_get_57780_nvram_info(tp);
12334 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12335 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12336 tg3_get_5717_nvram_info(tp);
12337 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12338 tg3_get_5720_nvram_info(tp);
12339 else
12340 tg3_get_nvram_info(tp);
12342 if (tp->nvram_size == 0)
12343 tg3_get_nvram_size(tp);
12345 tg3_disable_nvram_access(tp);
12346 tg3_nvram_unlock(tp);
12348 } else {
12349 tg3_flag_clear(tp, NVRAM);
12350 tg3_flag_clear(tp, NVRAM_BUFFERED);
12352 tg3_get_eeprom_size(tp);
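/* Write a block to a legacy SEEPROM part one 32-bit word at a time,
 * polling EEPROM_ADDR_COMPLETE after each word. As with the other
 * write helpers below, offset and len are assumed to be dword
 * aligned.
 */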
12356 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12357 u32 offset, u32 len, u8 *buf)
12359 int i, j, rc = 0;
12360 u32 val;
12362 for (i = 0; i < len; i += 4) {
12363 u32 addr;
12364 __be32 data;
12366 addr = offset + i;
12368 memcpy(&data, buf + i, 4);
12370 /*
12371 * The SEEPROM interface expects the data to always be opposite
12372 * the native endian format. We accomplish this by reversing
12373 * all the operations that would have been performed on the
12374 * data from a call to tg3_nvram_read_be32().
12375 */
12376 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12378 val = tr32(GRC_EEPROM_ADDR);
12379 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12381 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12382 EEPROM_ADDR_READ);
12383 tw32(GRC_EEPROM_ADDR, val |
12384 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12385 (addr & EEPROM_ADDR_ADDR_MASK) |
12386 EEPROM_ADDR_START |
12387 EEPROM_ADDR_WRITE);
12389 for (j = 0; j < 1000; j++) {
12390 val = tr32(GRC_EEPROM_ADDR);
12392 if (val & EEPROM_ADDR_COMPLETE)
12393 break;
12394 msleep(1);
12396 if (!(val & EEPROM_ADDR_COMPLETE)) {
12397 rc = -EBUSY;
12398 break;
12402 return rc;
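/* Unbuffered flash parts must have a whole page erased before it can
 * be rewritten, so partial-page writes are handled by reading the
 * page into a bounce buffer, merging in the new data, erasing the
 * page, and writing the merged page back.
 */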
12405 /* offset and length are dword aligned */
12406 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12407 u8 *buf)
12409 int ret = 0;
12410 u32 pagesize = tp->nvram_pagesize;
12411 u32 pagemask = pagesize - 1;
12412 u32 nvram_cmd;
12413 u8 *tmp;
12415 tmp = kmalloc(pagesize, GFP_KERNEL);
12416 if (tmp == NULL)
12417 return -ENOMEM;
12419 while (len) {
12420 int j;
12421 u32 phy_addr, page_off, size;
12423 phy_addr = offset & ~pagemask;
12425 for (j = 0; j < pagesize; j += 4) {
12426 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12427 (__be32 *) (tmp + j));
12428 if (ret)
12429 break;
12431 if (ret)
12432 break;
12434 page_off = offset & pagemask;
12435 size = pagesize;
12436 if (len < size)
12437 size = len;
12439 len -= size;
12441 memcpy(tmp + page_off, buf, size);
12443 offset = offset + (pagesize - page_off);
12445 tg3_enable_nvram_access(tp);
12447 /*
12448 * Before we can erase the flash page, we need
12449 * to issue a special "write enable" command.
12450 */
12451 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12453 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12454 break;
12456 /* Erase the target page */
12457 tw32(NVRAM_ADDR, phy_addr);
12459 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12460 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12462 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12463 break;
12465 /* Issue another write enable to start the write. */
12466 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12468 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12469 break;
12471 for (j = 0; j < pagesize; j += 4) {
12472 __be32 data;
12474 data = *((__be32 *) (tmp + j));
12476 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12478 tw32(NVRAM_ADDR, phy_addr + j);
12480 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12481 NVRAM_CMD_WR;
12483 if (j == 0)
12484 nvram_cmd |= NVRAM_CMD_FIRST;
12485 else if (j == (pagesize - 4))
12486 nvram_cmd |= NVRAM_CMD_LAST;
12488 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12489 break;
12491 if (ret)
12492 break;
12495 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12496 tg3_nvram_exec_cmd(tp, nvram_cmd);
12498 kfree(tmp);
12500 return ret;
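/* Buffered parts accept writes without an explicit page erase; the
 * loop below streams one word at a time and brackets each page with
 * NVRAM_CMD_FIRST/NVRAM_CMD_LAST. Some older ST parts additionally
 * get a write-enable command at the start of each page.
 */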
12503 /* offset and length are dword aligned */
12504 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12505 u8 *buf)
12507 int i, ret = 0;
12509 for (i = 0; i < len; i += 4, offset += 4) {
12510 u32 page_off, phy_addr, nvram_cmd;
12511 __be32 data;
12513 memcpy(&data, buf + i, 4);
12514 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12516 page_off = offset % tp->nvram_pagesize;
12518 phy_addr = tg3_nvram_phys_addr(tp, offset);
12520 tw32(NVRAM_ADDR, phy_addr);
12522 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12524 if (page_off == 0 || i == 0)
12525 nvram_cmd |= NVRAM_CMD_FIRST;
12526 if (page_off == (tp->nvram_pagesize - 4))
12527 nvram_cmd |= NVRAM_CMD_LAST;
12529 if (i == (len - 4))
12530 nvram_cmd |= NVRAM_CMD_LAST;
12532 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12533 !tg3_flag(tp, 5755_PLUS) &&
12534 (tp->nvram_jedecnum == JEDEC_ST) &&
12535 (nvram_cmd & NVRAM_CMD_FIRST)) {
12537 if ((ret = tg3_nvram_exec_cmd(tp,
12538 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12539 NVRAM_CMD_DONE)))
12541 break;
12543 if (!tg3_flag(tp, FLASH)) {
12544 /* We always do complete word writes to eeprom. */
12545 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12548 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12549 break;
12551 return ret;
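/* Top-level NVRAM write entry point: lifts EEPROM write protection
 * for the duration of the operation and dispatches to the EEPROM,
 * buffered, or unbuffered writer as appropriate.
 */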
12554 /* offset and length are dword aligned */
12555 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12557 int ret;
12559 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12560 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12561 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12562 udelay(40);
12565 if (!tg3_flag(tp, NVRAM)) {
12566 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12567 } else {
12568 u32 grc_mode;
12570 ret = tg3_nvram_lock(tp);
12571 if (ret)
12572 return ret;
12574 tg3_enable_nvram_access(tp);
12575 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12576 tw32(NVRAM_WRITE1, 0x406);
12578 grc_mode = tr32(GRC_MODE);
12579 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12581 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12582 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12583 buf);
12584 } else {
12585 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12586 buf);
12589 grc_mode = tr32(GRC_MODE);
12590 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12592 tg3_disable_nvram_access(tp);
12593 tg3_nvram_unlock(tp);
12596 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12597 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12598 udelay(40);
12601 return ret;
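/* Fallback table mapping PCI subsystem IDs to PHY IDs, used when the
 * NVRAM carries no usable PHY information.
 */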
12604 struct subsys_tbl_ent {
12605 u16 subsys_vendor, subsys_devid;
12606 u32 phy_id;
12609 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12610 /* Broadcom boards. */
12611 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12612 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12613 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12614 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12615 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12616 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12617 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12618 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12619 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12620 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12621 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12622 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12623 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12624 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12625 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12626 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12627 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12628 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12629 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12630 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12631 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12632 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12634 /* 3com boards. */
12635 { TG3PCI_SUBVENDOR_ID_3COM,
12636 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12637 { TG3PCI_SUBVENDOR_ID_3COM,
12638 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12639 { TG3PCI_SUBVENDOR_ID_3COM,
12640 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12641 { TG3PCI_SUBVENDOR_ID_3COM,
12642 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12643 { TG3PCI_SUBVENDOR_ID_3COM,
12644 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12646 /* DELL boards. */
12647 { TG3PCI_SUBVENDOR_ID_DELL,
12648 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12649 { TG3PCI_SUBVENDOR_ID_DELL,
12650 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12651 { TG3PCI_SUBVENDOR_ID_DELL,
12652 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12653 { TG3PCI_SUBVENDOR_ID_DELL,
12654 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12656 /* Compaq boards. */
12657 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12658 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12659 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12660 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12661 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12662 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12663 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12664 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12665 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12666 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12668 /* IBM boards. */
12669 { TG3PCI_SUBVENDOR_ID_IBM,
12670 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12673 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12675 int i;
12677 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12678 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12679 tp->pdev->subsystem_vendor) &&
12680 (subsys_id_to_phy_id[i].subsys_devid ==
12681 tp->pdev->subsystem_device))
12682 return &subsys_id_to_phy_id[i];
12684 return NULL;
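/* Pull the board configuration out of the NIC SRAM shadow of the
 * EEPROM: PHY ID, LED mode, write-protect and WOL capability, and
 * the ASF/APE enables.
 */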
12687 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12689 u32 val;
12691 tp->phy_id = TG3_PHY_ID_INVALID;
12692 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12694 /* Assume an onboard device and WOL capable by default. */
12695 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12696 tg3_flag_set(tp, WOL_CAP);
12698 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12699 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12700 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12701 tg3_flag_set(tp, IS_NIC);
12703 val = tr32(VCPU_CFGSHDW);
12704 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12705 tg3_flag_set(tp, ASPM_WORKAROUND);
12706 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12707 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12708 tg3_flag_set(tp, WOL_ENABLE);
12709 device_set_wakeup_enable(&tp->pdev->dev, true);
12711 goto done;
12714 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12715 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12716 u32 nic_cfg, led_cfg;
12717 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12718 int eeprom_phy_serdes = 0;
12720 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12721 tp->nic_sram_data_cfg = nic_cfg;
12723 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12724 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12725 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12726 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12727 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12728 (ver > 0) && (ver < 0x100))
12729 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12731 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12732 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12734 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12735 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12736 eeprom_phy_serdes = 1;
12738 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12739 if (nic_phy_id != 0) {
12740 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12741 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12743 eeprom_phy_id = (id1 >> 16) << 10;
12744 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12745 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12746 } else
12747 eeprom_phy_id = 0;
12749 tp->phy_id = eeprom_phy_id;
12750 if (eeprom_phy_serdes) {
12751 if (!tg3_flag(tp, 5705_PLUS))
12752 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12753 else
12754 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12757 if (tg3_flag(tp, 5750_PLUS))
12758 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12759 SHASTA_EXT_LED_MODE_MASK);
12760 else
12761 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12763 switch (led_cfg) {
12764 default:
12765 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12766 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12767 break;
12769 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12770 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12771 break;
12773 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12774 tp->led_ctrl = LED_CTRL_MODE_MAC;
12776 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12777 * read on some older 5700/5701 bootcode.
12778 */
12779 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12780 ASIC_REV_5700 ||
12781 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12782 ASIC_REV_5701)
12783 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12785 break;
12787 case SHASTA_EXT_LED_SHARED:
12788 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12789 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12790 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12791 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12792 LED_CTRL_MODE_PHY_2);
12793 break;
12795 case SHASTA_EXT_LED_MAC:
12796 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12797 break;
12799 case SHASTA_EXT_LED_COMBO:
12800 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12801 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12802 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12803 LED_CTRL_MODE_PHY_2);
12804 break;
12808 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12809 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12810 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12811 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12813 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12814 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12816 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12817 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12818 if ((tp->pdev->subsystem_vendor ==
12819 PCI_VENDOR_ID_ARIMA) &&
12820 (tp->pdev->subsystem_device == 0x205a ||
12821 tp->pdev->subsystem_device == 0x2063))
12822 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12823 } else {
12824 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12825 tg3_flag_set(tp, IS_NIC);
12828 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12829 tg3_flag_set(tp, ENABLE_ASF);
12830 if (tg3_flag(tp, 5750_PLUS))
12831 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12834 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12835 tg3_flag(tp, 5750_PLUS))
12836 tg3_flag_set(tp, ENABLE_APE);
12838 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12839 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12840 tg3_flag_clear(tp, WOL_CAP);
12842 if (tg3_flag(tp, WOL_CAP) &&
12843 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12844 tg3_flag_set(tp, WOL_ENABLE);
12845 device_set_wakeup_enable(&tp->pdev->dev, true);
12848 if (cfg2 & (1 << 17))
12849 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12851 /* SerDes signal pre-emphasis in register 0x590 is set by the
12852 * bootcode if bit 18 is set. */
12853 if (cfg2 & (1 << 18))
12854 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12856 if ((tg3_flag(tp, 57765_PLUS) ||
12857 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12858 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12859 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12860 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12862 if (tg3_flag(tp, PCI_EXPRESS) &&
12863 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12864 !tg3_flag(tp, 57765_PLUS)) {
12865 u32 cfg3;
12867 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12868 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12869 tg3_flag_set(tp, ASPM_WORKAROUND);
12872 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12873 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12874 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12875 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12876 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12877 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12879 done:
12880 if (tg3_flag(tp, WOL_CAP))
12881 device_set_wakeup_enable(&tp->pdev->dev,
12882 tg3_flag(tp, WOL_ENABLE));
12883 else
12884 device_set_wakeup_capable(&tp->pdev->dev, false);
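/* Kick one command to the OTP controller and poll OTP_STATUS for up
 * to 1 ms for completion.
 */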
12887 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12889 int i;
12890 u32 val;
12892 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12893 tw32(OTP_CTRL, cmd);
12895 /* Wait for up to 1 ms for command to execute. */
12896 for (i = 0; i < 100; i++) {
12897 val = tr32(OTP_STATUS);
12898 if (val & OTP_STATUS_CMD_DONE)
12899 break;
12900 udelay(10);
12903 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12906 /* Read the gphy configuration from the OTP region of the chip. The gphy
12907 * configuration is a 32-bit value that straddles the alignment boundary.
12908 * We do two 32-bit reads and then shift and merge the results.
12909 */
12910 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12912 u32 bhalf_otp, thalf_otp;
12914 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12916 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12917 return 0;
12919 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12921 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12922 return 0;
12924 thalf_otp = tr32(OTP_READ_DATA);
12926 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12928 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12929 return 0;
12931 bhalf_otp = tr32(OTP_READ_DATA);
12933 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
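/* Seed link_config with everything this PHY type can advertise;
 * autonegotiation is the default.
 */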
12936 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12938 u32 adv = ADVERTISED_Autoneg |
12939 ADVERTISED_Pause;
12941 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12942 adv |= ADVERTISED_1000baseT_Half |
12943 ADVERTISED_1000baseT_Full;
12945 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12946 adv |= ADVERTISED_100baseT_Half |
12947 ADVERTISED_100baseT_Full |
12948 ADVERTISED_10baseT_Half |
12949 ADVERTISED_10baseT_Full |
12950 ADVERTISED_TP;
12951 else
12952 adv |= ADVERTISED_FIBRE;
12954 tp->link_config.advertising = adv;
12955 tp->link_config.speed = SPEED_INVALID;
12956 tp->link_config.duplex = DUPLEX_INVALID;
12957 tp->link_config.autoneg = AUTONEG_ENABLE;
12958 tp->link_config.active_speed = SPEED_INVALID;
12959 tp->link_config.active_duplex = DUPLEX_INVALID;
12960 tp->link_config.orig_speed = SPEED_INVALID;
12961 tp->link_config.orig_duplex = DUPLEX_INVALID;
12962 tp->link_config.orig_autoneg = AUTONEG_INVALID;
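/* Identify the PHY, preferring the live MII ID registers, then any
 * ID found in the EEPROM, then the subsystem-ID fallback table.
 */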
12965 static int __devinit tg3_phy_probe(struct tg3 *tp)
12967 u32 hw_phy_id_1, hw_phy_id_2;
12968 u32 hw_phy_id, hw_phy_id_masked;
12969 int err;
12971 /* flow control autonegotiation is default behavior */
12972 tg3_flag_set(tp, PAUSE_AUTONEG);
12973 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12975 if (tg3_flag(tp, USE_PHYLIB))
12976 return tg3_phy_init(tp);
12978 /* Reading the PHY ID register can conflict with ASF
12979 * firmware access to the PHY hardware.
12980 */
12981 err = 0;
12982 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12983 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12984 } else {
12985 /* Now read the physical PHY_ID from the chip and verify
12986 * that it is sane. If it doesn't look good, we fall back
12987 * to the hard-coded, table-based PHY_ID and, failing
12988 * that, to the value found in the eeprom area.
12989 */
12990 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12991 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12993 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12994 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12995 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12997 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13000 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13001 tp->phy_id = hw_phy_id;
13002 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13003 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13004 else
13005 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13006 } else {
13007 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13008 /* Do nothing, phy ID already set up in
13009 * tg3_get_eeprom_hw_cfg().
13010 */
13011 } else {
13012 struct subsys_tbl_ent *p;
13014 /* No eeprom signature? Try the hardcoded
13015 * subsys device table.
13016 */
13017 p = tg3_lookup_by_subsys(tp);
13018 if (!p)
13019 return -ENODEV;
13021 tp->phy_id = p->phy_id;
13022 if (!tp->phy_id ||
13023 tp->phy_id == TG3_PHY_ID_BCM8002)
13024 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13028 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13029 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13030 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13031 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13032 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13033 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13035 tg3_phy_init_link_config(tp);
13037 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13038 !tg3_flag(tp, ENABLE_APE) &&
13039 !tg3_flag(tp, ENABLE_ASF)) {
13040 u32 bmsr, mask;
13042 tg3_readphy(tp, MII_BMSR, &bmsr);
13043 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13044 (bmsr & BMSR_LSTATUS))
13045 goto skip_phy_reset;
13047 err = tg3_phy_reset(tp);
13048 if (err)
13049 return err;
13051 tg3_phy_set_wirespeed(tp);
13053 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13054 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13055 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13056 if (!tg3_copper_is_advertising_all(tp, mask)) {
13057 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13058 tp->link_config.flowctrl);
13060 tg3_writephy(tp, MII_BMCR,
13061 BMCR_ANENABLE | BMCR_ANRESTART);
13065 skip_phy_reset:
13066 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13067 err = tg3_init_5401phy_dsp(tp);
13068 if (err)
13069 return err;
13071 err = tg3_init_5401phy_dsp(tp);
13074 return err;
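/* Extract the board part number, and on boards whose VPD manufacturer
 * ID is "1028" (Dell), the firmware version, from the PCI VPD
 * read-only section.
 */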
13077 static void __devinit tg3_read_vpd(struct tg3 *tp)
13079 u8 *vpd_data;
13080 unsigned int block_end, rosize, len;
13081 int j, i = 0;
13083 vpd_data = (u8 *)tg3_vpd_readblock(tp);
13084 if (!vpd_data)
13085 goto out_no_vpd;
13087 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13088 PCI_VPD_LRDT_RO_DATA);
13089 if (i < 0)
13090 goto out_not_found;
13092 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13093 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13094 i += PCI_VPD_LRDT_TAG_SIZE;
13096 if (block_end > TG3_NVM_VPD_LEN)
13097 goto out_not_found;
13099 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13100 PCI_VPD_RO_KEYWORD_MFR_ID);
13101 if (j > 0) {
13102 len = pci_vpd_info_field_size(&vpd_data[j]);
13104 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13105 if (j + len > block_end || len != 4 ||
13106 memcmp(&vpd_data[j], "1028", 4))
13107 goto partno;
13109 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13110 PCI_VPD_RO_KEYWORD_VENDOR0);
13111 if (j < 0)
13112 goto partno;
13114 len = pci_vpd_info_field_size(&vpd_data[j]);
13116 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13117 if (j + len > block_end)
13118 goto partno;
13120 memcpy(tp->fw_ver, &vpd_data[j], len);
13121 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13124 partno:
13125 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13126 PCI_VPD_RO_KEYWORD_PARTNO);
13127 if (i < 0)
13128 goto out_not_found;
13130 len = pci_vpd_info_field_size(&vpd_data[i]);
13132 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13133 if (len > TG3_BPN_SIZE ||
13134 (len + i) > TG3_NVM_VPD_LEN)
13135 goto out_not_found;
13137 memcpy(tp->board_part_number, &vpd_data[i], len);
13139 out_not_found:
13140 kfree(vpd_data);
13141 if (tp->board_part_number[0])
13142 return;
13144 out_no_vpd:
13145 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13146 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13147 strcpy(tp->board_part_number, "BCM5717");
13148 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13149 strcpy(tp->board_part_number, "BCM5718");
13150 else
13151 goto nomatch;
13152 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13153 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13154 strcpy(tp->board_part_number, "BCM57780");
13155 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13156 strcpy(tp->board_part_number, "BCM57760");
13157 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13158 strcpy(tp->board_part_number, "BCM57790");
13159 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13160 strcpy(tp->board_part_number, "BCM57788");
13161 else
13162 goto nomatch;
13163 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13164 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13165 strcpy(tp->board_part_number, "BCM57761");
13166 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13167 strcpy(tp->board_part_number, "BCM57765");
13168 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13169 strcpy(tp->board_part_number, "BCM57781");
13170 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13171 strcpy(tp->board_part_number, "BCM57785");
13172 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13173 strcpy(tp->board_part_number, "BCM57791");
13174 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13175 strcpy(tp->board_part_number, "BCM57795");
13176 else
13177 goto nomatch;
13178 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13179 strcpy(tp->board_part_number, "BCM95906");
13180 } else {
13181 nomatch:
13182 strcpy(tp->board_part_number, "none");
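/* A firmware image is treated as valid if its first word carries the
 * 0x0c000000 signature and its second word is zero.
 */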
13186 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13188 u32 val;
13190 if (tg3_nvram_read(tp, offset, &val) ||
13191 (val & 0xfc000000) != 0x0c000000 ||
13192 tg3_nvram_read(tp, offset + 4, &val) ||
13193 val != 0)
13194 return 0;
13196 return 1;
13199 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13201 u32 val, offset, start, ver_offset;
13202 int i, dst_off;
13203 bool newver = false;
13205 if (tg3_nvram_read(tp, 0xc, &offset) ||
13206 tg3_nvram_read(tp, 0x4, &start))
13207 return;
13209 offset = tg3_nvram_logical_addr(tp, offset);
13211 if (tg3_nvram_read(tp, offset, &val))
13212 return;
13214 if ((val & 0xfc000000) == 0x0c000000) {
13215 if (tg3_nvram_read(tp, offset + 4, &val))
13216 return;
13218 if (val == 0)
13219 newver = true;
13222 dst_off = strlen(tp->fw_ver);
13224 if (newver) {
13225 if (TG3_VER_SIZE - dst_off < 16 ||
13226 tg3_nvram_read(tp, offset + 8, &ver_offset))
13227 return;
13229 offset = offset + ver_offset - start;
13230 for (i = 0; i < 16; i += 4) {
13231 __be32 v;
13232 if (tg3_nvram_read_be32(tp, offset + i, &v))
13233 return;
13235 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13237 } else {
13238 u32 major, minor;
13240 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13241 return;
13243 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13244 TG3_NVM_BCVER_MAJSFT;
13245 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13246 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13247 "v%d.%02d", major, minor);
13251 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13253 u32 val, major, minor;
13255 /* Use native endian representation */
13256 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13257 return;
13259 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13260 TG3_NVM_HWSB_CFG1_MAJSFT;
13261 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13262 TG3_NVM_HWSB_CFG1_MINSFT;
13264 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13267 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13269 u32 offset, major, minor, build;
13271 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13273 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13274 return;
13276 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13277 case TG3_EEPROM_SB_REVISION_0:
13278 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13279 break;
13280 case TG3_EEPROM_SB_REVISION_2:
13281 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13282 break;
13283 case TG3_EEPROM_SB_REVISION_3:
13284 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13285 break;
13286 case TG3_EEPROM_SB_REVISION_4:
13287 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13288 break;
13289 case TG3_EEPROM_SB_REVISION_5:
13290 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13291 break;
13292 case TG3_EEPROM_SB_REVISION_6:
13293 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13294 break;
13295 default:
13296 return;
13299 if (tg3_nvram_read(tp, offset, &val))
13300 return;
13302 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13303 TG3_EEPROM_SB_EDH_BLD_SHFT;
13304 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13305 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13306 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13308 if (minor > 99 || build > 26)
13309 return;
13311 offset = strlen(tp->fw_ver);
13312 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13313 " v%d.%02d", major, minor);
13315 if (build > 0) {
13316 offset = strlen(tp->fw_ver);
13317 if (offset < TG3_VER_SIZE - 1)
13318 tp->fw_ver[offset] = 'a' + build - 1;
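/* Walk the NVRAM directory for the ASF initialization entry and
 * append its version string to tp->fw_ver.
 */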
13322 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13324 u32 val, offset, start;
13325 int i, vlen;
13327 for (offset = TG3_NVM_DIR_START;
13328 offset < TG3_NVM_DIR_END;
13329 offset += TG3_NVM_DIRENT_SIZE) {
13330 if (tg3_nvram_read(tp, offset, &val))
13331 return;
13333 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13334 break;
13337 if (offset == TG3_NVM_DIR_END)
13338 return;
13340 if (!tg3_flag(tp, 5705_PLUS))
13341 start = 0x08000000;
13342 else if (tg3_nvram_read(tp, offset - 4, &start))
13343 return;
13345 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13346 !tg3_fw_img_is_valid(tp, offset) ||
13347 tg3_nvram_read(tp, offset + 8, &val))
13348 return;
13350 offset += val - start;
13352 vlen = strlen(tp->fw_ver);
13354 tp->fw_ver[vlen++] = ',';
13355 tp->fw_ver[vlen++] = ' ';
13357 for (i = 0; i < 4; i++) {
13358 __be32 v;
13359 if (tg3_nvram_read_be32(tp, offset, &v))
13360 return;
13362 offset += sizeof(v);
13364 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13365 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13366 break;
13369 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13370 vlen += sizeof(v);
13374 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13376 int vlen;
13377 u32 apedata;
13378 char *fwtype;
13380 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13381 return;
13383 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13384 if (apedata != APE_SEG_SIG_MAGIC)
13385 return;
13387 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13388 if (!(apedata & APE_FW_STATUS_READY))
13389 return;
13391 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13393 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13394 tg3_flag_set(tp, APE_HAS_NCSI);
13395 fwtype = "NCSI";
13396 } else {
13397 fwtype = "DASH";
13400 vlen = strlen(tp->fw_ver);
13402 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13403 fwtype,
13404 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13405 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13406 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13407 (apedata & APE_FW_VERSION_BLDMSK));
13410 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13412 u32 val;
13413 bool vpd_vers = false;
13415 if (tp->fw_ver[0] != 0)
13416 vpd_vers = true;
13418 if (tg3_flag(tp, NO_NVRAM)) {
13419 strcat(tp->fw_ver, "sb");
13420 return;
13423 if (tg3_nvram_read(tp, 0, &val))
13424 return;
13426 if (val == TG3_EEPROM_MAGIC)
13427 tg3_read_bc_ver(tp);
13428 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13429 tg3_read_sb_ver(tp, val);
13430 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13431 tg3_read_hwsb_ver(tp);
13432 else
13433 return;
13435 if (vpd_vers)
13436 goto done;
13438 if (tg3_flag(tp, ENABLE_APE)) {
13439 if (tg3_flag(tp, ENABLE_ASF))
13440 tg3_read_dash_ver(tp);
13441 } else if (tg3_flag(tp, ENABLE_ASF)) {
13442 tg3_read_mgmtfw_ver(tp);
13445 done:
13446 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13449 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13451 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13453 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13454 return TG3_RX_RET_MAX_SIZE_5717;
13455 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13456 return TG3_RX_RET_MAX_SIZE_5700;
13457 else
13458 return TG3_RX_RET_MAX_SIZE_5705;
13461 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13462 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13463 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13464 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13465 { },
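/* One-time probe-path discovery of the chip revision, bus type, and
 * hardware bugs, and selection of the register access methods needed
 * to work around them.
 */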
13468 static int __devinit tg3_get_invariants(struct tg3 *tp)
13470 u32 misc_ctrl_reg;
13471 u32 pci_state_reg, grc_misc_cfg;
13472 u32 val;
13473 u16 pci_cmd;
13474 int err;
13476 /* Force memory write invalidate off. If we leave it on,
13477 * then on 5700_BX chips we have to enable a workaround.
13478 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13479 * to match the cacheline size. The Broadcom driver has this
13480 * workaround but turns MWI off all the time, so it never uses
13481 * it. This seems to suggest that the workaround is insufficient.
13482 */
13483 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13484 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13485 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13487 /* Important! -- Make sure register accesses are byteswapped
13488 * correctly. Also, for those chips that require it, make
13489 * sure that indirect register accesses are enabled before
13490 * the first operation.
13491 */
13492 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13493 &misc_ctrl_reg);
13494 tp->misc_host_ctrl |= (misc_ctrl_reg &
13495 MISC_HOST_CTRL_CHIPREV);
13496 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13497 tp->misc_host_ctrl);
13499 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13500 MISC_HOST_CTRL_CHIPREV_SHIFT);
13501 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13502 u32 prod_id_asic_rev;
13504 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13505 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13506 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13507 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13508 pci_read_config_dword(tp->pdev,
13509 TG3PCI_GEN2_PRODID_ASICREV,
13510 &prod_id_asic_rev);
13511 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13512 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13513 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13514 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13515 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13516 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13517 pci_read_config_dword(tp->pdev,
13518 TG3PCI_GEN15_PRODID_ASICREV,
13519 &prod_id_asic_rev);
13520 else
13521 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13522 &prod_id_asic_rev);
13524 tp->pci_chip_rev_id = prod_id_asic_rev;
13527 /* Wrong chip ID in 5752 A0. This code can be removed later
13528 * as A0 is not in production.
13529 */
13530 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13531 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13533 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13534 * we need to disable memory and use config. cycles
13535 * only to access all registers. The 5702/03 chips
13536 * can mistakenly decode the special cycles from the
13537 * ICH chipsets as memory write cycles, causing corruption
13538 * of register and memory space. Only certain ICH bridges
13539 * will drive special cycles with non-zero data during the
13540 * address phase which can fall within the 5703's address
13541 * range. This is not an ICH bug as the PCI spec allows
13542 * non-zero address during special cycles. However, only
13543 * these ICH bridges are known to drive non-zero addresses
13544 * during special cycles.
13545 *
13546 * Since special cycles do not cross PCI bridges, we only
13547 * enable this workaround if the 5703 is on the secondary
13548 * bus of these ICH bridges.
13549 */
13550 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13551 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13552 static struct tg3_dev_id {
13553 u32 vendor;
13554 u32 device;
13555 u32 rev;
13556 } ich_chipsets[] = {
13557 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13558 PCI_ANY_ID },
13559 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13560 PCI_ANY_ID },
13561 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13562 0xa },
13563 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13564 PCI_ANY_ID },
13565 { },
13567 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13568 struct pci_dev *bridge = NULL;
13570 while (pci_id->vendor != 0) {
13571 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13572 bridge);
13573 if (!bridge) {
13574 pci_id++;
13575 continue;
13577 if (pci_id->rev != PCI_ANY_ID) {
13578 if (bridge->revision > pci_id->rev)
13579 continue;
13581 if (bridge->subordinate &&
13582 (bridge->subordinate->number ==
13583 tp->pdev->bus->number)) {
13584 tg3_flag_set(tp, ICH_WORKAROUND);
13585 pci_dev_put(bridge);
13586 break;
13591 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13592 static struct tg3_dev_id {
13593 u32 vendor;
13594 u32 device;
13595 } bridge_chipsets[] = {
13596 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13597 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13598 { },
13600 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13601 struct pci_dev *bridge = NULL;
13603 while (pci_id->vendor != 0) {
13604 bridge = pci_get_device(pci_id->vendor,
13605 pci_id->device,
13606 bridge);
13607 if (!bridge) {
13608 pci_id++;
13609 continue;
13611 if (bridge->subordinate &&
13612 (bridge->subordinate->number <=
13613 tp->pdev->bus->number) &&
13614 (bridge->subordinate->subordinate >=
13615 tp->pdev->bus->number)) {
13616 tg3_flag_set(tp, 5701_DMA_BUG);
13617 pci_dev_put(bridge);
13618 break;
13623 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13624 * DMA addresses > 40-bit. This bridge may have additional
13625 * 57xx devices behind it in some 4-port NIC designs, for example.
13626 * Any tg3 device found behind the bridge will also need the 40-bit
13627 * DMA workaround.
13628 */
13629 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13630 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13631 tg3_flag_set(tp, 5780_CLASS);
13632 tg3_flag_set(tp, 40BIT_DMA_BUG);
13633 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13634 } else {
13635 struct pci_dev *bridge = NULL;
13637 do {
13638 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13639 PCI_DEVICE_ID_SERVERWORKS_EPB,
13640 bridge);
13641 if (bridge && bridge->subordinate &&
13642 (bridge->subordinate->number <=
13643 tp->pdev->bus->number) &&
13644 (bridge->subordinate->subordinate >=
13645 tp->pdev->bus->number)) {
13646 tg3_flag_set(tp, 40BIT_DMA_BUG);
13647 pci_dev_put(bridge);
13648 break;
13650 } while (bridge);
13653 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13654 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13655 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13656 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13657 tp->pdev_peer = tg3_find_peer(tp);
13659 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13660 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13661 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13662 tg3_flag_set(tp, 5717_PLUS);
13664 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13665 tg3_flag(tp, 5717_PLUS))
13666 tg3_flag_set(tp, 57765_PLUS);
13668 /* Intentionally exclude ASIC_REV_5906 */
13669 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13670 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13671 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13672 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13673 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13674 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13675 tg3_flag(tp, 57765_PLUS))
13676 tg3_flag_set(tp, 5755_PLUS);
13678 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13679 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13680 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13681 tg3_flag(tp, 5755_PLUS) ||
13682 tg3_flag(tp, 5780_CLASS))
13683 tg3_flag_set(tp, 5750_PLUS);
13685 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13686 tg3_flag(tp, 5750_PLUS))
13687 tg3_flag_set(tp, 5705_PLUS);
13689 /* Determine TSO capabilities */
13690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13691 ; /* Do nothing. HW bug. */
13692 else if (tg3_flag(tp, 57765_PLUS))
13693 tg3_flag_set(tp, HW_TSO_3);
13694 else if (tg3_flag(tp, 5755_PLUS) ||
13695 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13696 tg3_flag_set(tp, HW_TSO_2);
13697 else if (tg3_flag(tp, 5750_PLUS)) {
13698 tg3_flag_set(tp, HW_TSO_1);
13699 tg3_flag_set(tp, TSO_BUG);
13700 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13701 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13702 tg3_flag_clear(tp, TSO_BUG);
13703 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13704 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13705 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13706 tg3_flag_set(tp, TSO_BUG);
13707 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13708 tp->fw_needed = FIRMWARE_TG3TSO5;
13709 else
13710 tp->fw_needed = FIRMWARE_TG3TSO;
13713 /* Selectively allow TSO based on operating conditions */
13714 if (tg3_flag(tp, HW_TSO_1) ||
13715 tg3_flag(tp, HW_TSO_2) ||
13716 tg3_flag(tp, HW_TSO_3) ||
13717 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13718 tg3_flag_set(tp, TSO_CAPABLE);
13719 else {
13720 tg3_flag_clear(tp, TSO_CAPABLE);
13721 tg3_flag_clear(tp, TSO_BUG);
13722 tp->fw_needed = NULL;
13725 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13726 tp->fw_needed = FIRMWARE_TG3;
13728 tp->irq_max = 1;
13730 if (tg3_flag(tp, 5750_PLUS)) {
13731 tg3_flag_set(tp, SUPPORT_MSI);
13732 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13733 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13734 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13735 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13736 tp->pdev_peer == tp->pdev))
13737 tg3_flag_clear(tp, SUPPORT_MSI);
13739 if (tg3_flag(tp, 5755_PLUS) ||
13740 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13741 tg3_flag_set(tp, 1SHOT_MSI);
13744 if (tg3_flag(tp, 57765_PLUS)) {
13745 tg3_flag_set(tp, SUPPORT_MSIX);
13746 tp->irq_max = TG3_IRQ_MAX_VECS;
13750 if (tg3_flag(tp, 5755_PLUS))
13751 tg3_flag_set(tp, SHORT_DMA_BUG);
13753 if (tg3_flag(tp, 5717_PLUS))
13754 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13756 if (tg3_flag(tp, 57765_PLUS) &&
13757 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13758 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13760 if (!tg3_flag(tp, 5705_PLUS) ||
13761 tg3_flag(tp, 5780_CLASS) ||
13762 tg3_flag(tp, USE_JUMBO_BDFLAG))
13763 tg3_flag_set(tp, JUMBO_CAPABLE);
13765 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13766 &pci_state_reg);
13768 if (pci_is_pcie(tp->pdev)) {
13769 u16 lnkctl;
13771 tg3_flag_set(tp, PCI_EXPRESS);
13773 tp->pcie_readrq = 4096;
13774 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13775 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13776 tp->pcie_readrq = 2048;
13778 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13780 pci_read_config_word(tp->pdev,
13781 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
13782 &lnkctl);
13783 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13784 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13785 ASIC_REV_5906) {
13786 tg3_flag_clear(tp, HW_TSO_2);
13787 tg3_flag_clear(tp, TSO_CAPABLE);
13789 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13790 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13791 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13792 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13793 tg3_flag_set(tp, CLKREQ_BUG);
13794 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13795 tg3_flag_set(tp, L1PLLPD_EN);
13797 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13798 /* BCM5785 devices are effectively PCIe devices, and should
13799 * follow PCIe codepaths, but do not have a PCIe capabilities
13800 * section.
13801 */
13802 tg3_flag_set(tp, PCI_EXPRESS);
13803 } else if (!tg3_flag(tp, 5705_PLUS) ||
13804 tg3_flag(tp, 5780_CLASS)) {
13805 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13806 if (!tp->pcix_cap) {
13807 dev_err(&tp->pdev->dev,
13808 "Cannot find PCI-X capability, aborting\n");
13809 return -EIO;
13812 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13813 tg3_flag_set(tp, PCIX_MODE);
13816 /* If we have an AMD 762 or VIA K8T800 chipset, write
13817 * reordering to the mailbox registers done by the host
13818 * controller can cause major troubles. We read back from
13819 * every mailbox register write to force the writes to be
13820 * posted to the chip in order.
13821 */
13822 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13823 !tg3_flag(tp, PCI_EXPRESS))
13824 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13826 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13827 &tp->pci_cacheline_sz);
13828 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13829 &tp->pci_lat_timer);
13830 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13831 tp->pci_lat_timer < 64) {
13832 tp->pci_lat_timer = 64;
13833 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13834 tp->pci_lat_timer);
13837 /* Important! -- It is critical that the PCI-X hw workaround
13838 * situation is decided before the first MMIO register access.
13839 */
13840 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13841 /* 5700 BX chips need to have their TX producer index
13842 * mailboxes written twice to work around a bug.
13843 */
13844 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13846 /* If we are in PCI-X mode, enable the register write workaround.
13847 *
13848 * The workaround is to use indirect register accesses
13849 * for all chip writes except those to mailbox registers.
13850 */
13851 if (tg3_flag(tp, PCIX_MODE)) {
13852 u32 pm_reg;
13854 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13856 /* The chip can have its power management PCI config
13857 * space registers clobbered due to this bug.
13858 * So explicitly force the chip into D0 here.
13859 */
13860 pci_read_config_dword(tp->pdev,
13861 tp->pm_cap + PCI_PM_CTRL,
13862 &pm_reg);
13863 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13864 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13865 pci_write_config_dword(tp->pdev,
13866 tp->pm_cap + PCI_PM_CTRL,
13867 pm_reg);
13869 /* Also, force SERR#/PERR# in PCI command. */
13870 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13871 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13872 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13876 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13877 tg3_flag_set(tp, PCI_HIGH_SPEED);
13878 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13879 tg3_flag_set(tp, PCI_32BIT);
13881 /* Chip-specific fixup from Broadcom driver */
13882 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13883 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13884 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13885 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13888 /* Default fast path register access methods */
13889 tp->read32 = tg3_read32;
13890 tp->write32 = tg3_write32;
13891 tp->read32_mbox = tg3_read32;
13892 tp->write32_mbox = tg3_write32;
13893 tp->write32_tx_mbox = tg3_write32;
13894 tp->write32_rx_mbox = tg3_write32;
13896 /* Various workaround register access methods */
13897 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13898 tp->write32 = tg3_write_indirect_reg32;
13899 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13900 (tg3_flag(tp, PCI_EXPRESS) &&
13901 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13902 /*
13903 * Back-to-back register writes can cause problems on these
13904 * chips; the workaround is to read back all reg writes
13905 * except those to mailbox regs.
13906 *
13907 * See tg3_write_indirect_reg32().
13908 */
13909 tp->write32 = tg3_write_flush_reg32;
13912 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13913 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13914 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13915 tp->write32_rx_mbox = tg3_write_flush_reg32;
13918 if (tg3_flag(tp, ICH_WORKAROUND)) {
13919 tp->read32 = tg3_read_indirect_reg32;
13920 tp->write32 = tg3_write_indirect_reg32;
13921 tp->read32_mbox = tg3_read_indirect_mbox;
13922 tp->write32_mbox = tg3_write_indirect_mbox;
13923 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13924 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13926 iounmap(tp->regs);
13927 tp->regs = NULL;
13929 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13930 pci_cmd &= ~PCI_COMMAND_MEMORY;
13931 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13933 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13934 tp->read32_mbox = tg3_read32_mbox_5906;
13935 tp->write32_mbox = tg3_write32_mbox_5906;
13936 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13937 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13940 if (tp->write32 == tg3_write_indirect_reg32 ||
13941 (tg3_flag(tp, PCIX_MODE) &&
13942 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13943 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13944 tg3_flag_set(tp, SRAM_USE_CONFIG);
13946 /* The memory arbiter has to be enabled in order for SRAM accesses
13947 * to succeed. Normally on powerup the tg3 chip firmware will make
13948 * sure it is enabled, but other entities such as system netboot
13949 * code might disable it.
13950 */
13951 val = tr32(MEMARB_MODE);
13952 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
13954 /* Get eeprom hw config before calling tg3_set_power_state().
13955 * In particular, the TG3_FLAG_IS_NIC flag must be
13956 * determined before calling tg3_set_power_state() so that
13957 * we know whether or not to switch out of Vaux power.
13958 * When the flag is set, it means that GPIO1 is used for eeprom
13959 * write protect and also implies that it is a LOM where GPIOs
13960 * are not used to switch power.
13961 */
13962 tg3_get_eeprom_hw_cfg(tp);
13964 if (tg3_flag(tp, ENABLE_APE)) {
13965 /* Allow reads and writes to the
13966 * APE register and memory space.
13967 */
13968 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13969 PCISTATE_ALLOW_APE_SHMEM_WR |
13970 PCISTATE_ALLOW_APE_PSPACE_WR;
13971 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13972 pci_state_reg);
13974 tg3_ape_lock_init(tp);
13977 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13978 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13979 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13980 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13981 tg3_flag(tp, 57765_PLUS))
13982 tg3_flag_set(tp, CPMU_PRESENT);
13984 /* Set up tp->grc_local_ctrl before calling
13985 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
13986 * will bring 5700's external PHY out of reset.
13987 * It is also used as eeprom write protect on LOMs.
13988 */
13989 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13991 tg3_flag(tp, EEPROM_WRITE_PROT))
13992 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13993 GRC_LCLCTRL_GPIO_OUTPUT1);
13994 /* Unused GPIO3 must be driven as output on 5752 because there
13995 * are no pull-up resistors on unused GPIO pins.
13996 */
13997 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13998 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14000 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14001 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14002 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14003 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14005 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14006 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14007 /* Turn off the debug UART. */
14008 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14009 if (tg3_flag(tp, IS_NIC))
14010 /* Keep VMain power. */
14011 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14012 GRC_LCLCTRL_GPIO_OUTPUT0;
14015 /* Switch out of Vaux if it is a NIC */
14016 tg3_pwrsrc_switch_to_vmain(tp);
14018 /* Derive initial jumbo mode from MTU assigned in
14019 * ether_setup() via the alloc_etherdev() call.
14020 */
14021 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14022 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14024 /* Determine WakeOnLan speed to use. */
14025 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14026 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14027 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14028 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14029 tg3_flag_clear(tp, WOL_SPEED_100MB);
14030 } else {
14031 tg3_flag_set(tp, WOL_SPEED_100MB);
14034 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14035 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14037 /* A few boards don't want Ethernet@WireSpeed phy feature */
14038 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14039 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14040 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14041 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14042 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14043 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14044 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14046 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14047 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14048 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14049 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14050 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14052 if (tg3_flag(tp, 5705_PLUS) &&
14053 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14054 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14055 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14056 !tg3_flag(tp, 57765_PLUS)) {
14057 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14058 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14059 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14060 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14061 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14062 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14063 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14064 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14065 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14066 } else
14067 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14070 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14071 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14072 tp->phy_otp = tg3_read_otp_phycfg(tp);
14073 if (tp->phy_otp == 0)
14074 tp->phy_otp = TG3_OTP_DEFAULT;
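/* Judging by the register names, CPMU-equipped chips clock the
 * MDIO interface from a constant 500KHz reference, while older
 * chips use the base MI mode divider.
 */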
14077 if (tg3_flag(tp, CPMU_PRESENT))
14078 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14079 else
14080 tp->mi_mode = MAC_MI_MODE_BASE;
14082 tp->coalesce_mode = 0;
14083 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14084 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14085 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14087 /* Set these bits to enable statistics workaround. */
14088 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14089 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14090 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14091 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14092 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14095 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14096 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14097 tg3_flag_set(tp, USE_PHYLIB);
14099 err = tg3_mdio_init(tp);
14100 if (err)
14101 return err;
14103 /* Initialize data/descriptor byte/word swapping. */
14104 val = tr32(GRC_MODE);
14105 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14106 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14107 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14108 GRC_MODE_B2HRX_ENABLE |
14109 GRC_MODE_HTX2B_ENABLE |
14110 GRC_MODE_HOST_STACKUP);
14111 else
14112 val &= GRC_MODE_HOST_STACKUP;
14114 tw32(GRC_MODE, val | tp->grc_mode);
14116 tg3_switch_clocks(tp);
14118 /* Clear this out for sanity. */
14119 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14121 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14122 &pci_state_reg);
14123 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14124 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14125 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14127 if (chiprevid == CHIPREV_ID_5701_A0 ||
14128 chiprevid == CHIPREV_ID_5701_B0 ||
14129 chiprevid == CHIPREV_ID_5701_B2 ||
14130 chiprevid == CHIPREV_ID_5701_B5) {
14131 void __iomem *sram_base;
14133 /* Write some dummy words into the SRAM status block
14134 * area, see if it reads back correctly. If the return
14135 * value is bad, force enable the PCIX workaround.
14137 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14139 writel(0x00000000, sram_base);
14140 writel(0x00000000, sram_base + 4);
14141 writel(0xffffffff, sram_base + 4);
14142 if (readl(sram_base) != 0x00000000)
14143 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14147 udelay(50);
14148 tg3_nvram_init(tp);
14150 grc_misc_cfg = tr32(GRC_MISC_CFG);
14151 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14153 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14154 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14155 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14156 tg3_flag_set(tp, IS_5788);
14158 if (!tg3_flag(tp, IS_5788) &&
14159 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14160 tg3_flag_set(tp, TAGGED_STATUS);
14161 if (tg3_flag(tp, TAGGED_STATUS)) {
14162 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14163 HOSTCC_MODE_CLRTICK_TXBD);
14165 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14166 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14167 tp->misc_host_ctrl);
14170 /* Preserve the APE MAC_MODE bits */
14171 if (tg3_flag(tp, ENABLE_APE))
14172 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14173 else
14174 tp->mac_mode = TG3_DEF_MAC_MODE;
14176 /* these are limited to 10/100 only */
14177 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14178 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14179 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14180 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14181 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14182 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14183 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14184 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14185 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14186 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14187 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14188 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14189 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14190 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14191 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14192 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14194 err = tg3_phy_probe(tp);
14195 if (err) {
14196 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14197 /* ... but do not return immediately ... */
14198 tg3_mdio_fini(tp);
14201 tg3_read_vpd(tp);
14202 tg3_read_fw_ver(tp);
14204 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14205 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14206 } else {
14207 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14208 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14209 else
14210 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14213 /* 5700 {AX,BX} chips have a broken status block link
14214 * change bit implementation, so we must use the
14215 * status register in those cases.
14217 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14218 tg3_flag_set(tp, USE_LINKCHG_REG);
14219 else
14220 tg3_flag_clear(tp, USE_LINKCHG_REG);
14222 /* The led_ctrl is set during tg3_phy_probe; here we might
14223 * have to force the link status polling mechanism based
14224 * upon subsystem IDs.
14226 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14227 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14228 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14229 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14230 tg3_flag_set(tp, USE_LINKCHG_REG);
14233 /* For all SERDES we poll the MAC status register. */
14234 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14235 tg3_flag_set(tp, POLL_SERDES);
14236 else
14237 tg3_flag_clear(tp, POLL_SERDES);
14239 tp->rx_offset = NET_IP_ALIGN;
14240 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
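/* The 5701 in PCI-X mode apparently cannot DMA with the usual
 * NET_IP_ALIGN receive offset, so leave the buffers fully aligned
 * and, on architectures without efficient unaligned access, raise
 * the copy threshold so every packet is copied into an aligned skb.
 */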
14241 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14242 tg3_flag(tp, PCIX_MODE)) {
14243 tp->rx_offset = 0;
14244 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14245 tp->rx_copy_thresh = ~(u16)0;
14246 #endif
14249 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14250 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14251 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14253 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14255 /* Increment the rx prod index on the rx std ring by at most
14256 * 8 for these chips to work around hw errata.
14258 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14259 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14260 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14261 tp->rx_std_max_post = 8;
14263 if (tg3_flag(tp, ASPM_WORKAROUND))
14264 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14265 PCIE_PWR_MGMT_L1_THRESH_MSK;
14267 return err;
14270 #ifdef CONFIG_SPARC
14271 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14273 struct net_device *dev = tp->dev;
14274 struct pci_dev *pdev = tp->pdev;
14275 struct device_node *dp = pci_device_to_OF_node(pdev);
14276 const unsigned char *addr;
14277 int len;
14279 addr = of_get_property(dp, "local-mac-address", &len);
14280 if (addr && len == 6) {
14281 memcpy(dev->dev_addr, addr, 6);
14282 memcpy(dev->perm_addr, dev->dev_addr, 6);
14283 return 0;
14285 return -ENODEV;
14288 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14290 struct net_device *dev = tp->dev;
14292 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14293 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14294 return 0;
14296 #endif
14298 static int __devinit tg3_get_device_address(struct tg3 *tp)
14300 struct net_device *dev = tp->dev;
14301 u32 hi, lo, mac_offset;
14302 int addr_ok = 0;
14304 #ifdef CONFIG_SPARC
14305 if (!tg3_get_macaddr_sparc(tp))
14306 return 0;
14307 #endif
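/* 0x7c is the default NVRAM offset of the MAC address; dual-MAC
 * and multi-function devices store theirs at the alternate
 * offsets selected below.
 */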
14309 mac_offset = 0x7c;
14310 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14311 tg3_flag(tp, 5780_CLASS)) {
14312 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14313 mac_offset = 0xcc;
14314 if (tg3_nvram_lock(tp))
14315 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14316 else
14317 tg3_nvram_unlock(tp);
14318 } else if (tg3_flag(tp, 5717_PLUS)) {
14319 if (PCI_FUNC(tp->pdev->devfn) & 1)
14320 mac_offset = 0xcc;
14321 if (PCI_FUNC(tp->pdev->devfn) > 1)
14322 mac_offset += 0x18c;
14323 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14324 mac_offset = 0x10;
14326 /* First try to get it from MAC address mailbox. */
14327 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
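/* 0x484b is ASCII "HK", the signature bootcode leaves in the
 * upper half of the mailbox when a valid MAC address follows.
 */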
14328 if ((hi >> 16) == 0x484b) {
14329 dev->dev_addr[0] = (hi >> 8) & 0xff;
14330 dev->dev_addr[1] = (hi >> 0) & 0xff;
14332 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14333 dev->dev_addr[2] = (lo >> 24) & 0xff;
14334 dev->dev_addr[3] = (lo >> 16) & 0xff;
14335 dev->dev_addr[4] = (lo >> 8) & 0xff;
14336 dev->dev_addr[5] = (lo >> 0) & 0xff;
14338 /* Some old bootcode may report a 0 MAC address in SRAM */
14339 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14341 if (!addr_ok) {
14342 /* Next, try NVRAM. */
14343 if (!tg3_flag(tp, NO_NVRAM) &&
14344 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14345 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14346 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14347 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14349 /* Finally just fetch it out of the MAC control regs. */
14350 else {
14351 hi = tr32(MAC_ADDR_0_HIGH);
14352 lo = tr32(MAC_ADDR_0_LOW);
14354 dev->dev_addr[5] = lo & 0xff;
14355 dev->dev_addr[4] = (lo >> 8) & 0xff;
14356 dev->dev_addr[3] = (lo >> 16) & 0xff;
14357 dev->dev_addr[2] = (lo >> 24) & 0xff;
14358 dev->dev_addr[1] = hi & 0xff;
14359 dev->dev_addr[0] = (hi >> 8) & 0xff;
14363 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14364 #ifdef CONFIG_SPARC
14365 if (!tg3_get_default_macaddr_sparc(tp))
14366 return 0;
14367 #endif
14368 return -EINVAL;
14370 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14371 return 0;
14374 #define BOUNDARY_SINGLE_CACHELINE 1
14375 #define BOUNDARY_MULTI_CACHELINE 2
14377 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14379 int cacheline_size;
14380 u8 byte;
14381 int goal;
14383 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
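/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words; zero means
 * firmware never programmed it, so assume a worst-case 1024-byte
 * cache line.
 */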
14384 if (byte == 0)
14385 cacheline_size = 1024;
14386 else
14387 cacheline_size = (int) byte * 4;
14389 /* On 5703 and later chips, the boundary bits have no
14390 * effect.
14392 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14393 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14394 !tg3_flag(tp, PCI_EXPRESS))
14395 goto out;
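/* Pick a per-architecture DMA boundary goal: PPC64/IA64/PARISC
 * prefer bursts spanning multiple cache lines, sparc64 and alpha
 * want bursts confined to a single cache line, and everything
 * else leaves the boundary bits untouched.
 */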
14397 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14398 goal = BOUNDARY_MULTI_CACHELINE;
14399 #else
14400 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14401 goal = BOUNDARY_SINGLE_CACHELINE;
14402 #else
14403 goal = 0;
14404 #endif
14405 #endif
14407 if (tg3_flag(tp, 57765_PLUS)) {
14408 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14409 goto out;
14412 if (!goal)
14413 goto out;
14415 /* PCI controllers on most RISC systems tend to disconnect
14416 * when a device tries to burst across a cache-line boundary.
14417 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14419 * Unfortunately, for PCI-E there are only limited
14420 * write-side controls for this, and thus for reads
14421 * we will still get the disconnects. We'll also waste
14422 * these PCI cycles for both read and write for chips
14423 * other than 5700 and 5701 which do not implement the
14424 * boundary bits.
14426 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14427 switch (cacheline_size) {
14428 case 16:
14429 case 32:
14430 case 64:
14431 case 128:
14432 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14433 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14434 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14435 } else {
14436 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14437 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14439 break;
14441 case 256:
14442 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14443 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14444 break;
14446 default:
14447 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14448 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14449 break;
14451 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14452 switch (cacheline_size) {
14453 case 16:
14454 case 32:
14455 case 64:
14456 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14457 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14458 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14459 break;
14461 /* fallthrough */
14462 case 128:
14463 default:
14464 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14465 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14466 break;
14468 } else {
14469 switch (cacheline_size) {
14470 case 16:
14471 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14472 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14473 DMA_RWCTRL_WRITE_BNDRY_16);
14474 break;
14476 /* fallthrough */
14477 case 32:
14478 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14479 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14480 DMA_RWCTRL_WRITE_BNDRY_32);
14481 break;
14483 /* fallthrough */
14484 case 64:
14485 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14486 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14487 DMA_RWCTRL_WRITE_BNDRY_64);
14488 break;
14490 /* fallthrough */
14491 case 128:
14492 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14493 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14494 DMA_RWCTRL_WRITE_BNDRY_128);
14495 break;
14497 /* fallthrough */
14498 case 256:
14499 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14500 DMA_RWCTRL_WRITE_BNDRY_256);
14501 break;
14502 case 512:
14503 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14504 DMA_RWCTRL_WRITE_BNDRY_512);
14505 break;
14506 case 1024:
14507 default:
14508 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14509 DMA_RWCTRL_WRITE_BNDRY_1024);
14510 break;
14514 out:
14515 return val;
14518 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14520 struct tg3_internal_buffer_desc test_desc;
14521 u32 sram_dma_descs;
14522 int i, ret;
14524 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14526 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14527 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14528 tw32(RDMAC_STATUS, 0);
14529 tw32(WDMAC_STATUS, 0);
14531 tw32(BUFMGR_MODE, 0);
14532 tw32(FTQ_RESET, 0);
14534 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14535 test_desc.addr_lo = buf_dma & 0xffffffff;
14536 test_desc.nic_mbuf = 0x00002100;
14537 test_desc.len = size;
14540 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14541 * the *second* time the tg3 driver was getting loaded after an
14542 * initial scan.
14544 * Broadcom tells me:
14545 * ...the DMA engine is connected to the GRC block and a DMA
14546 * reset may affect the GRC block in some unpredictable way...
14547 * The behavior of resets to individual blocks has not been tested.
14549 * Broadcom noted the GRC reset will also reset all sub-components.
14551 if (to_device) {
14552 test_desc.cqid_sqid = (13 << 8) | 2;
14554 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14555 udelay(40);
14556 } else {
14557 test_desc.cqid_sqid = (16 << 8) | 7;
14559 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14560 udelay(40);
14562 test_desc.flags = 0x00000005;
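/* Copy the test descriptor into on-chip SRAM one 32-bit word at
 * a time through the PCI memory window registers.
 */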
14564 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14565 u32 val;
14567 val = *(((u32 *)&test_desc) + i);
14568 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14569 sram_dma_descs + (i * sizeof(u32)));
14570 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14572 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14574 if (to_device)
14575 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14576 else
14577 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
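/* Poll the completion FIFO for up to 4ms (40 iterations of
 * 100us) waiting for the engine to hand the descriptor back.
 */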
14579 ret = -ENODEV;
14580 for (i = 0; i < 40; i++) {
14581 u32 val;
14583 if (to_device)
14584 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14585 else
14586 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14587 if ((val & 0xffff) == sram_dma_descs) {
14588 ret = 0;
14589 break;
14592 udelay(100);
14595 return ret;
14598 #define TEST_BUFFER_SIZE 0x2000
14600 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14601 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14602 { },
14605 static int __devinit tg3_test_dma(struct tg3 *tp)
14607 dma_addr_t buf_dma;
14608 u32 *buf, saved_dma_rwctrl;
14609 int ret = 0;
14611 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14612 &buf_dma, GFP_KERNEL);
14613 if (!buf) {
14614 ret = -ENOMEM;
14615 goto out_nofree;
14618 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14619 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
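/* The 0x7/0x6 values programmed above are the standard PCI
 * Memory Write and Memory Read bus command encodings.
 */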
14621 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14623 if (tg3_flag(tp, 57765_PLUS))
14624 goto out;
14626 if (tg3_flag(tp, PCI_EXPRESS)) {
14627 /* DMA read watermark not used on PCIE */
14628 tp->dma_rwctrl |= 0x00180000;
14629 } else if (!tg3_flag(tp, PCIX_MODE)) {
14630 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14631 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14632 tp->dma_rwctrl |= 0x003f0000;
14633 else
14634 tp->dma_rwctrl |= 0x003f000f;
14635 } else {
14636 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14637 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14638 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14639 u32 read_water = 0x7;
14641 /* If the 5704 is behind the EPB bridge, we can
14642 * do the less restrictive ONE_DMA workaround for
14643 * better performance.
14645 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14646 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14647 tp->dma_rwctrl |= 0x8000;
14648 else if (ccval == 0x6 || ccval == 0x7)
14649 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14651 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14652 read_water = 4;
14653 /* Set bit 23 to enable PCIX hw bug fix */
14654 tp->dma_rwctrl |=
14655 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14656 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14657 (1 << 23);
14658 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14659 /* 5780 always in PCIX mode */
14660 tp->dma_rwctrl |= 0x00144000;
14661 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14662 /* 5714 always in PCIX mode */
14663 tp->dma_rwctrl |= 0x00148000;
14664 } else {
14665 tp->dma_rwctrl |= 0x001b000f;
14669 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14670 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14671 tp->dma_rwctrl &= 0xfffffff0;
14673 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14674 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14675 /* Remove this if it causes problems for some boards. */
14676 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14678 /* On 5700/5701 chips, we need to set this bit.
14679 * Otherwise the chip will issue cacheline transactions
14680 * to streamable DMA memory without all of the byte
14681 * enables turned on. This is an error on several
14682 * RISC PCI controllers, in particular sparc64.
14684 * On 5703/5704 chips, this bit has been reassigned
14685 * a different meaning. In particular, it is used
14686 * on those chips to enable a PCI-X workaround.
14688 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14691 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14693 #if 0
14694 /* Unneeded, already done by tg3_get_invariants. */
14695 tg3_switch_clocks(tp);
14696 #endif
14698 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14699 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14700 goto out;
14702 /* It is best to perform the DMA test with maximum write burst size
14703 * to expose the 5700/5701 write DMA bug.
14705 saved_dma_rwctrl = tp->dma_rwctrl;
14706 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14707 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
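/* Repeatedly DMA a test pattern out to the chip and back,
 * tightening the write boundary to 16 bytes and retrying if the
 * read-back data comes up corrupted.
 */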
14709 while (1) {
14710 u32 *p = buf, i;
14712 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14713 p[i] = i;
14715 /* Send the buffer to the chip. */
14716 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14717 if (ret) {
14718 dev_err(&tp->pdev->dev,
14719 "%s: Buffer write failed. err = %d\n",
14720 __func__, ret);
14721 break;
14724 #if 0
14725 /* validate data reached card RAM correctly. */
14726 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14727 u32 val;
14728 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14729 if (le32_to_cpu(val) != p[i]) {
14730 dev_err(&tp->pdev->dev,
14731 "%s: Buffer corrupted on device! "
14732 "(%d != %d)\n", __func__, val, i);
14733 /* ret = -ENODEV here? */
14735 p[i] = 0;
14737 #endif
14738 /* Now read it back. */
14739 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14740 if (ret) {
14741 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14742 "err = %d\n", __func__, ret);
14743 break;
14746 /* Verify it. */
14747 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14748 if (p[i] == i)
14749 continue;
14751 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14752 DMA_RWCTRL_WRITE_BNDRY_16) {
14753 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14754 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14755 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14756 break;
14757 } else {
14758 dev_err(&tp->pdev->dev,
14759 "%s: Buffer corrupted on read back! "
14760 "(%d != %d)\n", __func__, p[i], i);
14761 ret = -ENODEV;
14762 goto out;
14766 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14767 /* Success. */
14768 ret = 0;
14769 break;
14772 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14773 DMA_RWCTRL_WRITE_BNDRY_16) {
14774 /* DMA test passed without adjusting DMA boundary,
14775 * now look for chipsets that are known to expose the
14776 * DMA bug without failing the test.
14778 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14779 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14780 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14781 } else {
14782 /* Safe to use the calculated DMA boundary. */
14783 tp->dma_rwctrl = saved_dma_rwctrl;
14786 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14789 out:
14790 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14791 out_nofree:
14792 return ret;
14795 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14797 if (tg3_flag(tp, 57765_PLUS)) {
14798 tp->bufmgr_config.mbuf_read_dma_low_water =
14799 DEFAULT_MB_RDMA_LOW_WATER_5705;
14800 tp->bufmgr_config.mbuf_mac_rx_low_water =
14801 DEFAULT_MB_MACRX_LOW_WATER_57765;
14802 tp->bufmgr_config.mbuf_high_water =
14803 DEFAULT_MB_HIGH_WATER_57765;
14805 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14806 DEFAULT_MB_RDMA_LOW_WATER_5705;
14807 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14808 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14809 tp->bufmgr_config.mbuf_high_water_jumbo =
14810 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14811 } else if (tg3_flag(tp, 5705_PLUS)) {
14812 tp->bufmgr_config.mbuf_read_dma_low_water =
14813 DEFAULT_MB_RDMA_LOW_WATER_5705;
14814 tp->bufmgr_config.mbuf_mac_rx_low_water =
14815 DEFAULT_MB_MACRX_LOW_WATER_5705;
14816 tp->bufmgr_config.mbuf_high_water =
14817 DEFAULT_MB_HIGH_WATER_5705;
14818 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14819 tp->bufmgr_config.mbuf_mac_rx_low_water =
14820 DEFAULT_MB_MACRX_LOW_WATER_5906;
14821 tp->bufmgr_config.mbuf_high_water =
14822 DEFAULT_MB_HIGH_WATER_5906;
14825 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14826 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14827 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14828 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14829 tp->bufmgr_config.mbuf_high_water_jumbo =
14830 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14831 } else {
14832 tp->bufmgr_config.mbuf_read_dma_low_water =
14833 DEFAULT_MB_RDMA_LOW_WATER;
14834 tp->bufmgr_config.mbuf_mac_rx_low_water =
14835 DEFAULT_MB_MACRX_LOW_WATER;
14836 tp->bufmgr_config.mbuf_high_water =
14837 DEFAULT_MB_HIGH_WATER;
14839 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14840 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14841 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14842 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14843 tp->bufmgr_config.mbuf_high_water_jumbo =
14844 DEFAULT_MB_HIGH_WATER_JUMBO;
14847 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14848 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14851 static char * __devinit tg3_phy_string(struct tg3 *tp)
14853 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14854 case TG3_PHY_ID_BCM5400: return "5400";
14855 case TG3_PHY_ID_BCM5401: return "5401";
14856 case TG3_PHY_ID_BCM5411: return "5411";
14857 case TG3_PHY_ID_BCM5701: return "5701";
14858 case TG3_PHY_ID_BCM5703: return "5703";
14859 case TG3_PHY_ID_BCM5704: return "5704";
14860 case TG3_PHY_ID_BCM5705: return "5705";
14861 case TG3_PHY_ID_BCM5750: return "5750";
14862 case TG3_PHY_ID_BCM5752: return "5752";
14863 case TG3_PHY_ID_BCM5714: return "5714";
14864 case TG3_PHY_ID_BCM5780: return "5780";
14865 case TG3_PHY_ID_BCM5755: return "5755";
14866 case TG3_PHY_ID_BCM5787: return "5787";
14867 case TG3_PHY_ID_BCM5784: return "5784";
14868 case TG3_PHY_ID_BCM5756: return "5722/5756";
14869 case TG3_PHY_ID_BCM5906: return "5906";
14870 case TG3_PHY_ID_BCM5761: return "5761";
14871 case TG3_PHY_ID_BCM5718C: return "5718C";
14872 case TG3_PHY_ID_BCM5718S: return "5718S";
14873 case TG3_PHY_ID_BCM57765: return "57765";
14874 case TG3_PHY_ID_BCM5719C: return "5719C";
14875 case TG3_PHY_ID_BCM5720C: return "5720C";
14876 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14877 case 0: return "serdes";
14878 default: return "unknown";
14882 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14884 if (tg3_flag(tp, PCI_EXPRESS)) {
14885 strcpy(str, "PCI Express");
14886 return str;
14887 } else if (tg3_flag(tp, PCIX_MODE)) {
14888 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14890 strcpy(str, "PCIX:");
14892 if ((clock_ctrl == 7) ||
14893 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14894 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14895 strcat(str, "133MHz");
14896 else if (clock_ctrl == 0)
14897 strcat(str, "33MHz");
14898 else if (clock_ctrl == 2)
14899 strcat(str, "50MHz");
14900 else if (clock_ctrl == 4)
14901 strcat(str, "66MHz");
14902 else if (clock_ctrl == 6)
14903 strcat(str, "100MHz");
14904 } else {
14905 strcpy(str, "PCI:");
14906 if (tg3_flag(tp, PCI_HIGH_SPEED))
14907 strcat(str, "66MHz");
14908 else
14909 strcat(str, "33MHz");
14911 if (tg3_flag(tp, PCI_32BIT))
14912 strcat(str, ":32-bit");
14913 else
14914 strcat(str, ":64-bit");
14915 return str;
14918 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14920 struct pci_dev *peer;
14921 unsigned int func, devnr = tp->pdev->devfn & ~7;
14923 for (func = 0; func < 8; func++) {
14924 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14925 if (peer && peer != tp->pdev)
14926 break;
14927 pci_dev_put(peer);
14929 /* 5704 can be configured in single-port mode; set peer to
14930 * tp->pdev in that case.
14932 if (!peer) {
14933 peer = tp->pdev;
14934 return peer;
14938 * We don't need to keep the refcount elevated; there's no way
14939 * to remove one half of this device without removing the other
14941 pci_dev_put(peer);
14943 return peer;
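/* Seed the default ethtool coalescing parameters; CLRTICK-mode
 * chips and 5705+ parts get adjusted values below.
 */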
14946 static void __devinit tg3_init_coal(struct tg3 *tp)
14948 struct ethtool_coalesce *ec = &tp->coal;
14950 memset(ec, 0, sizeof(*ec));
14951 ec->cmd = ETHTOOL_GCOALESCE;
14952 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14953 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14954 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14955 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14956 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14957 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14958 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14959 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14960 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14962 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14963 HOSTCC_MODE_CLRTICK_TXBD)) {
14964 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14965 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14966 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14967 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14970 if (tg3_flag(tp, 5705_PLUS)) {
14971 ec->rx_coalesce_usecs_irq = 0;
14972 ec->tx_coalesce_usecs_irq = 0;
14973 ec->stats_block_coalesce_usecs = 0;
14977 static const struct net_device_ops tg3_netdev_ops = {
14978 .ndo_open = tg3_open,
14979 .ndo_stop = tg3_close,
14980 .ndo_start_xmit = tg3_start_xmit,
14981 .ndo_get_stats64 = tg3_get_stats64,
14982 .ndo_validate_addr = eth_validate_addr,
14983 .ndo_set_multicast_list = tg3_set_rx_mode,
14984 .ndo_set_mac_address = tg3_set_mac_addr,
14985 .ndo_do_ioctl = tg3_ioctl,
14986 .ndo_tx_timeout = tg3_tx_timeout,
14987 .ndo_change_mtu = tg3_change_mtu,
14988 .ndo_fix_features = tg3_fix_features,
14989 .ndo_set_features = tg3_set_features,
14990 #ifdef CONFIG_NET_POLL_CONTROLLER
14991 .ndo_poll_controller = tg3_poll_controller,
14992 #endif
14995 static int __devinit tg3_init_one(struct pci_dev *pdev,
14996 const struct pci_device_id *ent)
14998 struct net_device *dev;
14999 struct tg3 *tp;
15000 int i, err, pm_cap;
15001 u32 sndmbx, rcvmbx, intmbx;
15002 char str[40];
15003 u64 dma_mask, persist_dma_mask;
15004 u32 features = 0;
15006 printk_once(KERN_INFO "%s\n", version);
15008 err = pci_enable_device(pdev);
15009 if (err) {
15010 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15011 return err;
15014 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15015 if (err) {
15016 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15017 goto err_out_disable_pdev;
15020 pci_set_master(pdev);
15022 /* Find power-management capability. */
15023 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15024 if (pm_cap == 0) {
15025 dev_err(&pdev->dev,
15026 "Cannot find Power Management capability, aborting\n");
15027 err = -EIO;
15028 goto err_out_free_res;
15031 err = pci_set_power_state(pdev, PCI_D0);
15032 if (err) {
15033 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15034 goto err_out_free_res;
15037 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15038 if (!dev) {
15039 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15040 err = -ENOMEM;
15041 goto err_out_power_down;
15044 SET_NETDEV_DEV(dev, &pdev->dev);
15046 tp = netdev_priv(dev);
15047 tp->pdev = pdev;
15048 tp->dev = dev;
15049 tp->pm_cap = pm_cap;
15050 tp->rx_mode = TG3_DEF_RX_MODE;
15051 tp->tx_mode = TG3_DEF_TX_MODE;
15053 if (tg3_debug > 0)
15054 tp->msg_enable = tg3_debug;
15055 else
15056 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15058 /* The word/byte swap controls here govern register access byte
15059 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15060 * setting below.
15062 tp->misc_host_ctrl =
15063 MISC_HOST_CTRL_MASK_PCI_INT |
15064 MISC_HOST_CTRL_WORD_SWAP |
15065 MISC_HOST_CTRL_INDIR_ACCESS |
15066 MISC_HOST_CTRL_PCISTATE_RW;
15068 /* The NONFRM (non-frame) byte/word swap controls take effect
15069 * on descriptor entries, anything which isn't packet data.
15071 * The StrongARM chips on the board (one for tx, one for rx)
15072 * are running in big-endian mode.
15074 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15075 GRC_MODE_WSWAP_NONFRM_DATA);
15076 #ifdef __BIG_ENDIAN
15077 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15078 #endif
15079 spin_lock_init(&tp->lock);
15080 spin_lock_init(&tp->indirect_lock);
15081 INIT_WORK(&tp->reset_task, tg3_reset_task);
15083 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15084 if (!tp->regs) {
15085 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15086 err = -ENOMEM;
15087 goto err_out_free_dev;
15090 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15091 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15092 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15093 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15094 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15095 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15096 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15097 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15098 tg3_flag_set(tp, ENABLE_APE);
15099 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15100 if (!tp->aperegs) {
15101 dev_err(&pdev->dev,
15102 "Cannot map APE registers, aborting\n");
15103 err = -ENOMEM;
15104 goto err_out_iounmap;
15108 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15109 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15111 dev->ethtool_ops = &tg3_ethtool_ops;
15112 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15113 dev->netdev_ops = &tg3_netdev_ops;
15114 dev->irq = pdev->irq;
15116 err = tg3_get_invariants(tp);
15117 if (err) {
15118 dev_err(&pdev->dev,
15119 "Problem fetching invariants of chip, aborting\n");
15120 goto err_out_apeunmap;
15123 /* The EPB bridge inside 5714, 5715, and 5780 and any
15124 * device behind the EPB cannot support DMA addresses > 40-bit.
15125 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15126 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15127 * do DMA address check in tg3_start_xmit().
15129 if (tg3_flag(tp, IS_5788))
15130 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15131 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15132 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15133 #ifdef CONFIG_HIGHMEM
15134 dma_mask = DMA_BIT_MASK(64);
15135 #endif
15136 } else
15137 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15139 /* Configure DMA attributes. */
15140 if (dma_mask > DMA_BIT_MASK(32)) {
15141 err = pci_set_dma_mask(pdev, dma_mask);
15142 if (!err) {
15143 features |= NETIF_F_HIGHDMA;
15144 err = pci_set_consistent_dma_mask(pdev,
15145 persist_dma_mask);
15146 if (err < 0) {
15147 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15148 "DMA for consistent allocations\n");
15149 goto err_out_apeunmap;
15153 if (err || dma_mask == DMA_BIT_MASK(32)) {
15154 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15155 if (err) {
15156 dev_err(&pdev->dev,
15157 "No usable DMA configuration, aborting\n");
15158 goto err_out_apeunmap;
15162 tg3_init_bufmgr_config(tp);
15164 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15166 /* 5700 B0 chips do not support checksumming correctly due
15167 * to hardware bugs.
15169 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15170 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15172 if (tg3_flag(tp, 5755_PLUS))
15173 features |= NETIF_F_IPV6_CSUM;
15176 /* TSO is on by default on chips that support hardware TSO.
15177 * Firmware TSO on older chips gives lower performance, so it
15178 * is off by default, but can be enabled using ethtool.
15180 if ((tg3_flag(tp, HW_TSO_1) ||
15181 tg3_flag(tp, HW_TSO_2) ||
15182 tg3_flag(tp, HW_TSO_3)) &&
15183 (features & NETIF_F_IP_CSUM))
15184 features |= NETIF_F_TSO;
15185 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15186 if (features & NETIF_F_IPV6_CSUM)
15187 features |= NETIF_F_TSO6;
15188 if (tg3_flag(tp, HW_TSO_3) ||
15189 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15190 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15191 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15192 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15193 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15194 features |= NETIF_F_TSO_ECN;
15197 dev->features |= features;
15198 dev->vlan_features |= features;
15201 * Add loopback capability only for a subset of devices that support
15202 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15203 * loopback for the remaining devices.
15205 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15206 !tg3_flag(tp, CPMU_PRESENT))
15207 /* Add the loopback capability */
15208 features |= NETIF_F_LOOPBACK;
15210 dev->hw_features |= features;
15212 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15213 !tg3_flag(tp, TSO_CAPABLE) &&
15214 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15215 tg3_flag_set(tp, MAX_RXPEND_64);
15216 tp->rx_pending = 63;
15219 err = tg3_get_device_address(tp);
15220 if (err) {
15221 dev_err(&pdev->dev,
15222 "Could not obtain valid ethernet address, aborting\n");
15223 goto err_out_apeunmap;
15227 * Reset chip in case UNDI or EFI driver did not shut it down.
15228 * Otherwise the DMA self test will enable WDMAC and we'll see
15229 * (spurious) pending DMA on the PCI bus at that point.
15231 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15232 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15233 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15234 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15237 err = tg3_test_dma(tp);
15238 if (err) {
15239 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15240 goto err_out_apeunmap;
15243 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15244 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15245 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15246 for (i = 0; i < tp->irq_max; i++) {
15247 struct tg3_napi *tnapi = &tp->napi[i];
15249 tnapi->tp = tp;
15250 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15252 tnapi->int_mbox = intmbx;
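/* The first four interrupt mailboxes are spaced 8 bytes apart
 * (presumably full 64-bit registers); later vectors advance in
 * 4-byte strides.
 */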
15253 if (i < 4)
15254 intmbx += 0x8;
15255 else
15256 intmbx += 0x4;
15258 tnapi->consmbox = rcvmbx;
15259 tnapi->prodmbox = sndmbx;
15261 if (i)
15262 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15263 else
15264 tnapi->coal_now = HOSTCC_MODE_NOW;
15266 if (!tg3_flag(tp, SUPPORT_MSIX))
15267 break;
15270 * If we support MSIX, we'll be using RSS. If we're using
15271 * RSS, the first vector only handles link interrupts and the
15272 * remaining vectors handle rx and tx interrupts. Reuse the
15273 * mailbox values for the next iteration. The values we set up
15274 * above are still useful for the single vectored mode.
15276 if (!i)
15277 continue;
15279 rcvmbx += 0x8;
15281 if (sndmbx & 0x4)
15282 sndmbx -= 0x4;
15283 else
15284 sndmbx += 0xc;
15287 tg3_init_coal(tp);
15289 pci_set_drvdata(pdev, dev);
15291 err = register_netdev(dev);
15292 if (err) {
15293 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15294 goto err_out_apeunmap;
15297 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15298 tp->board_part_number,
15299 tp->pci_chip_rev_id,
15300 tg3_bus_string(tp, str),
15301 dev->dev_addr);
15303 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15304 struct phy_device *phydev;
15305 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15306 netdev_info(dev,
15307 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15308 phydev->drv->name, dev_name(&phydev->dev));
15309 } else {
15310 char *ethtype;
15312 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15313 ethtype = "10/100Base-TX";
15314 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15315 ethtype = "1000Base-SX";
15316 else
15317 ethtype = "10/100/1000Base-T";
15319 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15320 "(WireSpeed[%d], EEE[%d])\n",
15321 tg3_phy_string(tp), ethtype,
15322 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15323 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15326 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15327 (dev->features & NETIF_F_RXCSUM) != 0,
15328 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15329 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15330 tg3_flag(tp, ENABLE_ASF) != 0,
15331 tg3_flag(tp, TSO_CAPABLE) != 0);
15332 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15333 tp->dma_rwctrl,
15334 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15335 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15337 pci_save_state(pdev);
15339 return 0;
15341 err_out_apeunmap:
15342 if (tp->aperegs) {
15343 iounmap(tp->aperegs);
15344 tp->aperegs = NULL;
15347 err_out_iounmap:
15348 if (tp->regs) {
15349 iounmap(tp->regs);
15350 tp->regs = NULL;
15353 err_out_free_dev:
15354 free_netdev(dev);
15356 err_out_power_down:
15357 pci_set_power_state(pdev, PCI_D3hot);
15359 err_out_free_res:
15360 pci_release_regions(pdev);
15362 err_out_disable_pdev:
15363 pci_disable_device(pdev);
15364 pci_set_drvdata(pdev, NULL);
15365 return err;
15368 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15370 struct net_device *dev = pci_get_drvdata(pdev);
15372 if (dev) {
15373 struct tg3 *tp = netdev_priv(dev);
15375 if (tp->fw)
15376 release_firmware(tp->fw);
15378 cancel_work_sync(&tp->reset_task);
15380 if (!tg3_flag(tp, USE_PHYLIB)) {
15381 tg3_phy_fini(tp);
15382 tg3_mdio_fini(tp);
15385 unregister_netdev(dev);
15386 if (tp->aperegs) {
15387 iounmap(tp->aperegs);
15388 tp->aperegs = NULL;
15390 if (tp->regs) {
15391 iounmap(tp->regs);
15392 tp->regs = NULL;
15394 free_netdev(dev);
15395 pci_release_regions(pdev);
15396 pci_disable_device(pdev);
15397 pci_set_drvdata(pdev, NULL);
15401 #ifdef CONFIG_PM_SLEEP
15402 static int tg3_suspend(struct device *device)
15404 struct pci_dev *pdev = to_pci_dev(device);
15405 struct net_device *dev = pci_get_drvdata(pdev);
15406 struct tg3 *tp = netdev_priv(dev);
15407 int err;
15409 if (!netif_running(dev))
15410 return 0;
15412 flush_work_sync(&tp->reset_task);
15413 tg3_phy_stop(tp);
15414 tg3_netif_stop(tp);
15416 del_timer_sync(&tp->timer);
15418 tg3_full_lock(tp, 1);
15419 tg3_disable_ints(tp);
15420 tg3_full_unlock(tp);
15422 netif_device_detach(dev);
15424 tg3_full_lock(tp, 0);
15425 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15426 tg3_flag_clear(tp, INIT_COMPLETE);
15427 tg3_full_unlock(tp);
15429 err = tg3_power_down_prepare(tp);
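/* If preparing for power-down failed, restart the hardware and
 * reattach the interface so the device is left in a usable state.
 */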
15430 if (err) {
15431 int err2;
15433 tg3_full_lock(tp, 0);
15435 tg3_flag_set(tp, INIT_COMPLETE);
15436 err2 = tg3_restart_hw(tp, 1);
15437 if (err2)
15438 goto out;
15440 tp->timer.expires = jiffies + tp->timer_offset;
15441 add_timer(&tp->timer);
15443 netif_device_attach(dev);
15444 tg3_netif_start(tp);
15446 out:
15447 tg3_full_unlock(tp);
15449 if (!err2)
15450 tg3_phy_start(tp);
15453 return err;
15456 static int tg3_resume(struct device *device)
15458 struct pci_dev *pdev = to_pci_dev(device);
15459 struct net_device *dev = pci_get_drvdata(pdev);
15460 struct tg3 *tp = netdev_priv(dev);
15461 int err;
15463 if (!netif_running(dev))
15464 return 0;
15466 netif_device_attach(dev);
15468 tg3_full_lock(tp, 0);
15470 tg3_flag_set(tp, INIT_COMPLETE);
15471 err = tg3_restart_hw(tp, 1);
15472 if (err)
15473 goto out;
15475 tp->timer.expires = jiffies + tp->timer_offset;
15476 add_timer(&tp->timer);
15478 tg3_netif_start(tp);
15480 out:
15481 tg3_full_unlock(tp);
15483 if (!err)
15484 tg3_phy_start(tp);
15486 return err;
15489 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15490 #define TG3_PM_OPS (&tg3_pm_ops)
15492 #else
15494 #define TG3_PM_OPS NULL
15496 #endif /* CONFIG_PM_SLEEP */
15499 * tg3_io_error_detected - called when PCI error is detected
15500 * @pdev: Pointer to PCI device
15501 * @state: The current pci connection state
15503 * This function is called after a PCI bus error affecting
15504 * this device has been detected.
15506 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15507 pci_channel_state_t state)
15509 struct net_device *netdev = pci_get_drvdata(pdev);
15510 struct tg3 *tp = netdev_priv(netdev);
15511 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15513 netdev_info(netdev, "PCI I/O error detected\n");
15515 rtnl_lock();
15517 if (!netif_running(netdev))
15518 goto done;
15520 tg3_phy_stop(tp);
15522 tg3_netif_stop(tp);
15524 del_timer_sync(&tp->timer);
15525 tg3_flag_clear(tp, RESTART_TIMER);
15527 /* We want to make sure that the reset task doesn't run */
15528 cancel_work_sync(&tp->reset_task);
15529 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15530 tg3_flag_clear(tp, RESTART_TIMER);
15532 netif_device_detach(netdev);
15534 /* Clean up software state, even if MMIO is blocked */
15535 tg3_full_lock(tp, 0);
15536 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15537 tg3_full_unlock(tp);
15539 done:
15540 if (state == pci_channel_io_perm_failure)
15541 err = PCI_ERS_RESULT_DISCONNECT;
15542 else
15543 pci_disable_device(pdev);
15545 rtnl_unlock();
15547 return err;
15551 * tg3_io_slot_reset - called after the pci bus has been reset.
15552 * @pdev: Pointer to PCI device
15554 * Restart the card from scratch, as if from a cold-boot.
15555 * At this point, the card has experienced a hard reset,
15556 * followed by fixups by BIOS, and has its config space
15557 * set up identically to what it was at cold boot.
15559 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15561 struct net_device *netdev = pci_get_drvdata(pdev);
15562 struct tg3 *tp = netdev_priv(netdev);
15563 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15564 int err;
15566 rtnl_lock();
15568 if (pci_enable_device(pdev)) {
15569 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15570 goto done;
15573 pci_set_master(pdev);
15574 pci_restore_state(pdev);
15575 pci_save_state(pdev);
15577 if (!netif_running(netdev)) {
15578 rc = PCI_ERS_RESULT_RECOVERED;
15579 goto done;
15582 err = tg3_power_up(tp);
15583 if (err)
15584 goto done;
15586 rc = PCI_ERS_RESULT_RECOVERED;
15588 done:
15589 rtnl_unlock();
15591 return rc;
15595 * tg3_io_resume - called when traffic can start flowing again.
15596 * @pdev: Pointer to PCI device
15598 * This callback is called when the error recovery driver tells
15599 * us that it's OK to resume normal operation.
15601 static void tg3_io_resume(struct pci_dev *pdev)
15603 struct net_device *netdev = pci_get_drvdata(pdev);
15604 struct tg3 *tp = netdev_priv(netdev);
15605 int err;
15607 rtnl_lock();
15609 if (!netif_running(netdev))
15610 goto done;
15612 tg3_full_lock(tp, 0);
15613 tg3_flag_set(tp, INIT_COMPLETE);
15614 err = tg3_restart_hw(tp, 1);
15615 tg3_full_unlock(tp);
15616 if (err) {
15617 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15618 goto done;
15621 netif_device_attach(netdev);
15623 tp->timer.expires = jiffies + tp->timer_offset;
15624 add_timer(&tp->timer);
15626 tg3_netif_start(tp);
15628 tg3_phy_start(tp);
15630 done:
15631 rtnl_unlock();
15634 static struct pci_error_handlers tg3_err_handler = {
15635 .error_detected = tg3_io_error_detected,
15636 .slot_reset = tg3_io_slot_reset,
15637 .resume = tg3_io_resume
15640 static struct pci_driver tg3_driver = {
15641 .name = DRV_MODULE_NAME,
15642 .id_table = tg3_pci_tbl,
15643 .probe = tg3_init_one,
15644 .remove = __devexit_p(tg3_remove_one),
15645 .err_handler = &tg3_err_handler,
15646 .driver.pm = TG3_PM_OPS,
15649 static int __init tg3_init(void)
15651 return pci_register_driver(&tg3_driver);
15654 static void __exit tg3_cleanup(void)
15656 pci_unregister_driver(&tg3_driver);
15659 module_init(tg3_init);
15660 module_exit(tg3_cleanup);