/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM             3
#define TG3_MIN_NUM             119
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 18, 2011"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
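
/* Worked example of the mask trick described above (illustrative only):
 * TG3_TX_RING_SIZE is 512, a power of two, so
 * NEXT_TX(511) == (511 + 1) & 511 == 0.  The '&' wraps the index exactly
 * as '% TG3_TX_RING_SIZE' would, but without a hw divide/modulo.
 */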
#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)     ((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}
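
/* The "indirect" accessors below do not touch the register BAR directly;
 * they tunnel through the PCI configuration space register window
 * (TG3PCI_REG_BASE_ADDR / TG3PCI_REG_DATA), so every access is a
 * base-address write followed by a data access, serialized by
 * tp->indirect_lock.
 */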
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver hasn't any stale locks. */
        for (i = 0; i < 8; i++) {
                if (i == TG3_APE_LOCK_GPIO)
                        continue;
                tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
        }

        /* Clear the correct bit of the GPIO lock too. */
        if (!tp->pci_fn)
                bit = APE_LOCK_GRANT_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}
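
/* The APE lock is a plain request/grant handshake: write the driver's
 * request bit, then poll the grant register until that same bit shows
 * up (up to roughly 1 ms below).  If the grant never arrives, the
 * request is revoked so the APE firmware is not left with a dangling
 * lock request.
 */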
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
                bit = APE_LOCK_REQ_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
                bit = APE_LOCK_GRANT_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000
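
/* tg3_readphy() and tg3_writephy() below drive the MAC's MI (MDIO) port
 * by hand: auto-polling is paused, a frame is composed in MAC_MI_COM
 * (PHY address, register, command and, for writes, the data), and then
 * MI_COM_BUSY is polled for up to PHY_BUSY_LOOPS iterations before the
 * access is abandoned with -EBUSY.  Auto-polling is restored on exit.
 */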
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
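
/* The clause 45 helpers below reach MMD registers through the clause 22
 * MMD access registers (MII_TG3_MMD_CTRL / MII_TG3_MMD_ADDRESS): select
 * the device address, latch the register address, switch the control
 * register to no-post-increment data mode, then transfer the data
 * itself through the address register.
 */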
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB);

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");

                if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
                        netdev_info(tp->dev, "EEE is %s\n",
                                    tp->setlpicnt ? "enabled" : "disabled");

                tg3_ump_link_report(tp);
        }
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}
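
/* The resolver below combines the local and link partner 1000BASE-X
 * pause advertisements.  For example, if both sides advertise symmetric
 * pause, flow control is enabled in both directions; if we advertise
 * asymmetric-only, we only get TX pause when the partner advertises
 * both pause bits.
 */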
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_1000XPAUSE) {
                if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        else if (rmtadv & LPA_1000XPAUSE_ASYM)
                                cap = FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
                        cap = FLOW_CTRL_TX;
        }

        return cap;
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
        u8 autoneg;
        u8 flowctrl = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        if (tg3_flag(tp, USE_PHYLIB))
                autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
        else
                autoneg = tp->link_config.autoneg;

        if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
                        flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
        } else
                flowctrl = tp->link_config.flowctrl;

        tp->link_config.active_flowctrl = flowctrl;

        if (flowctrl & FLOW_CTRL_RX)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        if (old_rx_mode != tp->rx_mode)
                tw32_f(MAC_RX_MODE, tp->rx_mode);

        if (flowctrl & FLOW_CTRL_TX)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode)
                tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
        struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        spin_lock_bh(&tp->lock);

        mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
                                    MAC_MODE_HALF_DUPLEX);

        oldflowctrl = tp->link_config.active_flowctrl;

        if (phydev->link) {
                lcl_adv = 0;
                rmt_adv = 0;

                if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else if (phydev->speed == SPEED_1000 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_MII;

                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
                        lcl_adv = tg3_advert_flowctrl_1000T(
                                  tp->link_config.flowctrl);

                        if (phydev->pause)
                                rmt_adv = LPA_PAUSE_CAP;
                        if (phydev->asym_pause)
                                rmt_adv |= LPA_PAUSE_ASYM;
                }

                tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        } else
                mac_mode |= MAC_MODE_PORT_MODE_GMII;

        if (mac_mode != tp->mac_mode) {
                tp->mac_mode = mac_mode;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                if (phydev->speed == SPEED_10)
                        tw32(MAC_MI_STAT,
                             MAC_MI_STAT_10MBPS_MODE |
                             MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
                else
                        tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
        }

        if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
        else
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

        if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
            (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
            phydev->speed != tp->link_config.active_speed ||
            phydev->duplex != tp->link_config.active_duplex ||
            oldflowctrl != tp->link_config.active_flowctrl)
                linkmesg = 1;

        tp->link_config.active_speed = phydev->speed;
        tp->link_config.active_duplex = phydev->duplex;

        spin_unlock_bh(&tp->lock);

        if (linkmesg)
                tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
                return 0;

        /* Bring the PHY back to a known state. */
        tg3_bmcr_reset(tp);

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        /* Attach the MAC to the PHY. */
        phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
                             phydev->dev_flags, phydev->interface);
        if (IS_ERR(phydev)) {
                dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
                return PTR_ERR(phydev);
        }

        /* Mask with MAC supported features. */
        switch (phydev->interface) {
        case PHY_INTERFACE_MODE_GMII:
        case PHY_INTERFACE_MODE_RGMII:
                if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                        phydev->supported &= (PHY_GBIT_FEATURES |
                                              SUPPORTED_Pause |
                                              SUPPORTED_Asym_Pause);
                        break;
                }
                /* fallthru */
        case PHY_INTERFACE_MODE_MII:
                phydev->supported &= (PHY_BASIC_FEATURES |
                                      SUPPORTED_Pause |
                                      SUPPORTED_Asym_Pause);
                break;
        default:
                phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
                return -EINVAL;
        }

        tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

        phydev->advertising = phydev->supported;

        return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                return;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
                tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
                phydev->speed = tp->link_config.orig_speed;
                phydev->duplex = tp->link_config.orig_duplex;
                phydev->autoneg = tp->link_config.orig_autoneg;
                phydev->advertising = tp->link_config.orig_advertising;
        }

        phy_start(phydev);

        phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                return;

        phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
                tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
        }
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
        u32 phytest;

        if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
                u32 phy;

                tg3_writephy(tp, MII_TG3_FET_TEST,
                             phytest | MII_TG3_FET_SHADOW_EN);
                if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
                        if (enable)
                                phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
                        else
                                phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
                        tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
                }
                tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
        }
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
        u32 reg;

        if (!tg3_flag(tp, 5705_PLUS) ||
            (tg3_flag(tp, 5717_PLUS) &&
             (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
                return;

        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                tg3_phy_fet_toggle_apd(tp, enable);
                return;
        }

        reg = MII_TG3_MISC_SHDW_WREN |
              MII_TG3_MISC_SHDW_SCR5_SEL |
              MII_TG3_MISC_SHDW_SCR5_LPED |
              MII_TG3_MISC_SHDW_SCR5_DLPTLM |
              MII_TG3_MISC_SHDW_SCR5_SDTL |
              MII_TG3_MISC_SHDW_SCR5_C125OE;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
                reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

        tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


        reg = MII_TG3_MISC_SHDW_WREN |
              MII_TG3_MISC_SHDW_APD_SEL |
              MII_TG3_MISC_SHDW_APD_WKTM_84MS;
        if (enable)
                reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

        tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!tg3_flag(tp, 5705_PLUS) ||
            (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                return;

        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                u32 ephy;

                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
                        u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

                        tg3_writephy(tp, MII_TG3_FET_TEST,
                                     ephy | MII_TG3_FET_SHADOW_EN);
                        if (!tg3_readphy(tp, reg, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
                                tg3_writephy(tp, reg, phy);
                        }
                        tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
                }
        } else {
                int ret;

                ret = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
                if (!ret) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        tg3_phy_auxctl_write(tp,
                                             MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
                }
        }
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        int ret;
        u32 val;

        if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
                return;

        ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
        if (!ret)
                tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
                                     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
        u32 otp, phy;

        if (!tp->phy_otp)
                return;

        otp = tp->phy_otp;

        if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
                return;

        phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
        phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
        tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

        phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
              ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

        phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
        phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

        phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

        phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

        phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
              ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

        TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
        u32 val;

        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
                return;

        tp->setlpicnt = 0;

        if (tp->link_config.autoneg == AUTONEG_ENABLE &&
            current_link_up == 1 &&
            tp->link_config.active_duplex == DUPLEX_FULL &&
            (tp->link_config.active_speed == SPEED_100 ||
             tp->link_config.active_speed == SPEED_1000)) {
                u32 eeectl;

                if (tp->link_config.active_speed == SPEED_1000)
                        eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
                else
                        eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

                tw32(TG3_CPMU_EEE_CTRL, eeectl);

                tg3_phy_cl45_read(tp, MDIO_MMD_AN,
                                  TG3_CL45_D7_EEERES_STAT, &val);

                if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
                    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
                        tp->setlpicnt = 2;
        }

        if (!tp->setlpicnt) {
                val = tr32(TG3_CPMU_EEE_MODE);
                tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
        }
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
        u32 val;

        if (tp->link_config.active_speed == SPEED_1000 &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
            !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
                tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
                TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
        }

        val = tr32(TG3_CPMU_EEE_MODE);
        tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
        int limit = 100;

        while (limit--) {
                u32 tmp32;

                if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
                        if ((tmp32 & 0x1000) == 0)
                                break;
                }
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}
1967 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1969 int chan;
1971 for (chan = 0; chan < 4; chan++) {
1972 int i;
1974 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1975 (chan * 0x2000) | 0x0200);
1976 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1977 for (i = 0; i < 6; i++)
1978 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1979 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1980 if (tg3_wait_macro_done(tp))
1981 return -EBUSY;
1984 return 0;
1987 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1989 u32 reg32, phy9_orig;
1990 int retries, do_phy_reset, err;
1992 retries = 10;
1993 do_phy_reset = 1;
1994 do {
1995 if (do_phy_reset) {
1996 err = tg3_bmcr_reset(tp);
1997 if (err)
1998 return err;
1999 do_phy_reset = 0;
2002 /* Disable transmitter and interrupt. */
2003 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2004 continue;
2006 reg32 |= 0x3000;
2007 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2009 /* Set full-duplex, 1000 Mbps. */
2010 tg3_writephy(tp, MII_BMCR,
2011 BMCR_FULLDPLX | BMCR_SPEED1000);
2013 /* Set to master mode. */
2014 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2015 continue;
2017 tg3_writephy(tp, MII_CTRL1000,
2018 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2020 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2021 if (err)
2022 return err;
2024 /* Block the PHY control access. */
2025 tg3_phydsp_write(tp, 0x8005, 0x0800);
2027 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2028 if (!err)
2029 break;
2030 } while (--retries);
2032 err = tg3_phy_reset_chanpat(tp);
2033 if (err)
2034 return err;
2036 tg3_phydsp_write(tp, 0x8005, 0x0000);
2038 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2039 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2041 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2043 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2045 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2046 reg32 &= ~0x3000;
2047 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2048 } else if (!err)
2049 err = -EBUSY;
2051 return err;
2054 /* This resets the tigon3 PHY; callers decide when a reset is
2055 * needed (e.g. no valid link, or a forced reset).
2056 */
2057 static int tg3_phy_reset(struct tg3 *tp)
2059 u32 val, cpmuctrl;
2060 int err;
2062 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2063 val = tr32(GRC_MISC_CFG);
2064 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2065 udelay(40);
2067 err = tg3_readphy(tp, MII_BMSR, &val);
2068 err |= tg3_readphy(tp, MII_BMSR, &val);
2069 if (err != 0)
2070 return -EBUSY;
2072 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2073 netif_carrier_off(tp->dev);
2074 tg3_link_report(tp);
2077 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2078 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2079 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2080 err = tg3_phy_reset_5703_4_5(tp);
2081 if (err)
2082 return err;
2083 goto out;
2086 cpmuctrl = 0;
2087 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2088 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2089 cpmuctrl = tr32(TG3_CPMU_CTRL);
2090 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2091 tw32(TG3_CPMU_CTRL,
2092 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2095 err = tg3_bmcr_reset(tp);
2096 if (err)
2097 return err;
2099 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2100 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2101 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2103 tw32(TG3_CPMU_CTRL, cpmuctrl);
2106 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2107 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2108 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2109 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2110 CPMU_LSPD_1000MB_MACCLK_12_5) {
2111 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2112 udelay(40);
2113 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2117 if (tg3_flag(tp, 5717_PLUS) &&
2118 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2119 return 0;
2121 tg3_phy_apply_otp(tp);
2123 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2124 tg3_phy_toggle_apd(tp, true);
2125 else
2126 tg3_phy_toggle_apd(tp, false);
2128 out:
2129 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2130 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2131 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2132 tg3_phydsp_write(tp, 0x000a, 0x0323);
2133 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2136 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2137 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2138 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2141 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2142 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2143 tg3_phydsp_write(tp, 0x000a, 0x310b);
2144 tg3_phydsp_write(tp, 0x201f, 0x9506);
2145 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2146 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2148 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2149 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2150 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2151 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2152 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2153 tg3_writephy(tp, MII_TG3_TEST1,
2154 MII_TG3_TEST1_TRIM_EN | 0x4);
2155 } else
2156 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2158 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2162 /* Set Extended packet length bit (bit 14) on all chips that
2163 * support jumbo frames. */
2164 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2165 /* Cannot do read-modify-write on 5401 */
2166 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2167 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2168 /* Set bit 14 with read-modify-write to preserve other bits */
2169 err = tg3_phy_auxctl_read(tp,
2170 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2171 if (!err)
2172 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2173 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2176 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2177 * jumbo frames transmission.
2178 */
2179 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2180 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2181 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2182 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2186 /* adjust output voltage */
2187 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2190 tg3_phy_toggle_automdix(tp, 1);
2191 tg3_phy_set_wirespeed(tp);
2192 return 0;
2195 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2196 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2197 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2198 TG3_GPIO_MSG_NEED_VAUX)
2199 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2200 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2201 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2202 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2203 (TG3_GPIO_MSG_DRVR_PRES << 12))
2205 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2206 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2207 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2208 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2209 (TG3_GPIO_MSG_NEED_VAUX << 12))
2211 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2212 {
2213 u32 status, shift;
2215 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2216 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2217 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2218 else
2219 status = tr32(TG3_CPMU_DRV_STATUS);
2221 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2222 status &= ~(TG3_GPIO_MSG_MASK << shift);
2223 status |= (newstat << shift);
2225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2226 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2227 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2228 else
2229 tw32(TG3_CPMU_DRV_STATUS, status);
2231 return status >> TG3_APE_GPIO_MSG_SHIFT;
2232 }
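/*
 * Editor's note -- illustrative sketch, not part of the driver. Each PCI
 * function owns a 4-bit lane in the shared GPIO message word, starting at
 * TG3_APE_GPIO_MSG_SHIFT, and only the two TG3_GPIO_MSG_* bits of a lane
 * are defined above. A minimal standalone model of the read-modify-write,
 * assuming a shift of 4 (the real value lives in tg3.h):
 */
#if 0 /* illustrative only */
#include <stdint.h>

#define GPIO_MSG_SHIFT 4   /* assumed stand-in for TG3_APE_GPIO_MSG_SHIFT */
#define GPIO_MSG_MASK  0x3 /* TG3_GPIO_MSG_DRVR_PRES | TG3_GPIO_MSG_NEED_VAUX */

static uint32_t set_function_status(uint32_t status, int pci_fn,
				    uint32_t newstat)
{
	unsigned int shift = GPIO_MSG_SHIFT + 4 * pci_fn;

	status &= ~(GPIO_MSG_MASK << shift); /* clear this function's lane */
	status |= newstat << shift;          /* install the new message */
	return status;
}
#endif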
2234 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2236 if (!tg3_flag(tp, IS_NIC))
2237 return 0;
2239 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2240 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2241 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2242 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2243 return -EIO;
2245 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2247 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2248 TG3_GRC_LCLCTL_PWRSW_DELAY);
2250 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2251 } else {
2252 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2253 TG3_GRC_LCLCTL_PWRSW_DELAY);
2256 return 0;
2259 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2261 u32 grc_local_ctrl;
2263 if (!tg3_flag(tp, IS_NIC) ||
2264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2265 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2266 return;
2268 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2270 tw32_wait_f(GRC_LOCAL_CTRL,
2271 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2272 TG3_GRC_LCLCTL_PWRSW_DELAY);
2274 tw32_wait_f(GRC_LOCAL_CTRL,
2275 grc_local_ctrl,
2276 TG3_GRC_LCLCTL_PWRSW_DELAY);
2278 tw32_wait_f(GRC_LOCAL_CTRL,
2279 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2280 TG3_GRC_LCLCTL_PWRSW_DELAY);
2283 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2285 if (!tg3_flag(tp, IS_NIC))
2286 return;
2288 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2289 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2290 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2291 (GRC_LCLCTRL_GPIO_OE0 |
2292 GRC_LCLCTRL_GPIO_OE1 |
2293 GRC_LCLCTRL_GPIO_OE2 |
2294 GRC_LCLCTRL_GPIO_OUTPUT0 |
2295 GRC_LCLCTRL_GPIO_OUTPUT1),
2296 TG3_GRC_LCLCTL_PWRSW_DELAY);
2297 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2298 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2299 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2300 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2301 GRC_LCLCTRL_GPIO_OE1 |
2302 GRC_LCLCTRL_GPIO_OE2 |
2303 GRC_LCLCTRL_GPIO_OUTPUT0 |
2304 GRC_LCLCTRL_GPIO_OUTPUT1 |
2305 tp->grc_local_ctrl;
2306 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2307 TG3_GRC_LCLCTL_PWRSW_DELAY);
2309 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2310 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2311 TG3_GRC_LCLCTL_PWRSW_DELAY);
2313 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2314 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2315 TG3_GRC_LCLCTL_PWRSW_DELAY);
2316 } else {
2317 u32 no_gpio2;
2318 u32 grc_local_ctrl = 0;
2320 /* Workaround to prevent overdrawing Amps. */
2321 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2322 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2323 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2324 grc_local_ctrl,
2325 TG3_GRC_LCLCTL_PWRSW_DELAY);
2328 /* On 5753 and variants, GPIO2 cannot be used. */
2329 no_gpio2 = tp->nic_sram_data_cfg &
2330 NIC_SRAM_DATA_CFG_NO_GPIO2;
2332 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2333 GRC_LCLCTRL_GPIO_OE1 |
2334 GRC_LCLCTRL_GPIO_OE2 |
2335 GRC_LCLCTRL_GPIO_OUTPUT1 |
2336 GRC_LCLCTRL_GPIO_OUTPUT2;
2337 if (no_gpio2) {
2338 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2339 GRC_LCLCTRL_GPIO_OUTPUT2);
2341 tw32_wait_f(GRC_LOCAL_CTRL,
2342 tp->grc_local_ctrl | grc_local_ctrl,
2343 TG3_GRC_LCLCTL_PWRSW_DELAY);
2345 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2347 tw32_wait_f(GRC_LOCAL_CTRL,
2348 tp->grc_local_ctrl | grc_local_ctrl,
2349 TG3_GRC_LCLCTL_PWRSW_DELAY);
2351 if (!no_gpio2) {
2352 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2353 tw32_wait_f(GRC_LOCAL_CTRL,
2354 tp->grc_local_ctrl | grc_local_ctrl,
2355 TG3_GRC_LCLCTL_PWRSW_DELAY);
2360 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2362 u32 msg = 0;
2364 /* Serialize power state transitions */
2365 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2366 return;
2368 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2369 msg = TG3_GPIO_MSG_NEED_VAUX;
2371 msg = tg3_set_function_status(tp, msg);
2373 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2374 goto done;
2376 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2377 tg3_pwrsrc_switch_to_vaux(tp);
2378 else
2379 tg3_pwrsrc_die_with_vmain(tp);
2381 done:
2382 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2385 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2387 bool need_vaux = false;
2389 /* The GPIOs do something completely different on 57765. */
2390 if (!tg3_flag(tp, IS_NIC) ||
2391 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2392 return;
2394 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2395 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2396 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2397 tg3_frob_aux_power_5717(tp, include_wol ?
2398 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2399 return;
2402 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2403 struct net_device *dev_peer;
2405 dev_peer = pci_get_drvdata(tp->pdev_peer);
2407 /* remove_one() may have been run on the peer. */
2408 if (dev_peer) {
2409 struct tg3 *tp_peer = netdev_priv(dev_peer);
2411 if (tg3_flag(tp_peer, INIT_COMPLETE))
2412 return;
2414 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2415 tg3_flag(tp_peer, ENABLE_ASF))
2416 need_vaux = true;
2420 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2421 tg3_flag(tp, ENABLE_ASF))
2422 need_vaux = true;
2424 if (need_vaux)
2425 tg3_pwrsrc_switch_to_vaux(tp);
2426 else
2427 tg3_pwrsrc_die_with_vmain(tp);
2430 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2432 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2433 return 1;
2434 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2435 if (speed != SPEED_10)
2436 return 1;
2437 } else if (speed == SPEED_10)
2438 return 1;
2440 return 0;
2443 static int tg3_setup_phy(struct tg3 *, int);
2445 #define RESET_KIND_SHUTDOWN 0
2446 #define RESET_KIND_INIT 1
2447 #define RESET_KIND_SUSPEND 2
2449 static void tg3_write_sig_post_reset(struct tg3 *, int);
2450 static int tg3_halt_cpu(struct tg3 *, u32);
2452 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2454 u32 val;
2456 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2457 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2458 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2459 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2461 sg_dig_ctrl |=
2462 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2463 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2464 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2466 return;
2469 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2470 tg3_bmcr_reset(tp);
2471 val = tr32(GRC_MISC_CFG);
2472 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2473 udelay(40);
2474 return;
2475 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2476 u32 phytest;
2477 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2478 u32 phy;
2480 tg3_writephy(tp, MII_ADVERTISE, 0);
2481 tg3_writephy(tp, MII_BMCR,
2482 BMCR_ANENABLE | BMCR_ANRESTART);
2484 tg3_writephy(tp, MII_TG3_FET_TEST,
2485 phytest | MII_TG3_FET_SHADOW_EN);
2486 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2487 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2488 tg3_writephy(tp,
2489 MII_TG3_FET_SHDW_AUXMODE4,
2490 phy);
2492 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2494 return;
2495 } else if (do_low_power) {
2496 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2497 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2499 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2500 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2501 MII_TG3_AUXCTL_PCTL_VREG_11V;
2502 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2505 /* The PHY should not be powered down on some chips because
2506 * of bugs.
2507 */
2508 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2509 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2510 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2511 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2512 return;
2514 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2515 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2516 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2517 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2518 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2519 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2522 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2525 /* tp->lock is held. */
2526 static int tg3_nvram_lock(struct tg3 *tp)
2528 if (tg3_flag(tp, NVRAM)) {
2529 int i;
2531 if (tp->nvram_lock_cnt == 0) {
2532 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2533 for (i = 0; i < 8000; i++) {
2534 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2535 break;
2536 udelay(20);
2538 if (i == 8000) {
2539 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2540 return -ENODEV;
2543 tp->nvram_lock_cnt++;
2545 return 0;
2548 /* tp->lock is held. */
2549 static void tg3_nvram_unlock(struct tg3 *tp)
2551 if (tg3_flag(tp, NVRAM)) {
2552 if (tp->nvram_lock_cnt > 0)
2553 tp->nvram_lock_cnt--;
2554 if (tp->nvram_lock_cnt == 0)
2555 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2559 /* tp->lock is held. */
2560 static void tg3_enable_nvram_access(struct tg3 *tp)
2562 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2563 u32 nvaccess = tr32(NVRAM_ACCESS);
2565 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2569 /* tp->lock is held. */
2570 static void tg3_disable_nvram_access(struct tg3 *tp)
2572 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2573 u32 nvaccess = tr32(NVRAM_ACCESS);
2575 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2579 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2580 u32 offset, u32 *val)
2582 u32 tmp;
2583 int i;
2585 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2586 return -EINVAL;
2588 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2589 EEPROM_ADDR_DEVID_MASK |
2590 EEPROM_ADDR_READ);
2591 tw32(GRC_EEPROM_ADDR,
2592 tmp |
2593 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2594 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2595 EEPROM_ADDR_ADDR_MASK) |
2596 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2598 for (i = 0; i < 1000; i++) {
2599 tmp = tr32(GRC_EEPROM_ADDR);
2601 if (tmp & EEPROM_ADDR_COMPLETE)
2602 break;
2603 msleep(1);
2605 if (!(tmp & EEPROM_ADDR_COMPLETE))
2606 return -EBUSY;
2608 tmp = tr32(GRC_EEPROM_DATA);
2610 /*
2611 * The data will always be opposite the native endian
2612 * format. Perform a blind byteswap to compensate.
2613 */
2614 *val = swab32(tmp);
2616 return 0;
2619 #define NVRAM_CMD_TIMEOUT 10000
2621 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2622 {
2623 int i;
2625 tw32(NVRAM_CMD, nvram_cmd);
2626 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2627 udelay(10);
2628 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2629 udelay(10);
2630 break;
2631 }
2632 }
2634 if (i == NVRAM_CMD_TIMEOUT)
2635 return -EBUSY;
2637 return 0;
2638 }
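/* Editor's note: NVRAM_CMD_TIMEOUT polls of udelay(10) bound the wait in
 * tg3_nvram_exec_cmd() above at roughly 10000 * 10us = 100ms before the
 * command is abandoned with -EBUSY.
 */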
2640 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2642 if (tg3_flag(tp, NVRAM) &&
2643 tg3_flag(tp, NVRAM_BUFFERED) &&
2644 tg3_flag(tp, FLASH) &&
2645 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2646 (tp->nvram_jedecnum == JEDEC_ATMEL))
2648 addr = ((addr / tp->nvram_pagesize) <<
2649 ATMEL_AT45DB0X1B_PAGE_POS) +
2650 (addr % tp->nvram_pagesize);
2652 return addr;
2655 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2657 if (tg3_flag(tp, NVRAM) &&
2658 tg3_flag(tp, NVRAM_BUFFERED) &&
2659 tg3_flag(tp, FLASH) &&
2660 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2661 (tp->nvram_jedecnum == JEDEC_ATMEL))
2663 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2664 tp->nvram_pagesize) +
2665 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2667 return addr;
2668 }
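/*
 * Editor's note -- worked example, not part of the driver. Atmel
 * AT45DB0x1B parts use 264-byte pages that sit on 512-byte (1 << 9)
 * address boundaries, so linear NVRAM offsets and chip addresses differ;
 * the two helpers above translate between them. A standalone sketch,
 * assuming nvram_pagesize = 264 and ATMEL_AT45DB0X1B_PAGE_POS = 9:
 */
#if 0 /* illustrative only */
#include <assert.h>
#include <stdint.h>

#define AT45_PAGE_SIZE 264 /* assumed tp->nvram_pagesize */
#define AT45_PAGE_POS  9   /* assumed ATMEL_AT45DB0X1B_PAGE_POS */

/* Mirrors tg3_nvram_phys_addr(): linear offset -> chip address. */
static uint32_t at45_phys(uint32_t addr)
{
	return ((addr / AT45_PAGE_SIZE) << AT45_PAGE_POS) +
	       (addr % AT45_PAGE_SIZE);
}

/* Mirrors tg3_nvram_logical_addr(): chip address -> linear offset. */
static uint32_t at45_logical(uint32_t addr)
{
	return ((addr >> AT45_PAGE_POS) * AT45_PAGE_SIZE) +
	       (addr & ((1 << AT45_PAGE_POS) - 1));
}

static void at45_roundtrip_check(void)
{
	/* Linear offset 1000 is page 3, byte 208, i.e. chip address
	 * (3 << 9) + 208 = 1744 -- and it translates back again.
	 */
	assert(at45_phys(1000) == 1744);
	assert(at45_logical(1744) == 1000);
}
#endif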
2670 /* NOTE: Data read in from NVRAM is byteswapped according to
2671 * the byteswapping settings for all other register accesses.
2672 * tg3 devices are BE devices, so on a BE machine, the data
2673 * returned will be exactly as it is seen in NVRAM. On a LE
2674 * machine, the 32-bit value will be byteswapped.
2675 */
2676 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2678 int ret;
2680 if (!tg3_flag(tp, NVRAM))
2681 return tg3_nvram_read_using_eeprom(tp, offset, val);
2683 offset = tg3_nvram_phys_addr(tp, offset);
2685 if (offset > NVRAM_ADDR_MSK)
2686 return -EINVAL;
2688 ret = tg3_nvram_lock(tp);
2689 if (ret)
2690 return ret;
2692 tg3_enable_nvram_access(tp);
2694 tw32(NVRAM_ADDR, offset);
2695 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2696 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2698 if (ret == 0)
2699 *val = tr32(NVRAM_RDDATA);
2701 tg3_disable_nvram_access(tp);
2703 tg3_nvram_unlock(tp);
2705 return ret;
2708 /* Ensures NVRAM data is in bytestream format. */
2709 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2710 {
2711 u32 v;
2712 int res = tg3_nvram_read(tp, offset, &v);
2713 if (!res)
2714 *val = cpu_to_be32(v);
2715 return res;
2716 }
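/*
 * Editor's note -- usage sketch with a hypothetical helper, not part of
 * the driver. Because tg3_nvram_read_be32() yields bytestream
 * (big-endian) order regardless of host endianness, its output can be
 * copied straight into a byte buffer:
 */
#if 0 /* illustrative only */
static int example_read_nvram_bytes(struct tg3 *tp, u32 offset,
				    u8 *buf, unsigned int words)
{
	unsigned int i;

	for (i = 0; i < words; i++) {
		__be32 v;
		int err = tg3_nvram_read_be32(tp, offset + 4 * i, &v);

		if (err)
			return err;
		/* v is already in bytestream order; no further swapping */
		memcpy(buf + 4 * i, &v, sizeof(v));
	}
	return 0;
}
#endif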
2718 /* tp->lock is held. */
2719 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2721 u32 addr_high, addr_low;
2722 int i;
2724 addr_high = ((tp->dev->dev_addr[0] << 8) |
2725 tp->dev->dev_addr[1]);
2726 addr_low = ((tp->dev->dev_addr[2] << 24) |
2727 (tp->dev->dev_addr[3] << 16) |
2728 (tp->dev->dev_addr[4] << 8) |
2729 (tp->dev->dev_addr[5] << 0));
2730 for (i = 0; i < 4; i++) {
2731 if (i == 1 && skip_mac_1)
2732 continue;
2733 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2734 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2737 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2738 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2739 for (i = 0; i < 12; i++) {
2740 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2741 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2745 addr_high = (tp->dev->dev_addr[0] +
2746 tp->dev->dev_addr[1] +
2747 tp->dev->dev_addr[2] +
2748 tp->dev->dev_addr[3] +
2749 tp->dev->dev_addr[4] +
2750 tp->dev->dev_addr[5]) &
2751 TX_BACKOFF_SEED_MASK;
2752 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2755 static void tg3_enable_register_access(struct tg3 *tp)
2756 {
2757 /*
2758 * Make sure register accesses (indirect or otherwise) will function
2759 * correctly.
2760 */
2761 pci_write_config_dword(tp->pdev,
2762 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2765 static int tg3_power_up(struct tg3 *tp)
2767 int err;
2769 tg3_enable_register_access(tp);
2771 err = pci_set_power_state(tp->pdev, PCI_D0);
2772 if (!err) {
2773 /* Switch out of Vaux if it is a NIC */
2774 tg3_pwrsrc_switch_to_vmain(tp);
2775 } else {
2776 netdev_err(tp->dev, "Transition to D0 failed\n");
2779 return err;
2782 static int tg3_power_down_prepare(struct tg3 *tp)
2784 u32 misc_host_ctrl;
2785 bool device_should_wake, do_low_power;
2787 tg3_enable_register_access(tp);
2789 /* Restore the CLKREQ setting. */
2790 if (tg3_flag(tp, CLKREQ_BUG)) {
2791 u16 lnkctl;
2793 pci_read_config_word(tp->pdev,
2794 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2795 &lnkctl);
2796 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2797 pci_write_config_word(tp->pdev,
2798 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2799 lnkctl);
2802 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2803 tw32(TG3PCI_MISC_HOST_CTRL,
2804 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2806 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2807 tg3_flag(tp, WOL_ENABLE);
2809 if (tg3_flag(tp, USE_PHYLIB)) {
2810 do_low_power = false;
2811 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2812 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2813 struct phy_device *phydev;
2814 u32 phyid, advertising;
2816 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2818 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2820 tp->link_config.orig_speed = phydev->speed;
2821 tp->link_config.orig_duplex = phydev->duplex;
2822 tp->link_config.orig_autoneg = phydev->autoneg;
2823 tp->link_config.orig_advertising = phydev->advertising;
2825 advertising = ADVERTISED_TP |
2826 ADVERTISED_Pause |
2827 ADVERTISED_Autoneg |
2828 ADVERTISED_10baseT_Half;
2830 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2831 if (tg3_flag(tp, WOL_SPEED_100MB))
2832 advertising |=
2833 ADVERTISED_100baseT_Half |
2834 ADVERTISED_100baseT_Full |
2835 ADVERTISED_10baseT_Full;
2836 else
2837 advertising |= ADVERTISED_10baseT_Full;
2840 phydev->advertising = advertising;
2842 phy_start_aneg(phydev);
2844 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2845 if (phyid != PHY_ID_BCMAC131) {
2846 phyid &= PHY_BCM_OUI_MASK;
2847 if (phyid == PHY_BCM_OUI_1 ||
2848 phyid == PHY_BCM_OUI_2 ||
2849 phyid == PHY_BCM_OUI_3)
2850 do_low_power = true;
2853 } else {
2854 do_low_power = true;
2856 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2857 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2858 tp->link_config.orig_speed = tp->link_config.speed;
2859 tp->link_config.orig_duplex = tp->link_config.duplex;
2860 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2863 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2864 tp->link_config.speed = SPEED_10;
2865 tp->link_config.duplex = DUPLEX_HALF;
2866 tp->link_config.autoneg = AUTONEG_ENABLE;
2867 tg3_setup_phy(tp, 0);
2871 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2872 u32 val;
2874 val = tr32(GRC_VCPU_EXT_CTRL);
2875 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2876 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2877 int i;
2878 u32 val;
2880 for (i = 0; i < 200; i++) {
2881 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2882 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2883 break;
2884 msleep(1);
2887 if (tg3_flag(tp, WOL_CAP))
2888 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2889 WOL_DRV_STATE_SHUTDOWN |
2890 WOL_DRV_WOL |
2891 WOL_SET_MAGIC_PKT);
2893 if (device_should_wake) {
2894 u32 mac_mode;
2896 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2897 if (do_low_power &&
2898 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2899 tg3_phy_auxctl_write(tp,
2900 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2901 MII_TG3_AUXCTL_PCTL_WOL_EN |
2902 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2903 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2904 udelay(40);
2907 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2908 mac_mode = MAC_MODE_PORT_MODE_GMII;
2909 else
2910 mac_mode = MAC_MODE_PORT_MODE_MII;
2912 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2913 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2914 ASIC_REV_5700) {
2915 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2916 SPEED_100 : SPEED_10;
2917 if (tg3_5700_link_polarity(tp, speed))
2918 mac_mode |= MAC_MODE_LINK_POLARITY;
2919 else
2920 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2922 } else {
2923 mac_mode = MAC_MODE_PORT_MODE_TBI;
2926 if (!tg3_flag(tp, 5750_PLUS))
2927 tw32(MAC_LED_CTRL, tp->led_ctrl);
2929 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2930 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2931 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2932 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2934 if (tg3_flag(tp, ENABLE_APE))
2935 mac_mode |= MAC_MODE_APE_TX_EN |
2936 MAC_MODE_APE_RX_EN |
2937 MAC_MODE_TDE_ENABLE;
2939 tw32_f(MAC_MODE, mac_mode);
2940 udelay(100);
2942 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2943 udelay(10);
2946 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2947 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2948 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2949 u32 base_val;
2951 base_val = tp->pci_clock_ctrl;
2952 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2953 CLOCK_CTRL_TXCLK_DISABLE);
2955 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2956 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2957 } else if (tg3_flag(tp, 5780_CLASS) ||
2958 tg3_flag(tp, CPMU_PRESENT) ||
2959 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2960 /* do nothing */
2961 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2962 u32 newbits1, newbits2;
2964 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2965 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2966 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2967 CLOCK_CTRL_TXCLK_DISABLE |
2968 CLOCK_CTRL_ALTCLK);
2969 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2970 } else if (tg3_flag(tp, 5705_PLUS)) {
2971 newbits1 = CLOCK_CTRL_625_CORE;
2972 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2973 } else {
2974 newbits1 = CLOCK_CTRL_ALTCLK;
2975 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2978 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2979 40);
2981 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2982 40);
2984 if (!tg3_flag(tp, 5705_PLUS)) {
2985 u32 newbits3;
2987 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2988 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2989 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2990 CLOCK_CTRL_TXCLK_DISABLE |
2991 CLOCK_CTRL_44MHZ_CORE);
2992 } else {
2993 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2996 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2997 tp->pci_clock_ctrl | newbits3, 40);
3001 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3002 tg3_power_down_phy(tp, do_low_power);
3004 tg3_frob_aux_power(tp, true);
3006 /* Workaround for unstable PLL clock */
3007 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3008 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3009 u32 val = tr32(0x7d00);
3011 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3012 tw32(0x7d00, val);
3013 if (!tg3_flag(tp, ENABLE_ASF)) {
3014 int err;
3016 err = tg3_nvram_lock(tp);
3017 tg3_halt_cpu(tp, RX_CPU_BASE);
3018 if (!err)
3019 tg3_nvram_unlock(tp);
3023 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3025 return 0;
3028 static void tg3_power_down(struct tg3 *tp)
3030 tg3_power_down_prepare(tp);
3032 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3033 pci_set_power_state(tp->pdev, PCI_D3hot);
3034 }
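/* Editor's note: ordering matters above -- tg3_power_down_prepare() stages
 * WOL/ASF state, PHY power, and GPIO power sourcing while the device is
 * still in D0; only then does tg3_power_down() arm PME via
 * pci_wake_from_d3() and drop the function to D3hot.
 */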
3036 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3038 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3039 case MII_TG3_AUX_STAT_10HALF:
3040 *speed = SPEED_10;
3041 *duplex = DUPLEX_HALF;
3042 break;
3044 case MII_TG3_AUX_STAT_10FULL:
3045 *speed = SPEED_10;
3046 *duplex = DUPLEX_FULL;
3047 break;
3049 case MII_TG3_AUX_STAT_100HALF:
3050 *speed = SPEED_100;
3051 *duplex = DUPLEX_HALF;
3052 break;
3054 case MII_TG3_AUX_STAT_100FULL:
3055 *speed = SPEED_100;
3056 *duplex = DUPLEX_FULL;
3057 break;
3059 case MII_TG3_AUX_STAT_1000HALF:
3060 *speed = SPEED_1000;
3061 *duplex = DUPLEX_HALF;
3062 break;
3064 case MII_TG3_AUX_STAT_1000FULL:
3065 *speed = SPEED_1000;
3066 *duplex = DUPLEX_FULL;
3067 break;
3069 default:
3070 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3071 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3072 SPEED_10;
3073 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3074 DUPLEX_HALF;
3075 break;
3077 *speed = SPEED_INVALID;
3078 *duplex = DUPLEX_INVALID;
3079 break;
3083 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3085 int err = 0;
3086 u32 val, new_adv;
3088 new_adv = ADVERTISE_CSMA;
3089 if (advertise & ADVERTISED_10baseT_Half)
3090 new_adv |= ADVERTISE_10HALF;
3091 if (advertise & ADVERTISED_10baseT_Full)
3092 new_adv |= ADVERTISE_10FULL;
3093 if (advertise & ADVERTISED_100baseT_Half)
3094 new_adv |= ADVERTISE_100HALF;
3095 if (advertise & ADVERTISED_100baseT_Full)
3096 new_adv |= ADVERTISE_100FULL;
3098 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3100 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3101 if (err)
3102 goto done;
3104 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3105 goto done;
3107 new_adv = 0;
3108 if (advertise & ADVERTISED_1000baseT_Half)
3109 new_adv |= ADVERTISE_1000HALF;
3110 if (advertise & ADVERTISED_1000baseT_Full)
3111 new_adv |= ADVERTISE_1000FULL;
3113 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3114 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3115 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3117 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3118 if (err)
3119 goto done;
3121 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3122 goto done;
3124 tw32(TG3_CPMU_EEE_MODE,
3125 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3127 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3128 if (!err) {
3129 u32 err2;
3131 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3132 case ASIC_REV_5717:
3133 case ASIC_REV_57765:
3134 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3135 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3136 MII_TG3_DSP_CH34TP2_HIBW01);
3137 /* Fall through */
3138 case ASIC_REV_5719:
3139 val = MII_TG3_DSP_TAP26_ALNOKO |
3140 MII_TG3_DSP_TAP26_RMRXSTO |
3141 MII_TG3_DSP_TAP26_OPCSINPT;
3142 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3145 val = 0;
3146 /* Advertise 100-BaseTX EEE ability */
3147 if (advertise & ADVERTISED_100baseT_Full)
3148 val |= MDIO_AN_EEE_ADV_100TX;
3149 /* Advertise 1000-BaseT EEE ability */
3150 if (advertise & ADVERTISED_1000baseT_Full)
3151 val |= MDIO_AN_EEE_ADV_1000T;
3152 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3154 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3155 if (!err)
3156 err = err2;
3159 done:
3160 return err;
3163 static void tg3_phy_copper_begin(struct tg3 *tp)
3165 u32 new_adv;
3166 int i;
3168 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3169 new_adv = ADVERTISED_10baseT_Half |
3170 ADVERTISED_10baseT_Full;
3171 if (tg3_flag(tp, WOL_SPEED_100MB))
3172 new_adv |= ADVERTISED_100baseT_Half |
3173 ADVERTISED_100baseT_Full;
3175 tg3_phy_autoneg_cfg(tp, new_adv,
3176 FLOW_CTRL_TX | FLOW_CTRL_RX);
3177 } else if (tp->link_config.speed == SPEED_INVALID) {
3178 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3179 tp->link_config.advertising &=
3180 ~(ADVERTISED_1000baseT_Half |
3181 ADVERTISED_1000baseT_Full);
3183 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3184 tp->link_config.flowctrl);
3185 } else {
3186 /* Asking for a specific link mode. */
3187 if (tp->link_config.speed == SPEED_1000) {
3188 if (tp->link_config.duplex == DUPLEX_FULL)
3189 new_adv = ADVERTISED_1000baseT_Full;
3190 else
3191 new_adv = ADVERTISED_1000baseT_Half;
3192 } else if (tp->link_config.speed == SPEED_100) {
3193 if (tp->link_config.duplex == DUPLEX_FULL)
3194 new_adv = ADVERTISED_100baseT_Full;
3195 else
3196 new_adv = ADVERTISED_100baseT_Half;
3197 } else {
3198 if (tp->link_config.duplex == DUPLEX_FULL)
3199 new_adv = ADVERTISED_10baseT_Full;
3200 else
3201 new_adv = ADVERTISED_10baseT_Half;
3204 tg3_phy_autoneg_cfg(tp, new_adv,
3205 tp->link_config.flowctrl);
3208 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3209 tp->link_config.speed != SPEED_INVALID) {
3210 u32 bmcr, orig_bmcr;
3212 tp->link_config.active_speed = tp->link_config.speed;
3213 tp->link_config.active_duplex = tp->link_config.duplex;
3215 bmcr = 0;
3216 switch (tp->link_config.speed) {
3217 default:
3218 case SPEED_10:
3219 break;
3221 case SPEED_100:
3222 bmcr |= BMCR_SPEED100;
3223 break;
3225 case SPEED_1000:
3226 bmcr |= BMCR_SPEED1000;
3227 break;
3230 if (tp->link_config.duplex == DUPLEX_FULL)
3231 bmcr |= BMCR_FULLDPLX;
3233 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3234 (bmcr != orig_bmcr)) {
3235 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3236 for (i = 0; i < 1500; i++) {
3237 u32 tmp;
3239 udelay(10);
3240 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3241 tg3_readphy(tp, MII_BMSR, &tmp))
3242 continue;
3243 if (!(tmp & BMSR_LSTATUS)) {
3244 udelay(40);
3245 break;
3248 tg3_writephy(tp, MII_BMCR, bmcr);
3249 udelay(40);
3251 } else {
3252 tg3_writephy(tp, MII_BMCR,
3253 BMCR_ANENABLE | BMCR_ANRESTART);
3257 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3259 int err;
3261 /* Turn off tap power management. */
3262 /* Set Extended packet length bit */
3263 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3265 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3266 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3267 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3268 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3269 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3271 udelay(40);
3273 return err;
3276 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3278 u32 adv_reg, all_mask = 0;
3280 if (mask & ADVERTISED_10baseT_Half)
3281 all_mask |= ADVERTISE_10HALF;
3282 if (mask & ADVERTISED_10baseT_Full)
3283 all_mask |= ADVERTISE_10FULL;
3284 if (mask & ADVERTISED_100baseT_Half)
3285 all_mask |= ADVERTISE_100HALF;
3286 if (mask & ADVERTISED_100baseT_Full)
3287 all_mask |= ADVERTISE_100FULL;
3289 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3290 return 0;
3292 if ((adv_reg & all_mask) != all_mask)
3293 return 0;
3294 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3295 u32 tg3_ctrl;
3297 all_mask = 0;
3298 if (mask & ADVERTISED_1000baseT_Half)
3299 all_mask |= ADVERTISE_1000HALF;
3300 if (mask & ADVERTISED_1000baseT_Full)
3301 all_mask |= ADVERTISE_1000FULL;
3303 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3304 return 0;
3306 if ((tg3_ctrl & all_mask) != all_mask)
3307 return 0;
3309 return 1;
3312 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3314 u32 curadv, reqadv;
3316 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3317 return 1;
3319 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3320 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3322 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3323 if (curadv != reqadv)
3324 return 0;
3326 if (tg3_flag(tp, PAUSE_AUTONEG))
3327 tg3_readphy(tp, MII_LPA, rmtadv);
3328 } else {
3329 /* Reprogram the advertisement register, even if it
3330 * does not affect the current link. If the link
3331 * gets renegotiated in the future, we can save an
3332 * additional renegotiation cycle by advertising
3333 * it correctly in the first place.
3334 */
3335 if (curadv != reqadv) {
3336 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3337 ADVERTISE_PAUSE_ASYM);
3338 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3342 return 1;
3345 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3347 int current_link_up;
3348 u32 bmsr, val;
3349 u32 lcl_adv, rmt_adv;
3350 u16 current_speed;
3351 u8 current_duplex;
3352 int i, err;
3354 tw32(MAC_EVENT, 0);
3356 tw32_f(MAC_STATUS,
3357 (MAC_STATUS_SYNC_CHANGED |
3358 MAC_STATUS_CFG_CHANGED |
3359 MAC_STATUS_MI_COMPLETION |
3360 MAC_STATUS_LNKSTATE_CHANGED));
3361 udelay(40);
3363 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3364 tw32_f(MAC_MI_MODE,
3365 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3366 udelay(80);
3369 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3371 /* Some third-party PHYs need to be reset on link going
3372 * down.
3373 */
3374 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3375 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3376 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3377 netif_carrier_ok(tp->dev)) {
3378 tg3_readphy(tp, MII_BMSR, &bmsr);
3379 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3380 !(bmsr & BMSR_LSTATUS))
3381 force_reset = 1;
3383 if (force_reset)
3384 tg3_phy_reset(tp);
3386 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3387 tg3_readphy(tp, MII_BMSR, &bmsr);
3388 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3389 !tg3_flag(tp, INIT_COMPLETE))
3390 bmsr = 0;
3392 if (!(bmsr & BMSR_LSTATUS)) {
3393 err = tg3_init_5401phy_dsp(tp);
3394 if (err)
3395 return err;
3397 tg3_readphy(tp, MII_BMSR, &bmsr);
3398 for (i = 0; i < 1000; i++) {
3399 udelay(10);
3400 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3401 (bmsr & BMSR_LSTATUS)) {
3402 udelay(40);
3403 break;
3407 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3408 TG3_PHY_REV_BCM5401_B0 &&
3409 !(bmsr & BMSR_LSTATUS) &&
3410 tp->link_config.active_speed == SPEED_1000) {
3411 err = tg3_phy_reset(tp);
3412 if (!err)
3413 err = tg3_init_5401phy_dsp(tp);
3414 if (err)
3415 return err;
3418 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3419 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3420 /* 5701 {A0,B0} CRC bug workaround */
3421 tg3_writephy(tp, 0x15, 0x0a75);
3422 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3423 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3424 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3427 /* Clear pending interrupts... */
3428 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3429 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3431 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3432 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3433 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3434 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3437 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3438 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3439 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3440 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3441 else
3442 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3445 current_link_up = 0;
3446 current_speed = SPEED_INVALID;
3447 current_duplex = DUPLEX_INVALID;
3449 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3450 err = tg3_phy_auxctl_read(tp,
3451 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3452 &val);
3453 if (!err && !(val & (1 << 10))) {
3454 tg3_phy_auxctl_write(tp,
3455 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3456 val | (1 << 10));
3457 goto relink;
3461 bmsr = 0;
3462 for (i = 0; i < 100; i++) {
3463 tg3_readphy(tp, MII_BMSR, &bmsr);
3464 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3465 (bmsr & BMSR_LSTATUS))
3466 break;
3467 udelay(40);
3470 if (bmsr & BMSR_LSTATUS) {
3471 u32 aux_stat, bmcr;
3473 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3474 for (i = 0; i < 2000; i++) {
3475 udelay(10);
3476 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3477 aux_stat)
3478 break;
3481 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3482 &current_speed,
3483 &current_duplex);
3485 bmcr = 0;
3486 for (i = 0; i < 200; i++) {
3487 tg3_readphy(tp, MII_BMCR, &bmcr);
3488 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3489 continue;
3490 if (bmcr && bmcr != 0x7fff)
3491 break;
3492 udelay(10);
3495 lcl_adv = 0;
3496 rmt_adv = 0;
3498 tp->link_config.active_speed = current_speed;
3499 tp->link_config.active_duplex = current_duplex;
3501 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3502 if ((bmcr & BMCR_ANENABLE) &&
3503 tg3_copper_is_advertising_all(tp,
3504 tp->link_config.advertising)) {
3505 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3506 &rmt_adv))
3507 current_link_up = 1;
3509 } else {
3510 if (!(bmcr & BMCR_ANENABLE) &&
3511 tp->link_config.speed == current_speed &&
3512 tp->link_config.duplex == current_duplex &&
3513 tp->link_config.flowctrl ==
3514 tp->link_config.active_flowctrl) {
3515 current_link_up = 1;
3519 if (current_link_up == 1 &&
3520 tp->link_config.active_duplex == DUPLEX_FULL)
3521 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3524 relink:
3525 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3526 tg3_phy_copper_begin(tp);
3528 tg3_readphy(tp, MII_BMSR, &bmsr);
3529 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3530 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3531 current_link_up = 1;
3534 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3535 if (current_link_up == 1) {
3536 if (tp->link_config.active_speed == SPEED_100 ||
3537 tp->link_config.active_speed == SPEED_10)
3538 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3539 else
3540 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3541 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3542 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3543 else
3544 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3546 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3547 if (tp->link_config.active_duplex == DUPLEX_HALF)
3548 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3550 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3551 if (current_link_up == 1 &&
3552 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3553 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3554 else
3555 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3558 /* ??? Without this setting Netgear GA302T PHY does not
3559 * ??? send/receive packets...
3560 */
3561 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3562 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3563 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3564 tw32_f(MAC_MI_MODE, tp->mi_mode);
3565 udelay(80);
3568 tw32_f(MAC_MODE, tp->mac_mode);
3569 udelay(40);
3571 tg3_phy_eee_adjust(tp, current_link_up);
3573 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3574 /* Polled via timer. */
3575 tw32_f(MAC_EVENT, 0);
3576 } else {
3577 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3579 udelay(40);
3581 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3582 current_link_up == 1 &&
3583 tp->link_config.active_speed == SPEED_1000 &&
3584 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3585 udelay(120);
3586 tw32_f(MAC_STATUS,
3587 (MAC_STATUS_SYNC_CHANGED |
3588 MAC_STATUS_CFG_CHANGED));
3589 udelay(40);
3590 tg3_write_mem(tp,
3591 NIC_SRAM_FIRMWARE_MBOX,
3592 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3595 /* Prevent send BD corruption. */
3596 if (tg3_flag(tp, CLKREQ_BUG)) {
3597 u16 oldlnkctl, newlnkctl;
3599 pci_read_config_word(tp->pdev,
3600 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3601 &oldlnkctl);
3602 if (tp->link_config.active_speed == SPEED_100 ||
3603 tp->link_config.active_speed == SPEED_10)
3604 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3605 else
3606 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3607 if (newlnkctl != oldlnkctl)
3608 pci_write_config_word(tp->pdev,
3609 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3610 newlnkctl);
3613 if (current_link_up != netif_carrier_ok(tp->dev)) {
3614 if (current_link_up)
3615 netif_carrier_on(tp->dev);
3616 else
3617 netif_carrier_off(tp->dev);
3618 tg3_link_report(tp);
3621 return 0;
3622 }
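/* Editor's note, summarizing the flow of tg3_setup_copper_phy() above:
 * (1) quiesce MAC events and MI auto-polling, (2) reset the PHY when
 * forced or when buggy third-party PHYs lose link, (3) poll BMSR for
 * link, (4) derive speed/duplex from MII_TG3_AUX_STAT, (5) accept the
 * link only if autoneg results (or forced settings) match the requested
 * configuration, then (6) reprogram MAC_MODE and report carrier changes.
 */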
3624 struct tg3_fiber_aneginfo {
3625 int state;
3626 #define ANEG_STATE_UNKNOWN 0
3627 #define ANEG_STATE_AN_ENABLE 1
3628 #define ANEG_STATE_RESTART_INIT 2
3629 #define ANEG_STATE_RESTART 3
3630 #define ANEG_STATE_DISABLE_LINK_OK 4
3631 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3632 #define ANEG_STATE_ABILITY_DETECT 6
3633 #define ANEG_STATE_ACK_DETECT_INIT 7
3634 #define ANEG_STATE_ACK_DETECT 8
3635 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3636 #define ANEG_STATE_COMPLETE_ACK 10
3637 #define ANEG_STATE_IDLE_DETECT_INIT 11
3638 #define ANEG_STATE_IDLE_DETECT 12
3639 #define ANEG_STATE_LINK_OK 13
3640 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3641 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3643 u32 flags;
3644 #define MR_AN_ENABLE 0x00000001
3645 #define MR_RESTART_AN 0x00000002
3646 #define MR_AN_COMPLETE 0x00000004
3647 #define MR_PAGE_RX 0x00000008
3648 #define MR_NP_LOADED 0x00000010
3649 #define MR_TOGGLE_TX 0x00000020
3650 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3651 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3652 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3653 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3654 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3655 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3656 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3657 #define MR_TOGGLE_RX 0x00002000
3658 #define MR_NP_RX 0x00004000
3660 #define MR_LINK_OK 0x80000000
3662 unsigned long link_time, cur_time;
3664 u32 ability_match_cfg;
3665 int ability_match_count;
3667 char ability_match, idle_match, ack_match;
3669 u32 txconfig, rxconfig;
3670 #define ANEG_CFG_NP 0x00000080
3671 #define ANEG_CFG_ACK 0x00000040
3672 #define ANEG_CFG_RF2 0x00000020
3673 #define ANEG_CFG_RF1 0x00000010
3674 #define ANEG_CFG_PS2 0x00000001
3675 #define ANEG_CFG_PS1 0x00008000
3676 #define ANEG_CFG_HD 0x00004000
3677 #define ANEG_CFG_FD 0x00002000
3678 #define ANEG_CFG_INVAL 0x00001f06
3681 #define ANEG_OK 0
3682 #define ANEG_DONE 1
3683 #define ANEG_TIMER_ENAB 2
3684 #define ANEG_FAILED -1
3686 #define ANEG_STATE_SETTLE_TIME 10000
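/* Editor's note: the states above implement an 802.3 Clause 37-style
 * 1000BASE-X autoneg arbitration in software: restart -> ability detect
 * (stable received config words) -> ack detect -> complete ack -> idle
 * detect -> link OK. ANEG_STATE_SETTLE_TIME is measured in state-machine
 * ticks; fiber_autoneg() below advances one tick per ~1us, so 10000
 * ticks is roughly a 10ms settle window.
 */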
3688 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3689 struct tg3_fiber_aneginfo *ap)
3691 u16 flowctrl;
3692 unsigned long delta;
3693 u32 rx_cfg_reg;
3694 int ret;
3696 if (ap->state == ANEG_STATE_UNKNOWN) {
3697 ap->rxconfig = 0;
3698 ap->link_time = 0;
3699 ap->cur_time = 0;
3700 ap->ability_match_cfg = 0;
3701 ap->ability_match_count = 0;
3702 ap->ability_match = 0;
3703 ap->idle_match = 0;
3704 ap->ack_match = 0;
3706 ap->cur_time++;
3708 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3709 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3711 if (rx_cfg_reg != ap->ability_match_cfg) {
3712 ap->ability_match_cfg = rx_cfg_reg;
3713 ap->ability_match = 0;
3714 ap->ability_match_count = 0;
3715 } else {
3716 if (++ap->ability_match_count > 1) {
3717 ap->ability_match = 1;
3718 ap->ability_match_cfg = rx_cfg_reg;
3721 if (rx_cfg_reg & ANEG_CFG_ACK)
3722 ap->ack_match = 1;
3723 else
3724 ap->ack_match = 0;
3726 ap->idle_match = 0;
3727 } else {
3728 ap->idle_match = 1;
3729 ap->ability_match_cfg = 0;
3730 ap->ability_match_count = 0;
3731 ap->ability_match = 0;
3732 ap->ack_match = 0;
3734 rx_cfg_reg = 0;
3737 ap->rxconfig = rx_cfg_reg;
3738 ret = ANEG_OK;
3740 switch (ap->state) {
3741 case ANEG_STATE_UNKNOWN:
3742 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3743 ap->state = ANEG_STATE_AN_ENABLE;
3745 /* fallthru */
3746 case ANEG_STATE_AN_ENABLE:
3747 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3748 if (ap->flags & MR_AN_ENABLE) {
3749 ap->link_time = 0;
3750 ap->cur_time = 0;
3751 ap->ability_match_cfg = 0;
3752 ap->ability_match_count = 0;
3753 ap->ability_match = 0;
3754 ap->idle_match = 0;
3755 ap->ack_match = 0;
3757 ap->state = ANEG_STATE_RESTART_INIT;
3758 } else {
3759 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3761 break;
3763 case ANEG_STATE_RESTART_INIT:
3764 ap->link_time = ap->cur_time;
3765 ap->flags &= ~(MR_NP_LOADED);
3766 ap->txconfig = 0;
3767 tw32(MAC_TX_AUTO_NEG, 0);
3768 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3769 tw32_f(MAC_MODE, tp->mac_mode);
3770 udelay(40);
3772 ret = ANEG_TIMER_ENAB;
3773 ap->state = ANEG_STATE_RESTART;
3775 /* fallthru */
3776 case ANEG_STATE_RESTART:
3777 delta = ap->cur_time - ap->link_time;
3778 if (delta > ANEG_STATE_SETTLE_TIME)
3779 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3780 else
3781 ret = ANEG_TIMER_ENAB;
3782 break;
3784 case ANEG_STATE_DISABLE_LINK_OK:
3785 ret = ANEG_DONE;
3786 break;
3788 case ANEG_STATE_ABILITY_DETECT_INIT:
3789 ap->flags &= ~(MR_TOGGLE_TX);
3790 ap->txconfig = ANEG_CFG_FD;
3791 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3792 if (flowctrl & ADVERTISE_1000XPAUSE)
3793 ap->txconfig |= ANEG_CFG_PS1;
3794 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3795 ap->txconfig |= ANEG_CFG_PS2;
3796 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3797 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3798 tw32_f(MAC_MODE, tp->mac_mode);
3799 udelay(40);
3801 ap->state = ANEG_STATE_ABILITY_DETECT;
3802 break;
3804 case ANEG_STATE_ABILITY_DETECT:
3805 if (ap->ability_match != 0 && ap->rxconfig != 0)
3806 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3807 break;
3809 case ANEG_STATE_ACK_DETECT_INIT:
3810 ap->txconfig |= ANEG_CFG_ACK;
3811 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3812 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3813 tw32_f(MAC_MODE, tp->mac_mode);
3814 udelay(40);
3816 ap->state = ANEG_STATE_ACK_DETECT;
3818 /* fallthru */
3819 case ANEG_STATE_ACK_DETECT:
3820 if (ap->ack_match != 0) {
3821 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3822 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3823 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3824 } else {
3825 ap->state = ANEG_STATE_AN_ENABLE;
3827 } else if (ap->ability_match != 0 &&
3828 ap->rxconfig == 0) {
3829 ap->state = ANEG_STATE_AN_ENABLE;
3831 break;
3833 case ANEG_STATE_COMPLETE_ACK_INIT:
3834 if (ap->rxconfig & ANEG_CFG_INVAL) {
3835 ret = ANEG_FAILED;
3836 break;
3838 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3839 MR_LP_ADV_HALF_DUPLEX |
3840 MR_LP_ADV_SYM_PAUSE |
3841 MR_LP_ADV_ASYM_PAUSE |
3842 MR_LP_ADV_REMOTE_FAULT1 |
3843 MR_LP_ADV_REMOTE_FAULT2 |
3844 MR_LP_ADV_NEXT_PAGE |
3845 MR_TOGGLE_RX |
3846 MR_NP_RX);
3847 if (ap->rxconfig & ANEG_CFG_FD)
3848 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3849 if (ap->rxconfig & ANEG_CFG_HD)
3850 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3851 if (ap->rxconfig & ANEG_CFG_PS1)
3852 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3853 if (ap->rxconfig & ANEG_CFG_PS2)
3854 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3855 if (ap->rxconfig & ANEG_CFG_RF1)
3856 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3857 if (ap->rxconfig & ANEG_CFG_RF2)
3858 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3859 if (ap->rxconfig & ANEG_CFG_NP)
3860 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3862 ap->link_time = ap->cur_time;
3864 ap->flags ^= (MR_TOGGLE_TX);
3865 if (ap->rxconfig & 0x0008)
3866 ap->flags |= MR_TOGGLE_RX;
3867 if (ap->rxconfig & ANEG_CFG_NP)
3868 ap->flags |= MR_NP_RX;
3869 ap->flags |= MR_PAGE_RX;
3871 ap->state = ANEG_STATE_COMPLETE_ACK;
3872 ret = ANEG_TIMER_ENAB;
3873 break;
3875 case ANEG_STATE_COMPLETE_ACK:
3876 if (ap->ability_match != 0 &&
3877 ap->rxconfig == 0) {
3878 ap->state = ANEG_STATE_AN_ENABLE;
3879 break;
3881 delta = ap->cur_time - ap->link_time;
3882 if (delta > ANEG_STATE_SETTLE_TIME) {
3883 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3884 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3885 } else {
3886 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3887 !(ap->flags & MR_NP_RX)) {
3888 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3889 } else {
3890 ret = ANEG_FAILED;
3894 break;
3896 case ANEG_STATE_IDLE_DETECT_INIT:
3897 ap->link_time = ap->cur_time;
3898 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3899 tw32_f(MAC_MODE, tp->mac_mode);
3900 udelay(40);
3902 ap->state = ANEG_STATE_IDLE_DETECT;
3903 ret = ANEG_TIMER_ENAB;
3904 break;
3906 case ANEG_STATE_IDLE_DETECT:
3907 if (ap->ability_match != 0 &&
3908 ap->rxconfig == 0) {
3909 ap->state = ANEG_STATE_AN_ENABLE;
3910 break;
3912 delta = ap->cur_time - ap->link_time;
3913 if (delta > ANEG_STATE_SETTLE_TIME) {
3914 /* XXX another gem from the Broadcom driver :( */
3915 ap->state = ANEG_STATE_LINK_OK;
3917 break;
3919 case ANEG_STATE_LINK_OK:
3920 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3921 ret = ANEG_DONE;
3922 break;
3924 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3925 /* ??? unimplemented */
3926 break;
3928 case ANEG_STATE_NEXT_PAGE_WAIT:
3929 /* ??? unimplemented */
3930 break;
3932 default:
3933 ret = ANEG_FAILED;
3934 break;
3937 return ret;
3940 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3942 int res = 0;
3943 struct tg3_fiber_aneginfo aninfo;
3944 int status = ANEG_FAILED;
3945 unsigned int tick;
3946 u32 tmp;
3948 tw32_f(MAC_TX_AUTO_NEG, 0);
3950 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3951 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3952 udelay(40);
3954 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3955 udelay(40);
3957 memset(&aninfo, 0, sizeof(aninfo));
3958 aninfo.flags |= MR_AN_ENABLE;
3959 aninfo.state = ANEG_STATE_UNKNOWN;
3960 aninfo.cur_time = 0;
3961 tick = 0;
3962 while (++tick < 195000) {
3963 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3964 if (status == ANEG_DONE || status == ANEG_FAILED)
3965 break;
3967 udelay(1);
3970 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3971 tw32_f(MAC_MODE, tp->mac_mode);
3972 udelay(40);
3974 *txflags = aninfo.txconfig;
3975 *rxflags = aninfo.flags;
3977 if (status == ANEG_DONE &&
3978 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3979 MR_LP_ADV_FULL_DUPLEX)))
3980 res = 1;
3982 return res;
3983 }
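/* Editor's note: fiber_autoneg() gives the state machine up to ~195000
 * ticks at udelay(1) apiece -- roughly 195ms -- before treating the
 * negotiation as failed.
 */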
3985 static void tg3_init_bcm8002(struct tg3 *tp)
3987 u32 mac_status = tr32(MAC_STATUS);
3988 int i;
3990 /* Reset when initting first time or we have a link. */
3991 if (tg3_flag(tp, INIT_COMPLETE) &&
3992 !(mac_status & MAC_STATUS_PCS_SYNCED))
3993 return;
3995 /* Set PLL lock range. */
3996 tg3_writephy(tp, 0x16, 0x8007);
3998 /* SW reset */
3999 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4001 /* Wait for reset to complete. */
4002 /* XXX schedule_timeout() ... */
4003 for (i = 0; i < 500; i++)
4004 udelay(10);
4006 /* Config mode; select PMA/Ch 1 regs. */
4007 tg3_writephy(tp, 0x10, 0x8411);
4009 /* Enable auto-lock and comdet, select txclk for tx. */
4010 tg3_writephy(tp, 0x11, 0x0a10);
4012 tg3_writephy(tp, 0x18, 0x00a0);
4013 tg3_writephy(tp, 0x16, 0x41ff);
4015 /* Assert and deassert POR. */
4016 tg3_writephy(tp, 0x13, 0x0400);
4017 udelay(40);
4018 tg3_writephy(tp, 0x13, 0x0000);
4020 tg3_writephy(tp, 0x11, 0x0a50);
4021 udelay(40);
4022 tg3_writephy(tp, 0x11, 0x0a10);
4024 /* Wait for signal to stabilize */
4025 /* XXX schedule_timeout() ... */
4026 for (i = 0; i < 15000; i++)
4027 udelay(10);
4029 /* Deselect the channel register so we can read the PHYID
4030 * later.
4031 */
4032 tg3_writephy(tp, 0x10, 0x8011);
4033 }
4035 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4037 u16 flowctrl;
4038 u32 sg_dig_ctrl, sg_dig_status;
4039 u32 serdes_cfg, expected_sg_dig_ctrl;
4040 int workaround, port_a;
4041 int current_link_up;
4043 serdes_cfg = 0;
4044 expected_sg_dig_ctrl = 0;
4045 workaround = 0;
4046 port_a = 1;
4047 current_link_up = 0;
4049 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4050 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4051 workaround = 1;
4052 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4053 port_a = 0;
4055 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4056 /* preserve bits 20-23 for voltage regulator */
4057 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4060 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4062 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4063 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4064 if (workaround) {
4065 u32 val = serdes_cfg;
4067 if (port_a)
4068 val |= 0xc010000;
4069 else
4070 val |= 0x4010000;
4071 tw32_f(MAC_SERDES_CFG, val);
4074 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4076 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4077 tg3_setup_flow_control(tp, 0, 0);
4078 current_link_up = 1;
4080 goto out;
4083 /* Want auto-negotiation. */
4084 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4086 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4087 if (flowctrl & ADVERTISE_1000XPAUSE)
4088 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4089 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4090 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4092 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4093 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4094 tp->serdes_counter &&
4095 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4096 MAC_STATUS_RCVD_CFG)) ==
4097 MAC_STATUS_PCS_SYNCED)) {
4098 tp->serdes_counter--;
4099 current_link_up = 1;
4100 goto out;
4102 restart_autoneg:
4103 if (workaround)
4104 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4105 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4106 udelay(5);
4107 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4109 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4110 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4111 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4112 MAC_STATUS_SIGNAL_DET)) {
4113 sg_dig_status = tr32(SG_DIG_STATUS);
4114 mac_status = tr32(MAC_STATUS);
4116 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4117 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4118 u32 local_adv = 0, remote_adv = 0;
4120 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4121 local_adv |= ADVERTISE_1000XPAUSE;
4122 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4123 local_adv |= ADVERTISE_1000XPSE_ASYM;
4125 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4126 remote_adv |= LPA_1000XPAUSE;
4127 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4128 remote_adv |= LPA_1000XPAUSE_ASYM;
4130 tg3_setup_flow_control(tp, local_adv, remote_adv);
4131 current_link_up = 1;
4132 tp->serdes_counter = 0;
4133 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4134 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4135 if (tp->serdes_counter)
4136 tp->serdes_counter--;
4137 else {
4138 if (workaround) {
4139 u32 val = serdes_cfg;
4141 if (port_a)
4142 val |= 0xc010000;
4143 else
4144 val |= 0x4010000;
4146 tw32_f(MAC_SERDES_CFG, val);
4149 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4150 udelay(40);
4152 /* Link parallel detection - link is up */
4153 /* only if we have PCS_SYNC and not */
4154 /* receiving config code words */
4155 mac_status = tr32(MAC_STATUS);
4156 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4157 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4158 tg3_setup_flow_control(tp, 0, 0);
4159 current_link_up = 1;
4160 tp->phy_flags |=
4161 TG3_PHYFLG_PARALLEL_DETECT;
4162 tp->serdes_counter =
4163 SERDES_PARALLEL_DET_TIMEOUT;
4164 } else
4165 goto restart_autoneg;
4168 } else {
4169 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4170 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4173 out:
4174 return current_link_up;
4177 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4179 int current_link_up = 0;
4181 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4182 goto out;
4184 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4185 u32 txflags, rxflags;
4186 int i;
4188 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4189 u32 local_adv = 0, remote_adv = 0;
4191 if (txflags & ANEG_CFG_PS1)
4192 local_adv |= ADVERTISE_1000XPAUSE;
4193 if (txflags & ANEG_CFG_PS2)
4194 local_adv |= ADVERTISE_1000XPSE_ASYM;
4196 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4197 remote_adv |= LPA_1000XPAUSE;
4198 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4199 remote_adv |= LPA_1000XPAUSE_ASYM;
4201 tg3_setup_flow_control(tp, local_adv, remote_adv);
4203 current_link_up = 1;
4205 for (i = 0; i < 30; i++) {
4206 udelay(20);
4207 tw32_f(MAC_STATUS,
4208 (MAC_STATUS_SYNC_CHANGED |
4209 MAC_STATUS_CFG_CHANGED));
4210 udelay(40);
4211 if ((tr32(MAC_STATUS) &
4212 (MAC_STATUS_SYNC_CHANGED |
4213 MAC_STATUS_CFG_CHANGED)) == 0)
4214 break;
4217 mac_status = tr32(MAC_STATUS);
4218 if (current_link_up == 0 &&
4219 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4220 !(mac_status & MAC_STATUS_RCVD_CFG))
4221 current_link_up = 1;
4222 } else {
4223 tg3_setup_flow_control(tp, 0, 0);
4225 /* Forcing 1000FD link up. */
4226 current_link_up = 1;
4228 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4229 udelay(40);
4231 tw32_f(MAC_MODE, tp->mac_mode);
4232 udelay(40);
4235 out:
4236 return current_link_up;
4239 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4241 u32 orig_pause_cfg;
4242 u16 orig_active_speed;
4243 u8 orig_active_duplex;
4244 u32 mac_status;
4245 int current_link_up;
4246 int i;
4248 orig_pause_cfg = tp->link_config.active_flowctrl;
4249 orig_active_speed = tp->link_config.active_speed;
4250 orig_active_duplex = tp->link_config.active_duplex;
4252 if (!tg3_flag(tp, HW_AUTONEG) &&
4253 netif_carrier_ok(tp->dev) &&
4254 tg3_flag(tp, INIT_COMPLETE)) {
4255 mac_status = tr32(MAC_STATUS);
4256 mac_status &= (MAC_STATUS_PCS_SYNCED |
4257 MAC_STATUS_SIGNAL_DET |
4258 MAC_STATUS_CFG_CHANGED |
4259 MAC_STATUS_RCVD_CFG);
4260 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4261 MAC_STATUS_SIGNAL_DET)) {
4262 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4263 MAC_STATUS_CFG_CHANGED));
4264 return 0;
4268 tw32_f(MAC_TX_AUTO_NEG, 0);
4270 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4271 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4272 tw32_f(MAC_MODE, tp->mac_mode);
4273 udelay(40);
4275 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4276 tg3_init_bcm8002(tp);
4278 /* Enable link change event even when serdes polling. */
4279 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4280 udelay(40);
4282 current_link_up = 0;
4283 mac_status = tr32(MAC_STATUS);
4285 if (tg3_flag(tp, HW_AUTONEG))
4286 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4287 else
4288 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4290 tp->napi[0].hw_status->status =
4291 (SD_STATUS_UPDATED |
4292 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4294 for (i = 0; i < 100; i++) {
4295 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4296 MAC_STATUS_CFG_CHANGED));
4297 udelay(5);
4298 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4299 MAC_STATUS_CFG_CHANGED |
4300 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4301 break;
4304 mac_status = tr32(MAC_STATUS);
4305 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4306 current_link_up = 0;
4307 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4308 tp->serdes_counter == 0) {
4309 tw32_f(MAC_MODE, (tp->mac_mode |
4310 MAC_MODE_SEND_CONFIGS));
4311 udelay(1);
4312 tw32_f(MAC_MODE, tp->mac_mode);
4316 if (current_link_up == 1) {
4317 tp->link_config.active_speed = SPEED_1000;
4318 tp->link_config.active_duplex = DUPLEX_FULL;
4319 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4320 LED_CTRL_LNKLED_OVERRIDE |
4321 LED_CTRL_1000MBPS_ON));
4322 } else {
4323 tp->link_config.active_speed = SPEED_INVALID;
4324 tp->link_config.active_duplex = DUPLEX_INVALID;
4325 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4326 LED_CTRL_LNKLED_OVERRIDE |
4327 LED_CTRL_TRAFFIC_OVERRIDE));
4330 if (current_link_up != netif_carrier_ok(tp->dev)) {
4331 if (current_link_up)
4332 netif_carrier_on(tp->dev);
4333 else
4334 netif_carrier_off(tp->dev);
4335 tg3_link_report(tp);
4336 } else {
4337 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4338 if (orig_pause_cfg != now_pause_cfg ||
4339 orig_active_speed != tp->link_config.active_speed ||
4340 orig_active_duplex != tp->link_config.active_duplex)
4341 tg3_link_report(tp);
4344 return 0;
4347 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4349 int current_link_up, err = 0;
4350 u32 bmsr, bmcr;
4351 u16 current_speed;
4352 u8 current_duplex;
4353 u32 local_adv, remote_adv;
4355 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4356 tw32_f(MAC_MODE, tp->mac_mode);
4357 udelay(40);
4359 tw32(MAC_EVENT, 0);
4361 tw32_f(MAC_STATUS,
4362 (MAC_STATUS_SYNC_CHANGED |
4363 MAC_STATUS_CFG_CHANGED |
4364 MAC_STATUS_MI_COMPLETION |
4365 MAC_STATUS_LNKSTATE_CHANGED));
4366 udelay(40);
4368 if (force_reset)
4369 tg3_phy_reset(tp);
4371 current_link_up = 0;
4372 current_speed = SPEED_INVALID;
4373 current_duplex = DUPLEX_INVALID;
4375 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4376 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4377 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4378 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4379 bmsr |= BMSR_LSTATUS;
4380 else
4381 bmsr &= ~BMSR_LSTATUS;
4384 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4386 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4387 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4388 /* do nothing, just check for link up at the end */
4389 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4390 u32 adv, new_adv;
4392 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4393 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4394 ADVERTISE_1000XPAUSE |
4395 ADVERTISE_1000XPSE_ASYM |
4396 ADVERTISE_SLCT);
4398 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4400 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4401 new_adv |= ADVERTISE_1000XHALF;
4402 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4403 new_adv |= ADVERTISE_1000XFULL;
4405 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4406 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4407 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4408 tg3_writephy(tp, MII_BMCR, bmcr);
4410 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4411 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4412 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4414 return err;
4416 } else {
4417 u32 new_bmcr;
4419 bmcr &= ~BMCR_SPEED1000;
4420 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4422 if (tp->link_config.duplex == DUPLEX_FULL)
4423 new_bmcr |= BMCR_FULLDPLX;
4425 if (new_bmcr != bmcr) {
4426 /* BMCR_SPEED1000 is a reserved bit that needs
4427 * to be set on write.
4428 */
4429 new_bmcr |= BMCR_SPEED1000;
4431 /* Force a linkdown */
4432 if (netif_carrier_ok(tp->dev)) {
4433 u32 adv;
4435 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4436 adv &= ~(ADVERTISE_1000XFULL |
4437 ADVERTISE_1000XHALF |
4438 ADVERTISE_SLCT);
4439 tg3_writephy(tp, MII_ADVERTISE, adv);
4440 tg3_writephy(tp, MII_BMCR, bmcr |
4441 BMCR_ANRESTART |
4442 BMCR_ANENABLE);
4443 udelay(10);
4444 netif_carrier_off(tp->dev);
4446 tg3_writephy(tp, MII_BMCR, new_bmcr);
4447 bmcr = new_bmcr;
4448 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4449 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4450 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4451 ASIC_REV_5714) {
4452 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4453 bmsr |= BMSR_LSTATUS;
4454 else
4455 bmsr &= ~BMSR_LSTATUS;
4457 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4461 if (bmsr & BMSR_LSTATUS) {
4462 current_speed = SPEED_1000;
4463 current_link_up = 1;
4464 if (bmcr & BMCR_FULLDPLX)
4465 current_duplex = DUPLEX_FULL;
4466 else
4467 current_duplex = DUPLEX_HALF;
4469 local_adv = 0;
4470 remote_adv = 0;
4472 if (bmcr & BMCR_ANENABLE) {
4473 u32 common;
4475 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4476 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4477 common = local_adv & remote_adv;
4478 if (common & (ADVERTISE_1000XHALF |
4479 ADVERTISE_1000XFULL)) {
4480 if (common & ADVERTISE_1000XFULL)
4481 current_duplex = DUPLEX_FULL;
4482 else
4483 current_duplex = DUPLEX_HALF;
4484 } else if (!tg3_flag(tp, 5780_CLASS)) {
4485 /* Link is up via parallel detect */
4486 } else {
4487 current_link_up = 0;
4492 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4493 tg3_setup_flow_control(tp, local_adv, remote_adv);
4495 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4496 if (tp->link_config.active_duplex == DUPLEX_HALF)
4497 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4499 tw32_f(MAC_MODE, tp->mac_mode);
4500 udelay(40);
4502 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4504 tp->link_config.active_speed = current_speed;
4505 tp->link_config.active_duplex = current_duplex;
4507 if (current_link_up != netif_carrier_ok(tp->dev)) {
4508 if (current_link_up)
4509 netif_carrier_on(tp->dev);
4510 else {
4511 netif_carrier_off(tp->dev);
4512 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4514 tg3_link_report(tp);
4516 return err;
4519 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4521 if (tp->serdes_counter) {
4522 /* Give autoneg time to complete. */
4523 tp->serdes_counter--;
4524 return;
4527 if (!netif_carrier_ok(tp->dev) &&
4528 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4529 u32 bmcr;
4531 tg3_readphy(tp, MII_BMCR, &bmcr);
4532 if (bmcr & BMCR_ANENABLE) {
4533 u32 phy1, phy2;
4535 /* Select shadow register 0x1f */
4536 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4537 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4539 /* Select expansion interrupt status register */
4540 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4541 MII_TG3_DSP_EXP1_INT_STAT);
4542 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4543 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4545 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4546 /* We have signal detect and not receiving
4547 * config code words, link is up by parallel
4548 * detection.
4549 */
4551 bmcr &= ~BMCR_ANENABLE;
4552 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4553 tg3_writephy(tp, MII_BMCR, bmcr);
4554 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4557 } else if (netif_carrier_ok(tp->dev) &&
4558 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4559 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4560 u32 phy2;
4562 /* Select expansion interrupt status register */
4563 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4564 MII_TG3_DSP_EXP1_INT_STAT);
4565 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4566 if (phy2 & 0x20) {
4567 u32 bmcr;
4569 /* Config code words received, turn on autoneg. */
4570 tg3_readphy(tp, MII_BMCR, &bmcr);
4571 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4573 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4579 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4581 u32 val;
4582 int err;
4584 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4585 err = tg3_setup_fiber_phy(tp, force_reset);
4586 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4587 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4588 else
4589 err = tg3_setup_copper_phy(tp, force_reset);
4591 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4592 u32 scale;
4594 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4595 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4596 scale = 65;
4597 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4598 scale = 6;
4599 else
4600 scale = 12;
4602 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4603 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4604 tw32(GRC_MISC_CFG, val);
4607 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4608 (6 << TX_LENGTHS_IPG_SHIFT);
4609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4610 val |= tr32(MAC_TX_LENGTHS) &
4611 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4612 TX_LENGTHS_CNT_DWN_VAL_MSK);
4614 if (tp->link_config.active_speed == SPEED_1000 &&
4615 tp->link_config.active_duplex == DUPLEX_HALF)
4616 tw32(MAC_TX_LENGTHS, val |
4617 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4618 else
4619 tw32(MAC_TX_LENGTHS, val |
4620 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4622 if (!tg3_flag(tp, 5705_PLUS)) {
4623 if (netif_carrier_ok(tp->dev)) {
4624 tw32(HOSTCC_STAT_COAL_TICKS,
4625 tp->coal.stats_block_coalesce_usecs);
4626 } else {
4627 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4631 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4632 val = tr32(PCIE_PWR_MGMT_THRESH);
4633 if (!netif_carrier_ok(tp->dev))
4634 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4635 tp->pwrmgmt_thresh;
4636 else
4637 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4638 tw32(PCIE_PWR_MGMT_THRESH, val);
4641 return err;
4644 static inline int tg3_irq_sync(struct tg3 *tp)
4646 return tp->irq_sync;
4649 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4651 int i;
4653 dst = (u32 *)((u8 *)dst + off);
4654 for (i = 0; i < len; i += sizeof(u32))
4655 *dst++ = tr32(off + i);
4658 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4660 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4661 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4662 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4663 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4664 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4665 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4666 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4667 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4668 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4669 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4670 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4671 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4672 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4673 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4674 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4675 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4676 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4677 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4678 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4680 if (tg3_flag(tp, SUPPORT_MSIX))
4681 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4683 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4684 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4685 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4686 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4687 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4688 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4689 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4690 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4692 if (!tg3_flag(tp, 5705_PLUS)) {
4693 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4694 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4695 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4698 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4699 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4700 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4701 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4702 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4704 if (tg3_flag(tp, NVRAM))
4705 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4708 static void tg3_dump_state(struct tg3 *tp)
4710 int i;
4711 u32 *regs;
4713 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4714 if (!regs) {
4715 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4716 return;
4719 if (tg3_flag(tp, PCI_EXPRESS)) {
4720 /* Read up to but not including private PCI registers */
4721 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4722 regs[i / sizeof(u32)] = tr32(i);
4723 } else
4724 tg3_dump_legacy_regs(tp, regs);
4726 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4727 if (!regs[i + 0] && !regs[i + 1] &&
4728 !regs[i + 2] && !regs[i + 3])
4729 continue;
4731 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4732 i * 4,
4733 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4736 kfree(regs);
4738 for (i = 0; i < tp->irq_cnt; i++) {
4739 struct tg3_napi *tnapi = &tp->napi[i];
4741 /* SW status block */
4742 netdev_err(tp->dev,
4743 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4745 tnapi->hw_status->status,
4746 tnapi->hw_status->status_tag,
4747 tnapi->hw_status->rx_jumbo_consumer,
4748 tnapi->hw_status->rx_consumer,
4749 tnapi->hw_status->rx_mini_consumer,
4750 tnapi->hw_status->idx[0].rx_producer,
4751 tnapi->hw_status->idx[0].tx_consumer);
4753 netdev_err(tp->dev,
4754 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4756 tnapi->last_tag, tnapi->last_irq_tag,
4757 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4758 tnapi->rx_rcb_ptr,
4759 tnapi->prodring.rx_std_prod_idx,
4760 tnapi->prodring.rx_std_cons_idx,
4761 tnapi->prodring.rx_jmb_prod_idx,
4762 tnapi->prodring.rx_jmb_cons_idx);
4766 /* This is called whenever we suspect that the system chipset is re-
4767 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4768 * is bogus tx completions. We try to recover by setting the
4769 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4770 * in the workqueue.
4771 */
4772 static void tg3_tx_recover(struct tg3 *tp)
4774 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4775 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4777 netdev_warn(tp->dev,
4778 "The system may be re-ordering memory-mapped I/O "
4779 "cycles to the network device, attempting to recover. "
4780 "Please report the problem to the driver maintainer "
4781 "and include system chipset information.\n");
4783 spin_lock(&tp->lock);
4784 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4785 spin_unlock(&tp->lock);
4788 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4790 /* Tell compiler to fetch tx indices from memory. */
4791 barrier();
4792 return tnapi->tx_pending -
4793 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
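/* A standalone sketch (illustrative only, not driver code) of the
 * ring arithmetic above, assuming TG3_TX_RING_SIZE is 512 as in this
 * driver's headers: because the ring size is a power of two, the
 * masked subtraction yields the correct in-flight count even after
 * tx_prod wraps around past tx_cons.
 */
static inline unsigned int example_tx_avail(unsigned int pending,
					    unsigned int prod,
					    unsigned int cons)
{
	/* e.g. pending = 511, prod = 5, cons = 508:
	 * (5 - 508) & 511 == 9 descriptors in flight, 502 available.
	 */
	return pending - ((prod - cons) & 511);
}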
4796 /* Tigon3 never reports partial packet sends. So we do not
4797 * need special logic to handle SKBs that have not had all
4798 * of their frags sent yet, like SunGEM does.
4799 */
4800 static void tg3_tx(struct tg3_napi *tnapi)
4802 struct tg3 *tp = tnapi->tp;
4803 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4804 u32 sw_idx = tnapi->tx_cons;
4805 struct netdev_queue *txq;
4806 int index = tnapi - tp->napi;
4808 if (tg3_flag(tp, ENABLE_TSS))
4809 index--;
4811 txq = netdev_get_tx_queue(tp->dev, index);
4813 while (sw_idx != hw_idx) {
4814 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4815 struct sk_buff *skb = ri->skb;
4816 int i, tx_bug = 0;
4818 if (unlikely(skb == NULL)) {
4819 tg3_tx_recover(tp);
4820 return;
4823 pci_unmap_single(tp->pdev,
4824 dma_unmap_addr(ri, mapping),
4825 skb_headlen(skb),
4826 PCI_DMA_TODEVICE);
4828 ri->skb = NULL;
4830 sw_idx = NEXT_TX(sw_idx);
4832 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4833 ri = &tnapi->tx_buffers[sw_idx];
4834 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4835 tx_bug = 1;
4837 pci_unmap_page(tp->pdev,
4838 dma_unmap_addr(ri, mapping),
4839 skb_shinfo(skb)->frags[i].size,
4840 PCI_DMA_TODEVICE);
4841 sw_idx = NEXT_TX(sw_idx);
4844 dev_kfree_skb(skb);
4846 if (unlikely(tx_bug)) {
4847 tg3_tx_recover(tp);
4848 return;
4852 tnapi->tx_cons = sw_idx;
4854 /* Need to make the tx_cons update visible to tg3_start_xmit()
4855 * before checking for netif_queue_stopped(). Without the
4856 * memory barrier, there is a small possibility that tg3_start_xmit()
4857 * will miss it and cause the queue to be stopped forever.
4858 */
4859 smp_mb();
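/* This barrier appears to pair with the smp_mb() issued after
 * netif_tx_stop_queue() in tg3_start_xmit(): either that path sees
 * the updated tx_cons, or this path sees the stopped queue and
 * wakes it below.
 */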
4861 if (unlikely(netif_tx_queue_stopped(txq) &&
4862 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4863 __netif_tx_lock(txq, smp_processor_id());
4864 if (netif_tx_queue_stopped(txq) &&
4865 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4866 netif_tx_wake_queue(txq);
4867 __netif_tx_unlock(txq);
4871 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4873 if (!ri->skb)
4874 return;
4876 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4877 map_sz, PCI_DMA_FROMDEVICE);
4878 dev_kfree_skb_any(ri->skb);
4879 ri->skb = NULL;
4882 /* Returns size of skb allocated or < 0 on error.
4884 * We only need to fill in the address because the other members
4885 * of the RX descriptor are invariant, see tg3_init_rings.
4887 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4888 * posting buffers we only dirty the first cache line of the RX
4889 * descriptor (containing the address). Whereas for the RX status
4890 * buffers the cpu only reads the last cacheline of the RX descriptor
4891 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4892 */
4893 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4894 u32 opaque_key, u32 dest_idx_unmasked)
4896 struct tg3_rx_buffer_desc *desc;
4897 struct ring_info *map;
4898 struct sk_buff *skb;
4899 dma_addr_t mapping;
4900 int skb_size, dest_idx;
4902 switch (opaque_key) {
4903 case RXD_OPAQUE_RING_STD:
4904 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4905 desc = &tpr->rx_std[dest_idx];
4906 map = &tpr->rx_std_buffers[dest_idx];
4907 skb_size = tp->rx_pkt_map_sz;
4908 break;
4910 case RXD_OPAQUE_RING_JUMBO:
4911 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4912 desc = &tpr->rx_jmb[dest_idx].std;
4913 map = &tpr->rx_jmb_buffers[dest_idx];
4914 skb_size = TG3_RX_JMB_MAP_SZ;
4915 break;
4917 default:
4918 return -EINVAL;
4921 /* Do not overwrite any of the map or rp information
4922 * until we are sure we can commit to a new buffer.
4924 * Callers depend upon this behavior and assume that
4925 * we leave everything unchanged if we fail.
4926 */
4927 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4928 if (skb == NULL)
4929 return -ENOMEM;
4931 skb_reserve(skb, tp->rx_offset);
4933 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4934 PCI_DMA_FROMDEVICE);
4935 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4936 dev_kfree_skb(skb);
4937 return -EIO;
4940 map->skb = skb;
4941 dma_unmap_addr_set(map, mapping, mapping);
4943 desc->addr_hi = ((u64)mapping >> 32);
4944 desc->addr_lo = ((u64)mapping & 0xffffffff);
4946 return skb_size;
4949 /* We only need to move over in the address because the other
4950 * members of the RX descriptor are invariant. See notes above
4951 * tg3_alloc_rx_skb for full details.
4952 */
4953 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4954 struct tg3_rx_prodring_set *dpr,
4955 u32 opaque_key, int src_idx,
4956 u32 dest_idx_unmasked)
4958 struct tg3 *tp = tnapi->tp;
4959 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4960 struct ring_info *src_map, *dest_map;
4961 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4962 int dest_idx;
4964 switch (opaque_key) {
4965 case RXD_OPAQUE_RING_STD:
4966 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4967 dest_desc = &dpr->rx_std[dest_idx];
4968 dest_map = &dpr->rx_std_buffers[dest_idx];
4969 src_desc = &spr->rx_std[src_idx];
4970 src_map = &spr->rx_std_buffers[src_idx];
4971 break;
4973 case RXD_OPAQUE_RING_JUMBO:
4974 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4975 dest_desc = &dpr->rx_jmb[dest_idx].std;
4976 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4977 src_desc = &spr->rx_jmb[src_idx].std;
4978 src_map = &spr->rx_jmb_buffers[src_idx];
4979 break;
4981 default:
4982 return;
4985 dest_map->skb = src_map->skb;
4986 dma_unmap_addr_set(dest_map, mapping,
4987 dma_unmap_addr(src_map, mapping));
4988 dest_desc->addr_hi = src_desc->addr_hi;
4989 dest_desc->addr_lo = src_desc->addr_lo;
4991 /* Ensure that the update to the skb happens after the physical
4992 * addresses have been transferred to the new BD location.
4993 */
4994 smp_wmb();
4996 src_map->skb = NULL;
4999 /* The RX ring scheme is composed of multiple rings which post fresh
5000 * buffers to the chip, and one special ring the chip uses to report
5001 * status back to the host.
5003 * The special ring reports the status of received packets to the
5004 * host. The chip does not write into the original descriptor the
5005 * RX buffer was obtained from. The chip simply takes the original
5006 * descriptor as provided by the host, updates the status and length
5007 * field, then writes this into the next status ring entry.
5009 * Each ring the host uses to post buffers to the chip is described
5010 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
5011 * it is first placed into the on-chip ram. When the packet's length
5012 * is known, it walks down the TG3_BDINFO entries to select the ring.
5013 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5014 * which is within the range of the new packet's length is chosen.
5016 * The "separate ring for rx status" scheme may sound queer, but it makes
5017 * sense from a cache coherency perspective. If only the host writes
5018 * to the buffer post rings, and only the chip writes to the rx status
5019 * rings, then cache lines never move beyond shared-modified state.
5020 * If both the host and chip were to write into the same ring, cache line
5021 * eviction could occur since both entities want it in an exclusive state.
5022 */
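/* A minimal standalone model of the single-writer rule described
 * above (illustrative only, invented names and sizes): the host
 * stores only to the buffer-post ring and the chip stores only to
 * the status ring, so no descriptor cache line ever has two writers.
 */
struct example_rx_rings {
	unsigned long long post[512];	/* written by the host only */
	unsigned int status[1024];	/* written by the chip only */
};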
5023 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5025 struct tg3 *tp = tnapi->tp;
5026 u32 work_mask, rx_std_posted = 0;
5027 u32 std_prod_idx, jmb_prod_idx;
5028 u32 sw_idx = tnapi->rx_rcb_ptr;
5029 u16 hw_idx;
5030 int received;
5031 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5033 hw_idx = *(tnapi->rx_rcb_prod_idx);
5034 /*
5035 * We need to order the read of hw_idx and the read of
5036 * the opaque cookie.
5037 */
5038 rmb();
5039 work_mask = 0;
5040 received = 0;
5041 std_prod_idx = tpr->rx_std_prod_idx;
5042 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5043 while (sw_idx != hw_idx && budget > 0) {
5044 struct ring_info *ri;
5045 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5046 unsigned int len;
5047 struct sk_buff *skb;
5048 dma_addr_t dma_addr;
5049 u32 opaque_key, desc_idx, *post_ptr;
5051 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5052 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5053 if (opaque_key == RXD_OPAQUE_RING_STD) {
5054 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5055 dma_addr = dma_unmap_addr(ri, mapping);
5056 skb = ri->skb;
5057 post_ptr = &std_prod_idx;
5058 rx_std_posted++;
5059 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5060 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5061 dma_addr = dma_unmap_addr(ri, mapping);
5062 skb = ri->skb;
5063 post_ptr = &jmb_prod_idx;
5064 } else
5065 goto next_pkt_nopost;
5067 work_mask |= opaque_key;
5069 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5070 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5071 drop_it:
5072 tg3_recycle_rx(tnapi, tpr, opaque_key,
5073 desc_idx, *post_ptr);
5074 drop_it_no_recycle:
5075 /* Other statistics kept track of by card. */
5076 tp->rx_dropped++;
5077 goto next_pkt;
5080 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5081 ETH_FCS_LEN;
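/* Packets above the copy threshold are passed up in the original
 * DMA buffer and a fresh buffer is posted in its place; smaller
 * packets are copied into a new skb so the original buffer can be
 * recycled in place.
 */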
5083 if (len > TG3_RX_COPY_THRESH(tp)) {
5084 int skb_size;
5086 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5087 *post_ptr);
5088 if (skb_size < 0)
5089 goto drop_it;
5091 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5092 PCI_DMA_FROMDEVICE);
5094 /* Ensure that the update to the skb happens
5095 * after the usage of the old DMA mapping.
5096 */
5097 smp_wmb();
5099 ri->skb = NULL;
5101 skb_put(skb, len);
5102 } else {
5103 struct sk_buff *copy_skb;
5105 tg3_recycle_rx(tnapi, tpr, opaque_key,
5106 desc_idx, *post_ptr);
5108 copy_skb = netdev_alloc_skb(tp->dev, len +
5109 TG3_RAW_IP_ALIGN);
5110 if (copy_skb == NULL)
5111 goto drop_it_no_recycle;
5113 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5114 skb_put(copy_skb, len);
5115 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5116 skb_copy_from_linear_data(skb, copy_skb->data, len);
5117 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5119 /* We'll reuse the original ring buffer. */
5120 skb = copy_skb;
5123 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5124 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5125 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5126 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5127 skb->ip_summed = CHECKSUM_UNNECESSARY;
5128 else
5129 skb_checksum_none_assert(skb);
5131 skb->protocol = eth_type_trans(skb, tp->dev);
5133 if (len > (tp->dev->mtu + ETH_HLEN) &&
5134 skb->protocol != htons(ETH_P_8021Q)) {
5135 dev_kfree_skb(skb);
5136 goto drop_it_no_recycle;
5139 if (desc->type_flags & RXD_FLAG_VLAN &&
5140 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5141 __vlan_hwaccel_put_tag(skb,
5142 desc->err_vlan & RXD_VLAN_MASK);
5144 napi_gro_receive(&tnapi->napi, skb);
5146 received++;
5147 budget--;
5149 next_pkt:
5150 (*post_ptr)++;
5152 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5153 tpr->rx_std_prod_idx = std_prod_idx &
5154 tp->rx_std_ring_mask;
5155 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5156 tpr->rx_std_prod_idx);
5157 work_mask &= ~RXD_OPAQUE_RING_STD;
5158 rx_std_posted = 0;
5160 next_pkt_nopost:
5161 sw_idx++;
5162 sw_idx &= tp->rx_ret_ring_mask;
5164 /* Refresh hw_idx to see if there is new work */
5165 if (sw_idx == hw_idx) {
5166 hw_idx = *(tnapi->rx_rcb_prod_idx);
5167 rmb();
5171 /* ACK the status ring. */
5172 tnapi->rx_rcb_ptr = sw_idx;
5173 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5175 /* Refill RX ring(s). */
5176 if (!tg3_flag(tp, ENABLE_RSS)) {
5177 if (work_mask & RXD_OPAQUE_RING_STD) {
5178 tpr->rx_std_prod_idx = std_prod_idx &
5179 tp->rx_std_ring_mask;
5180 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5181 tpr->rx_std_prod_idx);
5183 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5184 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5185 tp->rx_jmb_ring_mask;
5186 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5187 tpr->rx_jmb_prod_idx);
5189 mmiowb();
5190 } else if (work_mask) {
5191 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5192 * updated before the producer indices can be updated.
5193 */
5194 smp_wmb();
5196 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5197 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5199 if (tnapi != &tp->napi[1])
5200 napi_schedule(&tp->napi[1].napi);
5203 return received;
5206 static void tg3_poll_link(struct tg3 *tp)
5208 /* handle link change and other phy events */
5209 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5210 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5212 if (sblk->status & SD_STATUS_LINK_CHG) {
5213 sblk->status = SD_STATUS_UPDATED |
5214 (sblk->status & ~SD_STATUS_LINK_CHG);
5215 spin_lock(&tp->lock);
5216 if (tg3_flag(tp, USE_PHYLIB)) {
5217 tw32_f(MAC_STATUS,
5218 (MAC_STATUS_SYNC_CHANGED |
5219 MAC_STATUS_CFG_CHANGED |
5220 MAC_STATUS_MI_COMPLETION |
5221 MAC_STATUS_LNKSTATE_CHANGED));
5222 udelay(40);
5223 } else
5224 tg3_setup_phy(tp, 0);
5225 spin_unlock(&tp->lock);
5230 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5231 struct tg3_rx_prodring_set *dpr,
5232 struct tg3_rx_prodring_set *spr)
5234 u32 si, di, cpycnt, src_prod_idx;
5235 int i, err = 0;
5237 while (1) {
5238 src_prod_idx = spr->rx_std_prod_idx;
5240 /* Make sure updates to the rx_std_buffers[] entries and the
5241 * standard producer index are seen in the correct order.
5242 */
5243 smp_rmb();
5245 if (spr->rx_std_cons_idx == src_prod_idx)
5246 break;
5248 if (spr->rx_std_cons_idx < src_prod_idx)
5249 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5250 else
5251 cpycnt = tp->rx_std_ring_mask + 1 -
5252 spr->rx_std_cons_idx;
5254 cpycnt = min(cpycnt,
5255 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
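/* At this point cpycnt is clamped first to the contiguous run left
 * in the source ring and then to the contiguous run left in the
 * destination ring, so the memcpy() below never wraps either ring.
 */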
5257 si = spr->rx_std_cons_idx;
5258 di = dpr->rx_std_prod_idx;
5260 for (i = di; i < di + cpycnt; i++) {
5261 if (dpr->rx_std_buffers[i].skb) {
5262 cpycnt = i - di;
5263 err = -ENOSPC;
5264 break;
5268 if (!cpycnt)
5269 break;
5271 /* Ensure that updates to the rx_std_buffers ring and the
5272 * shadowed hardware producer ring from tg3_recycle_rx() are
5273 * ordered correctly WRT the skb check above.
5274 */
5275 smp_rmb();
5277 memcpy(&dpr->rx_std_buffers[di],
5278 &spr->rx_std_buffers[si],
5279 cpycnt * sizeof(struct ring_info));
5281 for (i = 0; i < cpycnt; i++, di++, si++) {
5282 struct tg3_rx_buffer_desc *sbd, *dbd;
5283 sbd = &spr->rx_std[si];
5284 dbd = &dpr->rx_std[di];
5285 dbd->addr_hi = sbd->addr_hi;
5286 dbd->addr_lo = sbd->addr_lo;
5289 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5290 tp->rx_std_ring_mask;
5291 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5292 tp->rx_std_ring_mask;
5295 while (1) {
5296 src_prod_idx = spr->rx_jmb_prod_idx;
5298 /* Make sure updates to the rx_jmb_buffers[] entries and
5299 * the jumbo producer index are seen in the correct order.
5300 */
5301 smp_rmb();
5303 if (spr->rx_jmb_cons_idx == src_prod_idx)
5304 break;
5306 if (spr->rx_jmb_cons_idx < src_prod_idx)
5307 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5308 else
5309 cpycnt = tp->rx_jmb_ring_mask + 1 -
5310 spr->rx_jmb_cons_idx;
5312 cpycnt = min(cpycnt,
5313 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5315 si = spr->rx_jmb_cons_idx;
5316 di = dpr->rx_jmb_prod_idx;
5318 for (i = di; i < di + cpycnt; i++) {
5319 if (dpr->rx_jmb_buffers[i].skb) {
5320 cpycnt = i - di;
5321 err = -ENOSPC;
5322 break;
5326 if (!cpycnt)
5327 break;
5329 /* Ensure that updates to the rx_jmb_buffers ring and the
5330 * shadowed hardware producer ring from tg3_recycle_rx() are
5331 * ordered correctly WRT the skb check above.
5332 */
5333 smp_rmb();
5335 memcpy(&dpr->rx_jmb_buffers[di],
5336 &spr->rx_jmb_buffers[si],
5337 cpycnt * sizeof(struct ring_info));
5339 for (i = 0; i < cpycnt; i++, di++, si++) {
5340 struct tg3_rx_buffer_desc *sbd, *dbd;
5341 sbd = &spr->rx_jmb[si].std;
5342 dbd = &dpr->rx_jmb[di].std;
5343 dbd->addr_hi = sbd->addr_hi;
5344 dbd->addr_lo = sbd->addr_lo;
5347 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5348 tp->rx_jmb_ring_mask;
5349 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5350 tp->rx_jmb_ring_mask;
5353 return err;
5356 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5358 struct tg3 *tp = tnapi->tp;
5360 /* run TX completion thread */
5361 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5362 tg3_tx(tnapi);
5363 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5364 return work_done;
5367 /* run RX thread, within the bounds set by NAPI.
5368 * All RX "locking" is done by ensuring outside
5369 * code synchronizes with tg3->napi.poll()
5370 */
5371 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5372 work_done += tg3_rx(tnapi, budget - work_done);
5374 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5375 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5376 int i, err = 0;
5377 u32 std_prod_idx = dpr->rx_std_prod_idx;
5378 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5380 for (i = 1; i < tp->irq_cnt; i++)
5381 err |= tg3_rx_prodring_xfer(tp, dpr,
5382 &tp->napi[i].prodring);
5384 wmb();
5386 if (std_prod_idx != dpr->rx_std_prod_idx)
5387 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5388 dpr->rx_std_prod_idx);
5390 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5391 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5392 dpr->rx_jmb_prod_idx);
5394 mmiowb();
5396 if (err)
5397 tw32_f(HOSTCC_MODE, tp->coal_now);
5400 return work_done;
5403 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5405 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5406 struct tg3 *tp = tnapi->tp;
5407 int work_done = 0;
5408 struct tg3_hw_status *sblk = tnapi->hw_status;
5410 while (1) {
5411 work_done = tg3_poll_work(tnapi, work_done, budget);
5413 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5414 goto tx_recovery;
5416 if (unlikely(work_done >= budget))
5417 break;
5419 /* tp->last_tag is used in tg3_int_reenable() below
5420 * to tell the hw how much work has been processed,
5421 * so we must read it before checking for more work.
5422 */
5423 tnapi->last_tag = sblk->status_tag;
5424 tnapi->last_irq_tag = tnapi->last_tag;
5425 rmb();
5427 /* check for RX/TX work to do */
5428 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5429 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5430 napi_complete(napi);
5431 /* Reenable interrupts. */
5432 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5433 mmiowb();
5434 break;
5438 return work_done;
5440 tx_recovery:
5441 /* work_done is guaranteed to be less than budget. */
5442 napi_complete(napi);
5443 schedule_work(&tp->reset_task);
5444 return work_done;
5447 static void tg3_process_error(struct tg3 *tp)
5449 u32 val;
5450 bool real_error = false;
5452 if (tg3_flag(tp, ERROR_PROCESSED))
5453 return;
5455 /* Check Flow Attention register */
5456 val = tr32(HOSTCC_FLOW_ATTN);
5457 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5458 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5459 real_error = true;
5462 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5463 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5464 real_error = true;
5467 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5468 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5469 real_error = true;
5472 if (!real_error)
5473 return;
5475 tg3_dump_state(tp);
5477 tg3_flag_set(tp, ERROR_PROCESSED);
5478 schedule_work(&tp->reset_task);
5481 static int tg3_poll(struct napi_struct *napi, int budget)
5483 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5484 struct tg3 *tp = tnapi->tp;
5485 int work_done = 0;
5486 struct tg3_hw_status *sblk = tnapi->hw_status;
5488 while (1) {
5489 if (sblk->status & SD_STATUS_ERROR)
5490 tg3_process_error(tp);
5492 tg3_poll_link(tp);
5494 work_done = tg3_poll_work(tnapi, work_done, budget);
5496 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5497 goto tx_recovery;
5499 if (unlikely(work_done >= budget))
5500 break;
5502 if (tg3_flag(tp, TAGGED_STATUS)) {
5503 /* tp->last_tag is used in tg3_int_reenable() below
5504 * to tell the hw how much work has been processed,
5505 * so we must read it before checking for more work.
5506 */
5507 tnapi->last_tag = sblk->status_tag;
5508 tnapi->last_irq_tag = tnapi->last_tag;
5509 rmb();
5510 } else
5511 sblk->status &= ~SD_STATUS_UPDATED;
5513 if (likely(!tg3_has_work(tnapi))) {
5514 napi_complete(napi);
5515 tg3_int_reenable(tnapi);
5516 break;
5520 return work_done;
5522 tx_recovery:
5523 /* work_done is guaranteed to be less than budget. */
5524 napi_complete(napi);
5525 schedule_work(&tp->reset_task);
5526 return work_done;
5529 static void tg3_napi_disable(struct tg3 *tp)
5531 int i;
5533 for (i = tp->irq_cnt - 1; i >= 0; i--)
5534 napi_disable(&tp->napi[i].napi);
5537 static void tg3_napi_enable(struct tg3 *tp)
5539 int i;
5541 for (i = 0; i < tp->irq_cnt; i++)
5542 napi_enable(&tp->napi[i].napi);
5545 static void tg3_napi_init(struct tg3 *tp)
5547 int i;
5549 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5550 for (i = 1; i < tp->irq_cnt; i++)
5551 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5554 static void tg3_napi_fini(struct tg3 *tp)
5556 int i;
5558 for (i = 0; i < tp->irq_cnt; i++)
5559 netif_napi_del(&tp->napi[i].napi);
5562 static inline void tg3_netif_stop(struct tg3 *tp)
5564 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5565 tg3_napi_disable(tp);
5566 netif_tx_disable(tp->dev);
5569 static inline void tg3_netif_start(struct tg3 *tp)
5571 /* NOTE: unconditional netif_tx_wake_all_queues is only
5572 * appropriate so long as all callers are assured to
5573 * have free tx slots (such as after tg3_init_hw)
5574 */
5575 netif_tx_wake_all_queues(tp->dev);
5577 tg3_napi_enable(tp);
5578 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5579 tg3_enable_ints(tp);
5582 static void tg3_irq_quiesce(struct tg3 *tp)
5584 int i;
5586 BUG_ON(tp->irq_sync);
5588 tp->irq_sync = 1;
5589 smp_mb();
5591 for (i = 0; i < tp->irq_cnt; i++)
5592 synchronize_irq(tp->napi[i].irq_vec);
5595 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5596 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5597 * with as well. Most of the time, this is not necessary except when
5598 * shutting down the device.
5599 */
5600 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5602 spin_lock_bh(&tp->lock);
5603 if (irq_sync)
5604 tg3_irq_quiesce(tp);
5607 static inline void tg3_full_unlock(struct tg3 *tp)
5609 spin_unlock_bh(&tp->lock);
5612 /* One-shot MSI handler - Chip automatically disables interrupt
5613 * after sending MSI so driver doesn't have to do it.
5614 */
5615 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5617 struct tg3_napi *tnapi = dev_id;
5618 struct tg3 *tp = tnapi->tp;
5620 prefetch(tnapi->hw_status);
5621 if (tnapi->rx_rcb)
5622 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5624 if (likely(!tg3_irq_sync(tp)))
5625 napi_schedule(&tnapi->napi);
5627 return IRQ_HANDLED;
5630 /* MSI ISR - No need to check for interrupt sharing and no need to
5631 * flush status block and interrupt mailbox. PCI ordering rules
5632 * guarantee that MSI will arrive after the status block.
5633 */
5634 static irqreturn_t tg3_msi(int irq, void *dev_id)
5636 struct tg3_napi *tnapi = dev_id;
5637 struct tg3 *tp = tnapi->tp;
5639 prefetch(tnapi->hw_status);
5640 if (tnapi->rx_rcb)
5641 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5642 /*
5643 * Writing any value to intr-mbox-0 clears PCI INTA# and
5644 * chip-internal interrupt pending events.
5645 * Writing non-zero to intr-mbox-0 additionally tells the
5646 * NIC to stop sending us irqs, engaging "in-intr-handler"
5647 * event coalescing.
5648 */
5649 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5650 if (likely(!tg3_irq_sync(tp)))
5651 napi_schedule(&tnapi->napi);
5653 return IRQ_RETVAL(1);
5656 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5658 struct tg3_napi *tnapi = dev_id;
5659 struct tg3 *tp = tnapi->tp;
5660 struct tg3_hw_status *sblk = tnapi->hw_status;
5661 unsigned int handled = 1;
5663 /* In INTx mode, it is possible for the interrupt to arrive at
5664 * the CPU before the status block posted prior to the interrupt.
5665 * Reading the PCI State register will confirm whether the
5666 * interrupt is ours and will flush the status block.
5667 */
5668 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5669 if (tg3_flag(tp, CHIP_RESETTING) ||
5670 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5671 handled = 0;
5672 goto out;
5676 /*
5677 * Writing any value to intr-mbox-0 clears PCI INTA# and
5678 * chip-internal interrupt pending events.
5679 * Writing non-zero to intr-mbox-0 additionally tells the
5680 * NIC to stop sending us irqs, engaging "in-intr-handler"
5681 * event coalescing.
5682 *
5683 * Flush the mailbox to de-assert the IRQ immediately to prevent
5684 * spurious interrupts. The flush impacts performance but
5685 * excessive spurious interrupts can be worse in some cases.
5686 */
5687 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5688 if (tg3_irq_sync(tp))
5689 goto out;
5690 sblk->status &= ~SD_STATUS_UPDATED;
5691 if (likely(tg3_has_work(tnapi))) {
5692 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5693 napi_schedule(&tnapi->napi);
5694 } else {
5695 /* No work, shared interrupt perhaps? re-enable
5696 * interrupts, and flush that PCI write
5697 */
5698 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5699 0x00000000);
5701 out:
5702 return IRQ_RETVAL(handled);
5705 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5707 struct tg3_napi *tnapi = dev_id;
5708 struct tg3 *tp = tnapi->tp;
5709 struct tg3_hw_status *sblk = tnapi->hw_status;
5710 unsigned int handled = 1;
5712 /* In INTx mode, it is possible for the interrupt to arrive at
5713 * the CPU before the status block posted prior to the interrupt.
5714 * Reading the PCI State register will confirm whether the
5715 * interrupt is ours and will flush the status block.
5716 */
5717 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5718 if (tg3_flag(tp, CHIP_RESETTING) ||
5719 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5720 handled = 0;
5721 goto out;
5725 /*
5726 * Writing any value to intr-mbox-0 clears PCI INTA# and
5727 * chip-internal interrupt pending events.
5728 * Writing non-zero to intr-mbox-0 additionally tells the
5729 * NIC to stop sending us irqs, engaging "in-intr-handler"
5730 * event coalescing.
5731 *
5732 * Flush the mailbox to de-assert the IRQ immediately to prevent
5733 * spurious interrupts. The flush impacts performance but
5734 * excessive spurious interrupts can be worse in some cases.
5735 */
5736 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5738 /*
5739 * In a shared interrupt configuration, sometimes other devices'
5740 * interrupts will scream. We record the current status tag here
5741 * so that the above check can report that the screaming interrupts
5742 * are unhandled. Eventually they will be silenced.
5743 */
5744 tnapi->last_irq_tag = sblk->status_tag;
5746 if (tg3_irq_sync(tp))
5747 goto out;
5749 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5751 napi_schedule(&tnapi->napi);
5753 out:
5754 return IRQ_RETVAL(handled);
5757 /* ISR for interrupt test */
5758 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5760 struct tg3_napi *tnapi = dev_id;
5761 struct tg3 *tp = tnapi->tp;
5762 struct tg3_hw_status *sblk = tnapi->hw_status;
5764 if ((sblk->status & SD_STATUS_UPDATED) ||
5765 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5766 tg3_disable_ints(tp);
5767 return IRQ_RETVAL(1);
5769 return IRQ_RETVAL(0);
5772 static int tg3_init_hw(struct tg3 *, int);
5773 static int tg3_halt(struct tg3 *, int, int);
5775 /* Restart hardware after configuration changes, self-test, etc.
5776 * Invoked with tp->lock held.
5777 */
5778 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5779 __releases(tp->lock)
5780 __acquires(tp->lock)
5782 int err;
5784 err = tg3_init_hw(tp, reset_phy);
5785 if (err) {
5786 netdev_err(tp->dev,
5787 "Failed to re-initialize device, aborting\n");
5788 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5789 tg3_full_unlock(tp);
5790 del_timer_sync(&tp->timer);
5791 tp->irq_sync = 0;
5792 tg3_napi_enable(tp);
5793 dev_close(tp->dev);
5794 tg3_full_lock(tp, 0);
5796 return err;
5799 #ifdef CONFIG_NET_POLL_CONTROLLER
5800 static void tg3_poll_controller(struct net_device *dev)
5802 int i;
5803 struct tg3 *tp = netdev_priv(dev);
5805 for (i = 0; i < tp->irq_cnt; i++)
5806 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5808 #endif
5810 static void tg3_reset_task(struct work_struct *work)
5812 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5813 int err;
5814 unsigned int restart_timer;
5816 tg3_full_lock(tp, 0);
5818 if (!netif_running(tp->dev)) {
5819 tg3_full_unlock(tp);
5820 return;
5823 tg3_full_unlock(tp);
5825 tg3_phy_stop(tp);
5827 tg3_netif_stop(tp);
5829 tg3_full_lock(tp, 1);
5831 restart_timer = tg3_flag(tp, RESTART_TIMER);
5832 tg3_flag_clear(tp, RESTART_TIMER);
5834 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5835 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5836 tp->write32_rx_mbox = tg3_write_flush_reg32;
5837 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5838 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5841 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5842 err = tg3_init_hw(tp, 1);
5843 if (err)
5844 goto out;
5846 tg3_netif_start(tp);
5848 if (restart_timer)
5849 mod_timer(&tp->timer, jiffies + 1);
5851 out:
5852 tg3_full_unlock(tp);
5854 if (!err)
5855 tg3_phy_start(tp);
5858 static void tg3_tx_timeout(struct net_device *dev)
5860 struct tg3 *tp = netdev_priv(dev);
5862 if (netif_msg_tx_err(tp)) {
5863 netdev_err(dev, "transmit timed out, resetting\n");
5864 tg3_dump_state(tp);
5867 schedule_work(&tp->reset_task);
5870 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5871 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5873 u32 base = (u32) mapping & 0xffffffff;
5875 return (base > 0xffffdcc0) && (base + len + 8 < base);
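/* A standalone sketch of the wrap test above (illustrative only,
 * invented name): with base = 0xfffff000 and len = 0x2000,
 * base + len + 8 truncates to 0x1008 in 32 bits, which is below
 * base, so the buffer crosses a 4GB boundary. The base > 0xffffdcc0
 * pre-check presumably just skips bases too low to wrap with any
 * legal frame length.
 */
static inline int example_crosses_4g(unsigned int base, unsigned int len)
{
	return base + len + 8 < base;	/* 32-bit wrap => crossed 4GB */
}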
5878 /* Test for DMA addresses > 40-bit */
5879 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5880 int len)
5882 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5883 if (tg3_flag(tp, 40BIT_DMA_BUG))
5884 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5885 return 0;
5886 #else
5887 return 0;
5888 #endif
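/* (Example of the 40-bit test: mapping = DMA_BIT_MASK(40) - 4 with
 * len = 16 gives mapping + len > DMA_BIT_MASK(40), so the workaround
 * path is taken on affected chips.)
 */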
5891 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5892 dma_addr_t mapping, int len, u32 flags,
5893 u32 mss_and_is_end)
5895 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5896 int is_end = (mss_and_is_end & 0x1);
5897 u32 mss = (mss_and_is_end >> 1);
5898 u32 vlan_tag = 0;
5900 if (is_end)
5901 flags |= TXD_FLAG_END;
5902 if (flags & TXD_FLAG_VLAN) {
5903 vlan_tag = flags >> 16;
5904 flags &= 0xffff;
5906 vlan_tag |= (mss << TXD_MSS_SHIFT);
5908 txd->addr_hi = ((u64) mapping >> 32);
5909 txd->addr_lo = ((u64) mapping & 0xffffffff);
5910 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5911 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
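/* Callers pack the "last fragment" flag into bit 0 of mss_and_is_end
 * and the MSS into the remaining bits, e.g. (i == last) | (mss << 1),
 * matching the unpacking at the top of tg3_set_txd().
 */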
5914 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5915 struct sk_buff *skb, int last)
5917 int i;
5918 u32 entry = tnapi->tx_prod;
5919 struct ring_info *txb = &tnapi->tx_buffers[entry];
5921 pci_unmap_single(tnapi->tp->pdev,
5922 dma_unmap_addr(txb, mapping),
5923 skb_headlen(skb),
5924 PCI_DMA_TODEVICE);
5925 for (i = 0; i < last; i++) {
5926 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5928 entry = NEXT_TX(entry);
5929 txb = &tnapi->tx_buffers[entry];
5931 pci_unmap_page(tnapi->tp->pdev,
5932 dma_unmap_addr(txb, mapping),
5933 frag->size, PCI_DMA_TODEVICE);
5937 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5938 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5939 struct sk_buff *skb,
5940 u32 base_flags, u32 mss)
5942 struct tg3 *tp = tnapi->tp;
5943 struct sk_buff *new_skb;
5944 dma_addr_t new_addr = 0;
5945 u32 entry = tnapi->tx_prod;
5946 int ret = 0;
5948 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5949 new_skb = skb_copy(skb, GFP_ATOMIC);
5950 else {
5951 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5953 new_skb = skb_copy_expand(skb,
5954 skb_headroom(skb) + more_headroom,
5955 skb_tailroom(skb), GFP_ATOMIC);
5958 if (!new_skb) {
5959 ret = -1;
5960 } else {
5961 /* New SKB is guaranteed to be linear. */
5962 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5963 PCI_DMA_TODEVICE);
5964 /* Make sure the mapping succeeded */
5965 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5966 ret = -1;
5967 dev_kfree_skb(new_skb);
5969 /* Make sure new skb does not cross any 4G boundaries.
5970 * Drop the packet if it does.
5971 */
5972 } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
5973 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5974 PCI_DMA_TODEVICE);
5975 ret = -1;
5976 dev_kfree_skb(new_skb);
5977 } else {
5978 tnapi->tx_buffers[entry].skb = new_skb;
5979 dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5980 mapping, new_addr);
5982 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5983 base_flags, 1 | (mss << 1));
5987 dev_kfree_skb(skb);
5989 return ret;
5992 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5994 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5995 * TSO header is greater than 80 bytes.
5996 */
5997 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5999 struct sk_buff *segs, *nskb;
6000 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
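/* (Presumably up to three descriptors per emitted segment in the
 * worst case: one for the copied headers plus two data fragments.)
 */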
6002 /* Estimate the number of fragments in the worst case */
6003 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6004 netif_stop_queue(tp->dev);
6006 /* netif_tx_stop_queue() must be done before checking
6007 * the tx index in tg3_tx_avail() below, because in
6008 * tg3_tx(), we update the tx index before checking for
6009 * netif_tx_queue_stopped().
6010 */
6011 smp_mb();
6012 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6013 return NETDEV_TX_BUSY;
6015 netif_wake_queue(tp->dev);
6018 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6019 if (IS_ERR(segs))
6020 goto tg3_tso_bug_end;
6022 do {
6023 nskb = segs;
6024 segs = segs->next;
6025 nskb->next = NULL;
6026 tg3_start_xmit(nskb, tp->dev);
6027 } while (segs);
6029 tg3_tso_bug_end:
6030 dev_kfree_skb(skb);
6032 return NETDEV_TX_OK;
6035 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6036 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6037 */
6038 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6040 struct tg3 *tp = netdev_priv(dev);
6041 u32 len, entry, base_flags, mss;
6042 int i = -1, would_hit_hwbug;
6043 dma_addr_t mapping;
6044 struct tg3_napi *tnapi;
6045 struct netdev_queue *txq;
6046 unsigned int last;
6048 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6049 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6050 if (tg3_flag(tp, ENABLE_TSS))
6051 tnapi++;
6053 /* We are running in BH disabled context with netif_tx_lock
6054 * and TX reclaim runs via tp->napi.poll inside of a software
6055 * interrupt. Furthermore, IRQ processing runs lockless so we have
6056 * no IRQ context deadlocks to worry about either. Rejoice!
6057 */
6058 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
6059 if (!netif_tx_queue_stopped(txq)) {
6060 netif_tx_stop_queue(txq);
6062 /* This is a hard error, log it. */
6063 netdev_err(dev,
6064 "BUG! Tx Ring full when queue awake!\n");
6066 return NETDEV_TX_BUSY;
6069 entry = tnapi->tx_prod;
6070 base_flags = 0;
6071 if (skb->ip_summed == CHECKSUM_PARTIAL)
6072 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6074 mss = skb_shinfo(skb)->gso_size;
6075 if (mss) {
6076 struct iphdr *iph;
6077 u32 tcp_opt_len, hdr_len;
6079 if (skb_header_cloned(skb) &&
6080 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6081 dev_kfree_skb(skb);
6082 goto out_unlock;
6085 iph = ip_hdr(skb);
6086 tcp_opt_len = tcp_optlen(skb);
6088 if (skb_is_gso_v6(skb)) {
6089 hdr_len = skb_headlen(skb) - ETH_HLEN;
6090 } else {
6091 u32 ip_tcp_len;
6093 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6094 hdr_len = ip_tcp_len + tcp_opt_len;
6096 iph->check = 0;
6097 iph->tot_len = htons(mss + hdr_len);
6100 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6101 tg3_flag(tp, TSO_BUG))
6102 return tg3_tso_bug(tp, skb);
6104 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6105 TXD_FLAG_CPU_POST_DMA);
6107 if (tg3_flag(tp, HW_TSO_1) ||
6108 tg3_flag(tp, HW_TSO_2) ||
6109 tg3_flag(tp, HW_TSO_3)) {
6110 tcp_hdr(skb)->check = 0;
6111 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6112 } else
6113 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6114 iph->daddr, 0,
6115 IPPROTO_TCP,
6118 if (tg3_flag(tp, HW_TSO_3)) {
6119 mss |= (hdr_len & 0xc) << 12;
6120 if (hdr_len & 0x10)
6121 base_flags |= 0x00000010;
6122 base_flags |= (hdr_len & 0x3e0) << 5;
6123 } else if (tg3_flag(tp, HW_TSO_2))
6124 mss |= hdr_len << 9;
6125 else if (tg3_flag(tp, HW_TSO_1) ||
6126 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6127 if (tcp_opt_len || iph->ihl > 5) {
6128 int tsflags;
6130 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6131 mss |= (tsflags << 11);
6133 } else {
6134 if (tcp_opt_len || iph->ihl > 5) {
6135 int tsflags;
6137 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6138 base_flags |= tsflags << 12;
6143 if (vlan_tx_tag_present(skb))
6144 base_flags |= (TXD_FLAG_VLAN |
6145 (vlan_tx_tag_get(skb) << 16));
6147 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6148 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6149 base_flags |= TXD_FLAG_JMB_PKT;
6151 len = skb_headlen(skb);
6153 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6154 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6155 dev_kfree_skb(skb);
6156 goto out_unlock;
6159 tnapi->tx_buffers[entry].skb = skb;
6160 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6162 would_hit_hwbug = 0;
6164 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6165 would_hit_hwbug = 1;
6167 if (tg3_4g_overflow_test(mapping, len))
6168 would_hit_hwbug = 1;
6170 if (tg3_40bit_overflow_test(tp, mapping, len))
6171 would_hit_hwbug = 1;
6173 if (tg3_flag(tp, 5701_DMA_BUG))
6174 would_hit_hwbug = 1;
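/* Note: the four tests above catch known DMA erratum cases: segments
 * of 8 bytes or less on SHORT_DMA_BUG parts, mappings that would wrap
 * a 4GB boundary, mappings needing more than 40 address bits on
 * 40-bit parts, and all traffic on 5701_DMA_BUG parts.  Any hit sends
 * the skb through the slower tigon3_dma_hwbug_workaround() path below.
 */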
6176 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6177 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6179 entry = NEXT_TX(entry);
6181 /* Now loop through additional data fragments, and queue them. */
6182 if (skb_shinfo(skb)->nr_frags > 0) {
6183 last = skb_shinfo(skb)->nr_frags - 1;
6184 for (i = 0; i <= last; i++) {
6185 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6187 len = frag->size;
6188 mapping = pci_map_page(tp->pdev,
6189 frag->page,
6190 frag->page_offset,
6191 len, PCI_DMA_TODEVICE);
6193 tnapi->tx_buffers[entry].skb = NULL;
6194 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6195 mapping);
6196 if (pci_dma_mapping_error(tp->pdev, mapping))
6197 goto dma_error;
6199 if (tg3_flag(tp, SHORT_DMA_BUG) &&
6200 len <= 8)
6201 would_hit_hwbug = 1;
6203 if (tg3_4g_overflow_test(mapping, len))
6204 would_hit_hwbug = 1;
6206 if (tg3_40bit_overflow_test(tp, mapping, len))
6207 would_hit_hwbug = 1;
6209 if (tg3_flag(tp, HW_TSO_1) ||
6210 tg3_flag(tp, HW_TSO_2) ||
6211 tg3_flag(tp, HW_TSO_3))
6212 tg3_set_txd(tnapi, entry, mapping, len,
6213 base_flags, (i == last)|(mss << 1));
6214 else
6215 tg3_set_txd(tnapi, entry, mapping, len,
6216 base_flags, (i == last));
6218 entry = NEXT_TX(entry);
6222 if (would_hit_hwbug) {
6223 tg3_skb_error_unmap(tnapi, skb, i);
6225 /* If the workaround fails due to memory/mapping
6226 * failure, silently drop this packet.
6228 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6229 goto out_unlock;
6231 entry = NEXT_TX(tnapi->tx_prod);
6234 skb_tx_timestamp(skb);
6236 /* Packets are ready, update Tx producer idx local and on card. */
6237 tw32_tx_mbox(tnapi->prodmbox, entry);
6239 tnapi->tx_prod = entry;
6240 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6241 netif_tx_stop_queue(txq);
6243 /* netif_tx_stop_queue() must be done before checking
6244 * tx index in tg3_tx_avail() below, because in
6245 * tg3_tx(), we update tx index before checking for
6246 * netif_tx_queue_stopped().
6248 smp_mb();
6249 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6250 netif_tx_wake_queue(txq);
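/* Note: waking only above TG3_TX_WAKEUP_THRESH(), rather than as soon
 * as a single descriptor frees up, adds hysteresis so a nearly-full
 * ring does not thrash the queue between stopped and awake.
 */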
6253 out_unlock:
6254 mmiowb();
6256 return NETDEV_TX_OK;
6258 dma_error:
6259 tg3_skb_error_unmap(tnapi, skb, i);
6260 dev_kfree_skb(skb);
6261 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6262 return NETDEV_TX_OK;
6265 static void tg3_set_loopback(struct net_device *dev, u32 features)
6267 struct tg3 *tp = netdev_priv(dev);
6269 if (features & NETIF_F_LOOPBACK) {
6270 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6271 return;
6274 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6275 * loopback mode if Half-Duplex mode was negotiated earlier.
6277 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6279 /* Enable internal MAC loopback mode */
6280 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6281 spin_lock_bh(&tp->lock);
6282 tw32(MAC_MODE, tp->mac_mode);
6283 netif_carrier_on(tp->dev);
6284 spin_unlock_bh(&tp->lock);
6285 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6286 } else {
6287 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6288 return;
6290 /* Disable internal MAC loopback mode */
6291 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6292 spin_lock_bh(&tp->lock);
6293 tw32(MAC_MODE, tp->mac_mode);
6294 /* Force link status check */
6295 tg3_setup_phy(tp, 1);
6296 spin_unlock_bh(&tp->lock);
6297 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6301 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6303 struct tg3 *tp = netdev_priv(dev);
6305 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6306 features &= ~NETIF_F_ALL_TSO;
6308 return features;
6311 static int tg3_set_features(struct net_device *dev, u32 features)
6313 u32 changed = dev->features ^ features;
6315 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6316 tg3_set_loopback(dev, features);
6318 return 0;
6321 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6322 int new_mtu)
6324 dev->mtu = new_mtu;
6326 if (new_mtu > ETH_DATA_LEN) {
6327 if (tg3_flag(tp, 5780_CLASS)) {
6328 netdev_update_features(dev);
6329 tg3_flag_clear(tp, TSO_CAPABLE);
6330 } else {
6331 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6333 } else {
6334 if (tg3_flag(tp, 5780_CLASS)) {
6335 tg3_flag_set(tp, TSO_CAPABLE);
6336 netdev_update_features(dev);
6338 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
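/* Note: on 5780-class parts jumbo frames and TSO are mutually
 * exclusive (see tg3_fix_features() above), hence the paths above
 * toggle TSO_CAPABLE instead of JUMBO_RING_ENABLE for that class.
 */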
6342 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6344 struct tg3 *tp = netdev_priv(dev);
6345 int err;
6347 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6348 return -EINVAL;
6350 if (!netif_running(dev)) {
6351 /* We'll just catch it later when the
6352 * device is brought up.
6354 tg3_set_mtu(dev, tp, new_mtu);
6355 return 0;
6358 tg3_phy_stop(tp);
6360 tg3_netif_stop(tp);
6362 tg3_full_lock(tp, 1);
6364 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6366 tg3_set_mtu(dev, tp, new_mtu);
6368 err = tg3_restart_hw(tp, 0);
6370 if (!err)
6371 tg3_netif_start(tp);
6373 tg3_full_unlock(tp);
6375 if (!err)
6376 tg3_phy_start(tp);
6378 return err;
6381 static void tg3_rx_prodring_free(struct tg3 *tp,
6382 struct tg3_rx_prodring_set *tpr)
6384 int i;
6386 if (tpr != &tp->napi[0].prodring) {
6387 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6388 i = (i + 1) & tp->rx_std_ring_mask)
6389 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6390 tp->rx_pkt_map_sz);
6392 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6393 for (i = tpr->rx_jmb_cons_idx;
6394 i != tpr->rx_jmb_prod_idx;
6395 i = (i + 1) & tp->rx_jmb_ring_mask) {
6396 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6397 TG3_RX_JMB_MAP_SZ);
6401 return;
6404 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6405 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6406 tp->rx_pkt_map_sz);
6408 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6409 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6410 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6411 TG3_RX_JMB_MAP_SZ);
6415 /* Initialize rx rings for packet processing.
6417 * The chip has been shut down and the driver detached from
6418 * the network stack, so no interrupts or new tx packets will
6419 * end up in the driver. tp->{tx,}lock are held and thus
6420 * we may not sleep.
6422 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6423 struct tg3_rx_prodring_set *tpr)
6425 u32 i, rx_pkt_dma_sz;
6427 tpr->rx_std_cons_idx = 0;
6428 tpr->rx_std_prod_idx = 0;
6429 tpr->rx_jmb_cons_idx = 0;
6430 tpr->rx_jmb_prod_idx = 0;
6432 if (tpr != &tp->napi[0].prodring) {
6433 memset(&tpr->rx_std_buffers[0], 0,
6434 TG3_RX_STD_BUFF_RING_SIZE(tp));
6435 if (tpr->rx_jmb_buffers)
6436 memset(&tpr->rx_jmb_buffers[0], 0,
6437 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6438 goto done;
6441 /* Zero out all descriptors. */
6442 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6444 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6445 if (tg3_flag(tp, 5780_CLASS) &&
6446 tp->dev->mtu > ETH_DATA_LEN)
6447 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6448 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6450 /* Initialize invariants of the rings, we only set this
6451 * stuff once. This works because the card does not
6452 * write into the rx buffer posting rings.
6454 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6455 struct tg3_rx_buffer_desc *rxd;
6457 rxd = &tpr->rx_std[i];
6458 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6459 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6460 rxd->opaque = (RXD_OPAQUE_RING_STD |
6461 (i << RXD_OPAQUE_INDEX_SHIFT));
6464 /* Now allocate fresh SKBs for each rx ring. */
6465 for (i = 0; i < tp->rx_pending; i++) {
6466 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6467 netdev_warn(tp->dev,
6468 "Using a smaller RX standard ring. Only "
6469 "%d out of %d buffers were allocated "
6470 "successfully\n", i, tp->rx_pending);
6471 if (i == 0)
6472 goto initfail;
6473 tp->rx_pending = i;
6474 break;
6478 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6479 goto done;
6481 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6483 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6484 goto done;
6486 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6487 struct tg3_rx_buffer_desc *rxd;
6489 rxd = &tpr->rx_jmb[i].std;
6490 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6491 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6492 RXD_FLAG_JUMBO;
6493 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6494 (i << RXD_OPAQUE_INDEX_SHIFT));
6497 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6498 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6499 netdev_warn(tp->dev,
6500 "Using a smaller RX jumbo ring. Only %d "
6501 "out of %d buffers were allocated "
6502 "successfully\n", i, tp->rx_jumbo_pending);
6503 if (i == 0)
6504 goto initfail;
6505 tp->rx_jumbo_pending = i;
6506 break;
6510 done:
6511 return 0;
6513 initfail:
6514 tg3_rx_prodring_free(tp, tpr);
6515 return -ENOMEM;
6518 static void tg3_rx_prodring_fini(struct tg3 *tp,
6519 struct tg3_rx_prodring_set *tpr)
6521 kfree(tpr->rx_std_buffers);
6522 tpr->rx_std_buffers = NULL;
6523 kfree(tpr->rx_jmb_buffers);
6524 tpr->rx_jmb_buffers = NULL;
6525 if (tpr->rx_std) {
6526 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6527 tpr->rx_std, tpr->rx_std_mapping);
6528 tpr->rx_std = NULL;
6530 if (tpr->rx_jmb) {
6531 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6532 tpr->rx_jmb, tpr->rx_jmb_mapping);
6533 tpr->rx_jmb = NULL;
6537 static int tg3_rx_prodring_init(struct tg3 *tp,
6538 struct tg3_rx_prodring_set *tpr)
6540 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6541 GFP_KERNEL);
6542 if (!tpr->rx_std_buffers)
6543 return -ENOMEM;
6545 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6546 TG3_RX_STD_RING_BYTES(tp),
6547 &tpr->rx_std_mapping,
6548 GFP_KERNEL);
6549 if (!tpr->rx_std)
6550 goto err_out;
6552 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6553 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6554 GFP_KERNEL);
6555 if (!tpr->rx_jmb_buffers)
6556 goto err_out;
6558 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6559 TG3_RX_JMB_RING_BYTES(tp),
6560 &tpr->rx_jmb_mapping,
6561 GFP_KERNEL);
6562 if (!tpr->rx_jmb)
6563 goto err_out;
6566 return 0;
6568 err_out:
6569 tg3_rx_prodring_fini(tp, tpr);
6570 return -ENOMEM;
6573 /* Free up pending packets in all rx/tx rings.
6575 * The chip has been shut down and the driver detached from
6576 * the network stack, so no interrupts or new tx packets will
6577 * end up in the driver. tp->{tx,}lock is not held and we are not
6578 * in an interrupt context and thus may sleep.
6580 static void tg3_free_rings(struct tg3 *tp)
6582 int i, j;
6584 for (j = 0; j < tp->irq_cnt; j++) {
6585 struct tg3_napi *tnapi = &tp->napi[j];
6587 tg3_rx_prodring_free(tp, &tnapi->prodring);
6589 if (!tnapi->tx_buffers)
6590 continue;
6592 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6593 struct ring_info *txp;
6594 struct sk_buff *skb;
6595 unsigned int k;
6597 txp = &tnapi->tx_buffers[i];
6598 skb = txp->skb;
6600 if (skb == NULL) {
6601 i++;
6602 continue;
6605 pci_unmap_single(tp->pdev,
6606 dma_unmap_addr(txp, mapping),
6607 skb_headlen(skb),
6608 PCI_DMA_TODEVICE);
6609 txp->skb = NULL;
6611 i++;
6613 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6614 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6615 pci_unmap_page(tp->pdev,
6616 dma_unmap_addr(txp, mapping),
6617 skb_shinfo(skb)->frags[k].size,
6618 PCI_DMA_TODEVICE);
6619 i++;
6622 dev_kfree_skb_any(skb);
6627 /* Initialize tx/rx rings for packet processing.
6629 * The chip has been shut down and the driver detached from
6630 * the network stack, so no interrupts or new tx packets will
6631 * end up in the driver. tp->{tx,}lock are held and thus
6632 * we may not sleep.
6634 static int tg3_init_rings(struct tg3 *tp)
6636 int i;
6638 /* Free up all the SKBs. */
6639 tg3_free_rings(tp);
6641 for (i = 0; i < tp->irq_cnt; i++) {
6642 struct tg3_napi *tnapi = &tp->napi[i];
6644 tnapi->last_tag = 0;
6645 tnapi->last_irq_tag = 0;
6646 tnapi->hw_status->status = 0;
6647 tnapi->hw_status->status_tag = 0;
6648 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6650 tnapi->tx_prod = 0;
6651 tnapi->tx_cons = 0;
6652 if (tnapi->tx_ring)
6653 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6655 tnapi->rx_rcb_ptr = 0;
6656 if (tnapi->rx_rcb)
6657 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6659 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6660 tg3_free_rings(tp);
6661 return -ENOMEM;
6665 return 0;
6669 * Must not be invoked with interrupt sources disabled and
6670 * the hardware shut down.
6672 static void tg3_free_consistent(struct tg3 *tp)
6674 int i;
6676 for (i = 0; i < tp->irq_cnt; i++) {
6677 struct tg3_napi *tnapi = &tp->napi[i];
6679 if (tnapi->tx_ring) {
6680 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6681 tnapi->tx_ring, tnapi->tx_desc_mapping);
6682 tnapi->tx_ring = NULL;
6685 kfree(tnapi->tx_buffers);
6686 tnapi->tx_buffers = NULL;
6688 if (tnapi->rx_rcb) {
6689 dma_free_coherent(&tp->pdev->dev,
6690 TG3_RX_RCB_RING_BYTES(tp),
6691 tnapi->rx_rcb,
6692 tnapi->rx_rcb_mapping);
6693 tnapi->rx_rcb = NULL;
6696 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6698 if (tnapi->hw_status) {
6699 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6700 tnapi->hw_status,
6701 tnapi->status_mapping);
6702 tnapi->hw_status = NULL;
6706 if (tp->hw_stats) {
6707 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6708 tp->hw_stats, tp->stats_mapping);
6709 tp->hw_stats = NULL;
6714 * Must not be invoked with interrupt sources disabled and
6715 * the hardware shut down. Can sleep.
6717 static int tg3_alloc_consistent(struct tg3 *tp)
6719 int i;
6721 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6722 sizeof(struct tg3_hw_stats),
6723 &tp->stats_mapping,
6724 GFP_KERNEL);
6725 if (!tp->hw_stats)
6726 goto err_out;
6728 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6730 for (i = 0; i < tp->irq_cnt; i++) {
6731 struct tg3_napi *tnapi = &tp->napi[i];
6732 struct tg3_hw_status *sblk;
6734 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6735 TG3_HW_STATUS_SIZE,
6736 &tnapi->status_mapping,
6737 GFP_KERNEL);
6738 if (!tnapi->hw_status)
6739 goto err_out;
6741 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6742 sblk = tnapi->hw_status;
6744 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6745 goto err_out;
6747 /* If multivector TSS is enabled, vector 0 does not handle
6748 * tx interrupts. Don't allocate any resources for it.
6750 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6751 (i && tg3_flag(tp, ENABLE_TSS))) {
6752 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6753 TG3_TX_RING_SIZE,
6754 GFP_KERNEL);
6755 if (!tnapi->tx_buffers)
6756 goto err_out;
6758 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6759 TG3_TX_RING_BYTES,
6760 &tnapi->tx_desc_mapping,
6761 GFP_KERNEL);
6762 if (!tnapi->tx_ring)
6763 goto err_out;
6767 * When RSS is enabled, the status block format changes
6768 * slightly. The "rx_jumbo_consumer", "reserved",
6769 * and "rx_mini_consumer" members get mapped to the
6770 * other three rx return ring producer indexes.
6772 switch (i) {
6773 default:
6774 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6775 break;
6776 case 2:
6777 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6778 break;
6779 case 3:
6780 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6781 break;
6782 case 4:
6783 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6784 break;
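/* In short, the per-vector rx return ring producer index lives at:
 * vector 1 -> idx[0].rx_producer, vector 2 -> rx_jumbo_consumer,
 * vector 3 -> reserved, vector 4 -> rx_mini_consumer.
 */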
6788 * If multivector RSS is enabled, vector 0 does not handle
6789 * rx or tx interrupts. Don't allocate any resources for it.
6791 if (!i && tg3_flag(tp, ENABLE_RSS))
6792 continue;
6794 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6795 TG3_RX_RCB_RING_BYTES(tp),
6796 &tnapi->rx_rcb_mapping,
6797 GFP_KERNEL);
6798 if (!tnapi->rx_rcb)
6799 goto err_out;
6801 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6804 return 0;
6806 err_out:
6807 tg3_free_consistent(tp);
6808 return -ENOMEM;
6811 #define MAX_WAIT_CNT 1000
6813 /* To stop a block, clear the enable bit and poll till it
6814 * clears. tp->lock is held.
6816 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6818 unsigned int i;
6819 u32 val;
6821 if (tg3_flag(tp, 5705_PLUS)) {
6822 switch (ofs) {
6823 case RCVLSC_MODE:
6824 case DMAC_MODE:
6825 case MBFREE_MODE:
6826 case BUFMGR_MODE:
6827 case MEMARB_MODE:
6828 /* We can't enable/disable these bits on the
6829 * 5705/5750, so just say success.
6831 return 0;
6833 default:
6834 break;
6838 val = tr32(ofs);
6839 val &= ~enable_bit;
6840 tw32_f(ofs, val);
6842 for (i = 0; i < MAX_WAIT_CNT; i++) {
6843 udelay(100);
6844 val = tr32(ofs);
6845 if ((val & enable_bit) == 0)
6846 break;
6849 if (i == MAX_WAIT_CNT && !silent) {
6850 dev_err(&tp->pdev->dev,
6851 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6852 ofs, enable_bit);
6853 return -ENODEV;
6856 return 0;
6859 /* tp->lock is held. */
6860 static int tg3_abort_hw(struct tg3 *tp, int silent)
6862 int i, err;
6864 tg3_disable_ints(tp);
6866 tp->rx_mode &= ~RX_MODE_ENABLE;
6867 tw32_f(MAC_RX_MODE, tp->rx_mode);
6868 udelay(10);
6870 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6871 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6872 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6873 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6874 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6875 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6877 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6878 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6879 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6880 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6881 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6882 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6883 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6885 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6886 tw32_f(MAC_MODE, tp->mac_mode);
6887 udelay(40);
6889 tp->tx_mode &= ~TX_MODE_ENABLE;
6890 tw32_f(MAC_TX_MODE, tp->tx_mode);
6892 for (i = 0; i < MAX_WAIT_CNT; i++) {
6893 udelay(100);
6894 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6895 break;
6897 if (i >= MAX_WAIT_CNT) {
6898 dev_err(&tp->pdev->dev,
6899 "%s timed out, TX_MODE_ENABLE will not clear "
6900 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6901 err |= -ENODEV;
6904 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6905 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6906 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6908 tw32(FTQ_RESET, 0xffffffff);
6909 tw32(FTQ_RESET, 0x00000000);
6911 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6912 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6914 for (i = 0; i < tp->irq_cnt; i++) {
6915 struct tg3_napi *tnapi = &tp->napi[i];
6916 if (tnapi->hw_status)
6917 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6919 if (tp->hw_stats)
6920 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6922 return err;
6925 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6927 int i;
6928 u32 apedata;
6930 /* NCSI does not support APE events */
6931 if (tg3_flag(tp, APE_HAS_NCSI))
6932 return;
6934 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6935 if (apedata != APE_SEG_SIG_MAGIC)
6936 return;
6938 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6939 if (!(apedata & APE_FW_STATUS_READY))
6940 return;
6942 /* Wait for up to 1 millisecond for APE to service previous event. */
6943 for (i = 0; i < 10; i++) {
6944 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6945 return;
6947 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6949 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6950 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6951 event | APE_EVENT_STATUS_EVENT_PENDING);
6953 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6955 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6956 break;
6958 udelay(100);
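/* 10 iterations x udelay(100) is the ~1 millisecond budget above. */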
6961 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6962 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6965 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6967 u32 event;
6968 u32 apedata;
6970 if (!tg3_flag(tp, ENABLE_APE))
6971 return;
6973 switch (kind) {
6974 case RESET_KIND_INIT:
6975 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6976 APE_HOST_SEG_SIG_MAGIC);
6977 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6978 APE_HOST_SEG_LEN_MAGIC);
6979 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6980 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6981 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6982 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6983 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6984 APE_HOST_BEHAV_NO_PHYLOCK);
6985 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6986 TG3_APE_HOST_DRVR_STATE_START);
6988 event = APE_EVENT_STATUS_STATE_START;
6989 break;
6990 case RESET_KIND_SHUTDOWN:
6991 /* With the interface we are currently using,
6992 * APE does not track driver state. Wiping
6993 * out the HOST SEGMENT SIGNATURE forces
6994 * the APE to assume OS absent status.
6996 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6998 if (device_may_wakeup(&tp->pdev->dev) &&
6999 tg3_flag(tp, WOL_ENABLE)) {
7000 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
7001 TG3_APE_HOST_WOL_SPEED_AUTO);
7002 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
7003 } else
7004 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
7006 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
7008 event = APE_EVENT_STATUS_STATE_UNLOAD;
7009 break;
7010 case RESET_KIND_SUSPEND:
7011 event = APE_EVENT_STATUS_STATE_SUSPEND;
7012 break;
7013 default:
7014 return;
7017 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7019 tg3_ape_send_event(tp, event);
7022 /* tp->lock is held. */
7023 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7025 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7026 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7028 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7029 switch (kind) {
7030 case RESET_KIND_INIT:
7031 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7032 DRV_STATE_START);
7033 break;
7035 case RESET_KIND_SHUTDOWN:
7036 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7037 DRV_STATE_UNLOAD);
7038 break;
7040 case RESET_KIND_SUSPEND:
7041 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7042 DRV_STATE_SUSPEND);
7043 break;
7045 default:
7046 break;
7050 if (kind == RESET_KIND_INIT ||
7051 kind == RESET_KIND_SUSPEND)
7052 tg3_ape_driver_state_change(tp, kind);
7055 /* tp->lock is held. */
7056 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7058 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7059 switch (kind) {
7060 case RESET_KIND_INIT:
7061 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7062 DRV_STATE_START_DONE);
7063 break;
7065 case RESET_KIND_SHUTDOWN:
7066 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7067 DRV_STATE_UNLOAD_DONE);
7068 break;
7070 default:
7071 break;
7075 if (kind == RESET_KIND_SHUTDOWN)
7076 tg3_ape_driver_state_change(tp, kind);
7079 /* tp->lock is held. */
7080 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7082 if (tg3_flag(tp, ENABLE_ASF)) {
7083 switch (kind) {
7084 case RESET_KIND_INIT:
7085 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7086 DRV_STATE_START);
7087 break;
7089 case RESET_KIND_SHUTDOWN:
7090 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7091 DRV_STATE_UNLOAD);
7092 break;
7094 case RESET_KIND_SUSPEND:
7095 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7096 DRV_STATE_SUSPEND);
7097 break;
7099 default:
7100 break;
7105 static int tg3_poll_fw(struct tg3 *tp)
7107 int i;
7108 u32 val;
7110 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7111 /* Wait up to 20ms for init done. */
7112 for (i = 0; i < 200; i++) {
7113 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7114 return 0;
7115 udelay(100);
7117 return -ENODEV;
7120 /* Wait for firmware initialization to complete. */
7121 for (i = 0; i < 100000; i++) {
7122 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7123 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7124 break;
7125 udelay(10);
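/* Up to 100000 polls x udelay(10), i.e. roughly a one second budget. */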
7128 /* Chip might not be fitted with firmware. Some Sun onboard
7129 * parts are configured like that. So don't signal the timeout
7130 * of the above loop as an error, but do report the lack of
7131 * running firmware once.
7133 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7134 tg3_flag_set(tp, NO_FWARE_REPORTED);
7136 netdev_info(tp->dev, "No firmware running\n");
7139 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7140 /* The 57765 A0 needs a little more
7141 * time to do some important work.
7143 mdelay(10);
7146 return 0;
7149 /* Save PCI command register before chip reset */
7150 static void tg3_save_pci_state(struct tg3 *tp)
7152 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7155 /* Restore PCI state after chip reset */
7156 static void tg3_restore_pci_state(struct tg3 *tp)
7158 u32 val;
7160 /* Re-enable indirect register accesses. */
7161 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7162 tp->misc_host_ctrl);
7164 /* Set MAX PCI retry to zero. */
7165 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7166 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7167 tg3_flag(tp, PCIX_MODE))
7168 val |= PCISTATE_RETRY_SAME_DMA;
7169 /* Allow reads and writes to the APE register and memory space. */
7170 if (tg3_flag(tp, ENABLE_APE))
7171 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7172 PCISTATE_ALLOW_APE_SHMEM_WR |
7173 PCISTATE_ALLOW_APE_PSPACE_WR;
7174 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7176 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7178 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7179 if (tg3_flag(tp, PCI_EXPRESS))
7180 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7181 else {
7182 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7183 tp->pci_cacheline_sz);
7184 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7185 tp->pci_lat_timer);
7189 /* Make sure PCI-X relaxed ordering bit is clear. */
7190 if (tg3_flag(tp, PCIX_MODE)) {
7191 u16 pcix_cmd;
7193 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7194 &pcix_cmd);
7195 pcix_cmd &= ~PCI_X_CMD_ERO;
7196 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7197 pcix_cmd);
7200 if (tg3_flag(tp, 5780_CLASS)) {
7202 /* Chip reset on 5780 will reset MSI enable bit,
7203 * so we need to restore it.
7205 if (tg3_flag(tp, USING_MSI)) {
7206 u16 ctrl;
7208 pci_read_config_word(tp->pdev,
7209 tp->msi_cap + PCI_MSI_FLAGS,
7210 &ctrl);
7211 pci_write_config_word(tp->pdev,
7212 tp->msi_cap + PCI_MSI_FLAGS,
7213 ctrl | PCI_MSI_FLAGS_ENABLE);
7214 val = tr32(MSGINT_MODE);
7215 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7220 static void tg3_stop_fw(struct tg3 *);
7222 /* tp->lock is held. */
7223 static int tg3_chip_reset(struct tg3 *tp)
7225 u32 val;
7226 void (*write_op)(struct tg3 *, u32, u32);
7227 int i, err;
7229 tg3_nvram_lock(tp);
7231 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7233 /* No matching tg3_nvram_unlock() after this because
7234 * chip reset below will undo the nvram lock.
7236 tp->nvram_lock_cnt = 0;
7238 /* GRC_MISC_CFG core clock reset will clear the memory
7239 * enable bit in PCI register 4 and the MSI enable bit
7240 * on some chips, so we save relevant registers here.
7242 tg3_save_pci_state(tp);
7244 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7245 tg3_flag(tp, 5755_PLUS))
7246 tw32(GRC_FASTBOOT_PC, 0);
7249 * We must avoid the readl() that normally takes place.
7250 * It locks machines, causes machine checks, and other
7251 * fun things. So, temporarily disable the 5701
7252 * hardware workaround, while we do the reset.
7254 write_op = tp->write32;
7255 if (write_op == tg3_write_flush_reg32)
7256 tp->write32 = tg3_write32;
7258 /* Prevent the irq handler from reading or writing PCI registers
7259 * during chip reset when the memory enable bit in the PCI command
7260 * register may be cleared. The chip does not generate interrupt
7261 * at this time, but the irq handler may still be called due to irq
7262 * sharing or irqpoll.
7264 tg3_flag_set(tp, CHIP_RESETTING);
7265 for (i = 0; i < tp->irq_cnt; i++) {
7266 struct tg3_napi *tnapi = &tp->napi[i];
7267 if (tnapi->hw_status) {
7268 tnapi->hw_status->status = 0;
7269 tnapi->hw_status->status_tag = 0;
7271 tnapi->last_tag = 0;
7272 tnapi->last_irq_tag = 0;
7274 smp_mb();
7276 for (i = 0; i < tp->irq_cnt; i++)
7277 synchronize_irq(tp->napi[i].irq_vec);
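/* The smp_mb() above publishes CHIP_RESETTING and the zeroed status
 * blocks before synchronize_irq() waits out any handler still running
 * on another CPU, so no handler sees stale state during the reset.
 */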
7279 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7280 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7281 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7284 /* do the reset */
7285 val = GRC_MISC_CFG_CORECLK_RESET;
7287 if (tg3_flag(tp, PCI_EXPRESS)) {
7288 /* Force PCIe 1.0a mode */
7289 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7290 !tg3_flag(tp, 57765_PLUS) &&
7291 tr32(TG3_PCIE_PHY_TSTCTL) ==
7292 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7293 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7295 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7296 tw32(GRC_MISC_CFG, (1 << 29));
7297 val |= (1 << 29);
7301 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7302 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7303 tw32(GRC_VCPU_EXT_CTRL,
7304 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7307 /* Manage gphy power for all CPMU absent PCIe devices. */
7308 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7309 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7311 tw32(GRC_MISC_CFG, val);
7313 /* restore 5701 hardware bug workaround write method */
7314 tp->write32 = write_op;
7316 /* Unfortunately, we have to delay before the PCI read back.
7317 * Some 575X chips even will not respond to a PCI cfg access
7318 * when the reset command is given to the chip.
7320 * How do these hardware designers expect things to work
7321 * properly if the PCI write is posted for a long period
7322 * of time? It is always necessary to have some method by
7323 * which a register read back can occur to push the write
7324 * out which does the reset.
7326 * For most tg3 variants the trick below was working.
7327 * Ho hum...
7329 udelay(120);
7331 /* Flush PCI posted writes. The normal MMIO registers
7332 * are inaccessible at this time so this is the only
7333 * way to do this reliably (actually, this is no longer
7334 * the case, see above). I tried to use indirect
7335 * register read/write but this upset some 5701 variants.
7337 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7339 udelay(120);
7341 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7342 u16 val16;
7344 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7345 int i;
7346 u32 cfg_val;
7348 /* Wait for link training to complete. */
7349 for (i = 0; i < 5000; i++)
7350 udelay(100);
7352 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7353 pci_write_config_dword(tp->pdev, 0xc4,
7354 cfg_val | (1 << 15));
7357 /* Clear the "no snoop" and "relaxed ordering" bits. */
7358 pci_read_config_word(tp->pdev,
7359 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7360 &val16);
7361 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7362 PCI_EXP_DEVCTL_NOSNOOP_EN);
7364 * Older PCIe devices only support the 128 byte
7365 * MPS setting. Enforce the restriction.
7367 if (!tg3_flag(tp, CPMU_PRESENT))
7368 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7369 pci_write_config_word(tp->pdev,
7370 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7371 val16);
7373 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7375 /* Clear error status */
7376 pci_write_config_word(tp->pdev,
7377 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7378 PCI_EXP_DEVSTA_CED |
7379 PCI_EXP_DEVSTA_NFED |
7380 PCI_EXP_DEVSTA_FED |
7381 PCI_EXP_DEVSTA_URD);
7384 tg3_restore_pci_state(tp);
7386 tg3_flag_clear(tp, CHIP_RESETTING);
7387 tg3_flag_clear(tp, ERROR_PROCESSED);
7389 val = 0;
7390 if (tg3_flag(tp, 5780_CLASS))
7391 val = tr32(MEMARB_MODE);
7392 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7394 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7395 tg3_stop_fw(tp);
7396 tw32(0x5000, 0x400);
7399 tw32(GRC_MODE, tp->grc_mode);
7401 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7402 val = tr32(0xc4);
7404 tw32(0xc4, val | (1 << 15));
7407 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7408 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7409 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7410 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7411 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7412 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7415 if (tg3_flag(tp, ENABLE_APE))
7416 tp->mac_mode = MAC_MODE_APE_TX_EN |
7417 MAC_MODE_APE_RX_EN |
7418 MAC_MODE_TDE_ENABLE;
7420 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7421 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7422 val = tp->mac_mode;
7423 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7424 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7425 val = tp->mac_mode;
7426 } else
7427 val = 0;
7429 tw32_f(MAC_MODE, val);
7430 udelay(40);
7432 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7434 err = tg3_poll_fw(tp);
7435 if (err)
7436 return err;
7438 tg3_mdio_start(tp);
7440 if (tg3_flag(tp, PCI_EXPRESS) &&
7441 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7442 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7443 !tg3_flag(tp, 57765_PLUS)) {
7444 val = tr32(0x7c00);
7446 tw32(0x7c00, val | (1 << 25));
7449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7450 val = tr32(TG3_CPMU_CLCK_ORIDE);
7451 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7454 /* Reprobe ASF enable state. */
7455 tg3_flag_clear(tp, ENABLE_ASF);
7456 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7457 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7458 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7459 u32 nic_cfg;
7461 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7462 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7463 tg3_flag_set(tp, ENABLE_ASF);
7464 tp->last_event_jiffies = jiffies;
7465 if (tg3_flag(tp, 5750_PLUS))
7466 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7470 return 0;
7473 /* tp->lock is held. */
7474 static void tg3_stop_fw(struct tg3 *tp)
7476 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7477 /* Wait for RX cpu to ACK the previous event. */
7478 tg3_wait_for_event_ack(tp);
7480 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7482 tg3_generate_fw_event(tp);
7484 /* Wait for RX cpu to ACK this event. */
7485 tg3_wait_for_event_ack(tp);
7489 /* tp->lock is held. */
7490 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7492 int err;
7494 tg3_stop_fw(tp);
7496 tg3_write_sig_pre_reset(tp, kind);
7498 tg3_abort_hw(tp, silent);
7499 err = tg3_chip_reset(tp);
7501 __tg3_set_mac_addr(tp, 0);
7503 tg3_write_sig_legacy(tp, kind);
7504 tg3_write_sig_post_reset(tp, kind);
7506 if (err)
7507 return err;
7509 return 0;
7512 #define RX_CPU_SCRATCH_BASE 0x30000
7513 #define RX_CPU_SCRATCH_SIZE 0x04000
7514 #define TX_CPU_SCRATCH_BASE 0x34000
7515 #define TX_CPU_SCRATCH_SIZE 0x04000
7517 /* tp->lock is held. */
7518 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7520 int i;
7522 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7525 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7527 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7528 return 0;
7530 if (offset == RX_CPU_BASE) {
7531 for (i = 0; i < 10000; i++) {
7532 tw32(offset + CPU_STATE, 0xffffffff);
7533 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7534 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7535 break;
7538 tw32(offset + CPU_STATE, 0xffffffff);
7539 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7540 udelay(10);
7541 } else {
7542 for (i = 0; i < 10000; i++) {
7543 tw32(offset + CPU_STATE, 0xffffffff);
7544 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7545 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7546 break;
7550 if (i >= 10000) {
7551 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7552 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7553 return -ENODEV;
7556 /* Clear firmware's nvram arbitration. */
7557 if (tg3_flag(tp, NVRAM))
7558 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7559 return 0;
7562 struct fw_info {
7563 unsigned int fw_base;
7564 unsigned int fw_len;
7565 const __be32 *fw_data;
7568 /* tp->lock is held. */
7569 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7570 int cpu_scratch_size, struct fw_info *info)
7572 int err, lock_err, i;
7573 void (*write_op)(struct tg3 *, u32, u32);
7575 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7576 netdev_err(tp->dev,
7577 "%s: Trying to load TX cpu firmware which is 5705\n",
7578 __func__);
7579 return -EINVAL;
7582 if (tg3_flag(tp, 5705_PLUS))
7583 write_op = tg3_write_mem;
7584 else
7585 write_op = tg3_write_indirect_reg32;
7587 /* It is possible that bootcode is still loading at this point.
7588 * Get the nvram lock first before halting the cpu.
7590 lock_err = tg3_nvram_lock(tp);
7591 err = tg3_halt_cpu(tp, cpu_base);
7592 if (!lock_err)
7593 tg3_nvram_unlock(tp);
7594 if (err)
7595 goto out;
7597 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7598 write_op(tp, cpu_scratch_base + i, 0);
7599 tw32(cpu_base + CPU_STATE, 0xffffffff);
7600 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7601 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7602 write_op(tp, (cpu_scratch_base +
7603 (info->fw_base & 0xffff) +
7604 (i * sizeof(u32))),
7605 be32_to_cpu(info->fw_data[i]));
7607 err = 0;
7609 out:
7610 return err;
7613 /* tp->lock is held. */
7614 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7616 struct fw_info info;
7617 const __be32 *fw_data;
7618 int err, i;
7620 fw_data = (void *)tp->fw->data;
7622 /* Firmware blob starts with version numbers, followed by
7623 start address and length. We use the complete length:
7624 length = end_address_of_bss - start_address_of_text.
7625 The remainder is the blob, loaded contiguously
7626 from the start address. */
7628 info.fw_base = be32_to_cpu(fw_data[1]);
7629 info.fw_len = tp->fw->size - 12;
7630 info.fw_data = &fw_data[3];
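/* Sketch of the blob layout as consumed by the code above:
 *
 * fw_data[0] version
 * fw_data[1] fw_base (start/load address)
 * fw_data[2] length
 * fw_data[3]... payload (tp->fw->size - 12 bytes)
 */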
7632 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7633 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7634 &info);
7635 if (err)
7636 return err;
7638 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7639 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7640 &info);
7641 if (err)
7642 return err;
7644 /* Now startup only the RX cpu. */
7645 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7646 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7648 for (i = 0; i < 5; i++) {
7649 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7650 break;
7651 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7652 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7653 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7654 udelay(1000);
7656 if (i >= 5) {
7657 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7658 "should be %08x\n", __func__,
7659 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7660 return -ENODEV;
7662 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7663 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7665 return 0;
7668 /* tp->lock is held. */
7669 static int tg3_load_tso_firmware(struct tg3 *tp)
7671 struct fw_info info;
7672 const __be32 *fw_data;
7673 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7674 int err, i;
7676 if (tg3_flag(tp, HW_TSO_1) ||
7677 tg3_flag(tp, HW_TSO_2) ||
7678 tg3_flag(tp, HW_TSO_3))
7679 return 0;
7681 fw_data = (void *)tp->fw->data;
7683 /* Firmware blob starts with version numbers, followed by
7684 start address and length. We use the complete length:
7685 length = end_address_of_bss - start_address_of_text.
7686 The remainder is the blob, loaded contiguously
7687 from the start address. */
7689 info.fw_base = be32_to_cpu(fw_data[1]);
7690 cpu_scratch_size = tp->fw_len;
7691 info.fw_len = tp->fw->size - 12;
7692 info.fw_data = &fw_data[3];
7694 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7695 cpu_base = RX_CPU_BASE;
7696 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7697 } else {
7698 cpu_base = TX_CPU_BASE;
7699 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7700 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7703 err = tg3_load_firmware_cpu(tp, cpu_base,
7704 cpu_scratch_base, cpu_scratch_size,
7705 &info);
7706 if (err)
7707 return err;
7709 /* Now startup the cpu. */
7710 tw32(cpu_base + CPU_STATE, 0xffffffff);
7711 tw32_f(cpu_base + CPU_PC, info.fw_base);
7713 for (i = 0; i < 5; i++) {
7714 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7715 break;
7716 tw32(cpu_base + CPU_STATE, 0xffffffff);
7717 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7718 tw32_f(cpu_base + CPU_PC, info.fw_base);
7719 udelay(1000);
7721 if (i >= 5) {
7722 netdev_err(tp->dev,
7723 "%s fails to set CPU PC, is %08x should be %08x\n",
7724 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7725 return -ENODEV;
7727 tw32(cpu_base + CPU_STATE, 0xffffffff);
7728 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7729 return 0;
7733 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7735 struct tg3 *tp = netdev_priv(dev);
7736 struct sockaddr *addr = p;
7737 int err = 0, skip_mac_1 = 0;
7739 if (!is_valid_ether_addr(addr->sa_data))
7740 return -EINVAL;
7742 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7744 if (!netif_running(dev))
7745 return 0;
7747 if (tg3_flag(tp, ENABLE_ASF)) {
7748 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7750 addr0_high = tr32(MAC_ADDR_0_HIGH);
7751 addr0_low = tr32(MAC_ADDR_0_LOW);
7752 addr1_high = tr32(MAC_ADDR_1_HIGH);
7753 addr1_low = tr32(MAC_ADDR_1_LOW);
7755 /* Skip MAC addr 1 if ASF is using it. */
7756 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7757 !(addr1_high == 0 && addr1_low == 0))
7758 skip_mac_1 = 1;
7760 spin_lock_bh(&tp->lock);
7761 __tg3_set_mac_addr(tp, skip_mac_1);
7762 spin_unlock_bh(&tp->lock);
7764 return err;
7767 /* tp->lock is held. */
7768 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7769 dma_addr_t mapping, u32 maxlen_flags,
7770 u32 nic_addr)
7772 tg3_write_mem(tp,
7773 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7774 ((u64) mapping >> 32));
7775 tg3_write_mem(tp,
7776 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7777 ((u64) mapping & 0xffffffff));
7778 tg3_write_mem(tp,
7779 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7780 maxlen_flags);
7782 if (!tg3_flag(tp, 5705_PLUS))
7783 tg3_write_mem(tp,
7784 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7785 nic_addr);
7788 static void __tg3_set_rx_mode(struct net_device *);
7789 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7791 int i;
7793 if (!tg3_flag(tp, ENABLE_TSS)) {
7794 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7795 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7796 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7797 } else {
7798 tw32(HOSTCC_TXCOL_TICKS, 0);
7799 tw32(HOSTCC_TXMAX_FRAMES, 0);
7800 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7803 if (!tg3_flag(tp, ENABLE_RSS)) {
7804 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7805 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7806 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7807 } else {
7808 tw32(HOSTCC_RXCOL_TICKS, 0);
7809 tw32(HOSTCC_RXMAX_FRAMES, 0);
7810 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7813 if (!tg3_flag(tp, 5705_PLUS)) {
7814 u32 val = ec->stats_block_coalesce_usecs;
7816 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7817 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7819 if (!netif_carrier_ok(tp->dev))
7820 val = 0;
7822 tw32(HOSTCC_STAT_COAL_TICKS, val);
7825 for (i = 0; i < tp->irq_cnt - 1; i++) {
7826 u32 reg;
7828 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7829 tw32(reg, ec->rx_coalesce_usecs);
7830 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7831 tw32(reg, ec->rx_max_coalesced_frames);
7832 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7833 tw32(reg, ec->rx_max_coalesced_frames_irq);
7835 if (tg3_flag(tp, ENABLE_TSS)) {
7836 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7837 tw32(reg, ec->tx_coalesce_usecs);
7838 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7839 tw32(reg, ec->tx_max_coalesced_frames);
7840 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7841 tw32(reg, ec->tx_max_coalesced_frames_irq);
7845 for (; i < tp->irq_max - 1; i++) {
7846 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7847 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7848 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7850 if (tg3_flag(tp, ENABLE_TSS)) {
7851 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7852 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7853 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7858 /* tp->lock is held. */
7859 static void tg3_rings_reset(struct tg3 *tp)
7861 int i;
7862 u32 stblk, txrcb, rxrcb, limit;
7863 struct tg3_napi *tnapi = &tp->napi[0];
7865 /* Disable all transmit rings but the first. */
7866 if (!tg3_flag(tp, 5705_PLUS))
7867 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7868 else if (tg3_flag(tp, 5717_PLUS))
7869 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7870 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7871 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7872 else
7873 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7875 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7876 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7877 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7878 BDINFO_FLAGS_DISABLED);
7881 /* Disable all receive return rings but the first. */
7882 if (tg3_flag(tp, 5717_PLUS))
7883 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7884 else if (!tg3_flag(tp, 5705_PLUS))
7885 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7886 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7887 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7888 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7889 else
7890 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7892 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7893 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7894 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7895 BDINFO_FLAGS_DISABLED);
7897 /* Disable interrupts */
7898 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7899 tp->napi[0].chk_msi_cnt = 0;
7900 tp->napi[0].last_rx_cons = 0;
7901 tp->napi[0].last_tx_cons = 0;
7903 /* Zero mailbox registers. */
7904 if (tg3_flag(tp, SUPPORT_MSIX)) {
7905 for (i = 1; i < tp->irq_max; i++) {
7906 tp->napi[i].tx_prod = 0;
7907 tp->napi[i].tx_cons = 0;
7908 if (tg3_flag(tp, ENABLE_TSS))
7909 tw32_mailbox(tp->napi[i].prodmbox, 0);
7910 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7911 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7912 tp->napi[i].chk_msi_cnt = 0;
7913 tp->napi[i].last_rx_cons = 0;
7914 tp->napi[i].last_tx_cons = 0;
7916 if (!tg3_flag(tp, ENABLE_TSS))
7917 tw32_mailbox(tp->napi[0].prodmbox, 0);
7918 } else {
7919 tp->napi[0].tx_prod = 0;
7920 tp->napi[0].tx_cons = 0;
7921 tw32_mailbox(tp->napi[0].prodmbox, 0);
7922 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7925 /* Make sure the NIC-based send BD rings are disabled. */
7926 if (!tg3_flag(tp, 5705_PLUS)) {
7927 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7928 for (i = 0; i < 16; i++)
7929 tw32_tx_mbox(mbox + i * 8, 0);
7932 txrcb = NIC_SRAM_SEND_RCB;
7933 rxrcb = NIC_SRAM_RCV_RET_RCB;
7935 /* Clear status block in ram. */
7936 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7938 /* Set status block DMA address */
7939 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7940 ((u64) tnapi->status_mapping >> 32));
7941 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7942 ((u64) tnapi->status_mapping & 0xffffffff));
7944 if (tnapi->tx_ring) {
7945 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7946 (TG3_TX_RING_SIZE <<
7947 BDINFO_FLAGS_MAXLEN_SHIFT),
7948 NIC_SRAM_TX_BUFFER_DESC);
7949 txrcb += TG3_BDINFO_SIZE;
7952 if (tnapi->rx_rcb) {
7953 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7954 (tp->rx_ret_ring_mask + 1) <<
7955 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7956 rxrcb += TG3_BDINFO_SIZE;
7959 stblk = HOSTCC_STATBLCK_RING1;
7961 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7962 u64 mapping = (u64)tnapi->status_mapping;
7963 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7964 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7966 /* Clear status block in ram. */
7967 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7969 if (tnapi->tx_ring) {
7970 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7971 (TG3_TX_RING_SIZE <<
7972 BDINFO_FLAGS_MAXLEN_SHIFT),
7973 NIC_SRAM_TX_BUFFER_DESC);
7974 txrcb += TG3_BDINFO_SIZE;
7977 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7978 ((tp->rx_ret_ring_mask + 1) <<
7979 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7981 stblk += 8;
7982 rxrcb += TG3_BDINFO_SIZE;
7986 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7988 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7990 if (!tg3_flag(tp, 5750_PLUS) ||
7991 tg3_flag(tp, 5780_CLASS) ||
7992 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7993 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7994 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7995 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7996 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7997 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7998 else
7999 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8001 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8002 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8004 val = min(nic_rep_thresh, host_rep_thresh);
8005 tw32(RCVBDI_STD_THRESH, val);
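/* Example: with rx_pending = 200, host_rep_thresh = 200 / 8 = 25,
 * so the value written is min(bdcache_maxcnt / 2, rx_std_max_post, 25).
 */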
8007 if (tg3_flag(tp, 57765_PLUS))
8008 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8010 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8011 return;
8013 if (!tg3_flag(tp, 5705_PLUS))
8014 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8015 else
8016 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8018 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8020 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8021 tw32(RCVBDI_JUMBO_THRESH, val);
8023 if (tg3_flag(tp, 57765_PLUS))
8024 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8027 /* tp->lock is held. */
8028 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8030 u32 val, rdmac_mode;
8031 int i, err, limit;
8032 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8034 tg3_disable_ints(tp);
8036 tg3_stop_fw(tp);
8038 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8040 if (tg3_flag(tp, INIT_COMPLETE))
8041 tg3_abort_hw(tp, 1);
8043 /* Enable MAC control of LPI */
8044 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8045 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8046 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8047 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8049 tw32_f(TG3_CPMU_EEE_CTRL,
8050 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8052 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8053 TG3_CPMU_EEEMD_LPI_IN_TX |
8054 TG3_CPMU_EEEMD_LPI_IN_RX |
8055 TG3_CPMU_EEEMD_EEE_ENABLE;
8057 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8058 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8060 if (tg3_flag(tp, ENABLE_APE))
8061 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8063 tw32_f(TG3_CPMU_EEE_MODE, val);
8065 tw32_f(TG3_CPMU_EEE_DBTMR1,
8066 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8067 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8069 tw32_f(TG3_CPMU_EEE_DBTMR2,
8070 TG3_CPMU_DBTMR2_APE_TX_2047US |
8071 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8074 if (reset_phy)
8075 tg3_phy_reset(tp);
8077 err = tg3_chip_reset(tp);
8078 if (err)
8079 return err;
8081 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8083 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8084 val = tr32(TG3_CPMU_CTRL);
8085 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8086 tw32(TG3_CPMU_CTRL, val);
8088 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8089 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8090 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8091 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8093 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8094 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8095 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8096 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8098 val = tr32(TG3_CPMU_HST_ACC);
8099 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8100 val |= CPMU_HST_ACC_MACCLK_6_25;
8101 tw32(TG3_CPMU_HST_ACC, val);
8104 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8105 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8106 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8107 PCIE_PWR_MGMT_L1_THRESH_4MS;
8108 tw32(PCIE_PWR_MGMT_THRESH, val);
8110 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8111 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8113 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8115 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8116 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8119 if (tg3_flag(tp, L1PLLPD_EN)) {
8120 u32 grc_mode = tr32(GRC_MODE);
8122 /* Access the lower 1K of PL PCIE block registers. */
8123 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8124 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8126 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8127 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8128 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8130 tw32(GRC_MODE, grc_mode);
8133 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8134 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8135 u32 grc_mode = tr32(GRC_MODE);
8137 /* Access the lower 1K of PL PCIE block registers. */
8138 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8139 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8141 val = tr32(TG3_PCIE_TLDLPL_PORT +
8142 TG3_PCIE_PL_LO_PHYCTL5);
8143 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8144 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8146 tw32(GRC_MODE, grc_mode);
8149 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8150 u32 grc_mode = tr32(GRC_MODE);
8152 /* Access the lower 1K of DL PCIE block registers. */
8153 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8154 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8156 val = tr32(TG3_PCIE_TLDLPL_PORT +
8157 TG3_PCIE_DL_LO_FTSMAX);
8158 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8159 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8160 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8162 tw32(GRC_MODE, grc_mode);
8165 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8166 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8167 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8168 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8171 /* This works around an issue with Athlon chipsets on
8172 * B3 tigon3 silicon. This bit has no effect on any
8173 * other revision. But do not set this on PCI Express
8174 * chips and don't even touch the clocks if the CPMU is present.
8176 if (!tg3_flag(tp, CPMU_PRESENT)) {
8177 if (!tg3_flag(tp, PCI_EXPRESS))
8178 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8179 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8182 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8183 tg3_flag(tp, PCIX_MODE)) {
8184 val = tr32(TG3PCI_PCISTATE);
8185 val |= PCISTATE_RETRY_SAME_DMA;
8186 tw32(TG3PCI_PCISTATE, val);
8189 if (tg3_flag(tp, ENABLE_APE)) {
8190 /* Allow reads and writes to the
8191 * APE register and memory space.
8193 val = tr32(TG3PCI_PCISTATE);
8194 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8195 PCISTATE_ALLOW_APE_SHMEM_WR |
8196 PCISTATE_ALLOW_APE_PSPACE_WR;
8197 tw32(TG3PCI_PCISTATE, val);
8200 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8201 /* Enable some hw fixes. */
8202 val = tr32(TG3PCI_MSI_DATA);
8203 val |= (1 << 26) | (1 << 28) | (1 << 29);
8204 tw32(TG3PCI_MSI_DATA, val);
8207 /* Descriptor ring init may make accesses to the
8208 * NIC SRAM area to setup the TX descriptors, so we
8209 * can only do this after the hardware has been
8210 * successfully reset.
8212 err = tg3_init_rings(tp);
8213 if (err)
8214 return err;
8216 if (tg3_flag(tp, 57765_PLUS)) {
8217 val = tr32(TG3PCI_DMA_RW_CTRL) &
8218 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8219 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8220 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8221 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8222 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8223 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8224 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8225 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8226 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8227 /* This value is determined during the probe time DMA
8228 * engine test, tg3_test_dma.
8230 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8233 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8234 GRC_MODE_4X_NIC_SEND_RINGS |
8235 GRC_MODE_NO_TX_PHDR_CSUM |
8236 GRC_MODE_NO_RX_PHDR_CSUM);
8237 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8239 /* Pseudo-header checksum is done by hardware logic and not
8240 * the offload processors, so make the chip do the pseudo-
8241 * header checksums on receive. For transmit it is more
8242 * convenient to do the pseudo-header checksum in software
8243 * as Linux does that on transmit for us in all cases.
8245 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
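/* For reference, the pseudo-header sum the stack folds in on transmit
 * is the usual one over the source/destination addresses, protocol and
 * length; a hedged sketch using the kernel helper from
 * <net/checksum.h> (illustrative only, not driver code):
 *
 *	sum = csum_tcpudp_magic(iph->saddr, iph->daddr,
 *				tcp_len, IPPROTO_TCP, 0);
 */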
8247 tw32(GRC_MODE,
8248 tp->grc_mode |
8249 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8251 /* Set up the timer prescaler register. The clock is always 66 MHz. */
8252 val = tr32(GRC_MISC_CFG);
8253 val &= ~0xff;
8254 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8255 tw32(GRC_MISC_CFG, val);
8257 /* Initialize MBUF/DESC pool. */
8258 if (tg3_flag(tp, 5750_PLUS)) {
8259 /* Do nothing. */
8260 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8261 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8262 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8263 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8264 else
8265 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8266 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8267 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8268 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8269 int fw_len;
8271 fw_len = tp->fw_len;
8272 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8273 tw32(BUFMGR_MB_POOL_ADDR,
8274 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8275 tw32(BUFMGR_MB_POOL_SIZE,
8276 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8279 if (tp->dev->mtu <= ETH_DATA_LEN) {
8280 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8281 tp->bufmgr_config.mbuf_read_dma_low_water);
8282 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8283 tp->bufmgr_config.mbuf_mac_rx_low_water);
8284 tw32(BUFMGR_MB_HIGH_WATER,
8285 tp->bufmgr_config.mbuf_high_water);
8286 } else {
8287 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8288 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8289 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8290 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8291 tw32(BUFMGR_MB_HIGH_WATER,
8292 tp->bufmgr_config.mbuf_high_water_jumbo);
8294 tw32(BUFMGR_DMA_LOW_WATER,
8295 tp->bufmgr_config.dma_low_water);
8296 tw32(BUFMGR_DMA_HIGH_WATER,
8297 tp->bufmgr_config.dma_high_water);
8299 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8300 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8301 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8302 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8303 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8304 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8305 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8306 tw32(BUFMGR_MODE, val);
8307 for (i = 0; i < 2000; i++) {
8308 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8309 break;
8310 udelay(10);
8312 if (i >= 2000) {
8313 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8314 return -ENODEV;
8317 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8318 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8320 tg3_setup_rxbd_thresholds(tp);
8322 /* Initialize the TG3_BDINFOs at:
8323 * RCVDBDI_STD_BD: standard eth size rx ring
8324 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8325 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8327 * like so:
8328 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8329 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8330 * ring attribute flags
8331 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8333 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8334 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8336 * The size of each ring is fixed in the firmware, but the location is
8337 * configurable.
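/* Viewed as a C struct, each TG3_BDINFO control block is roughly
 * (an illustrative sketch; the driver addresses the fields through the
 * TG3_BDINFO_* offsets rather than such a struct):
 *
 *	struct tg3_bdinfo {
 *		u32 host_addr_hi;	// TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH
 *		u32 host_addr_lo;	// TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW
 *		u32 maxlen_flags;	// TG3_BDINFO_MAXLEN_FLAGS
 *		u32 nic_addr;		// TG3_BDINFO_NIC_ADDR
 *	};
 */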
8339 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8340 ((u64) tpr->rx_std_mapping >> 32));
8341 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8342 ((u64) tpr->rx_std_mapping & 0xffffffff));
8343 if (!tg3_flag(tp, 5717_PLUS))
8344 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8345 NIC_SRAM_RX_BUFFER_DESC);
8347 /* Disable the mini ring */
8348 if (!tg3_flag(tp, 5705_PLUS))
8349 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8350 BDINFO_FLAGS_DISABLED);
8352 /* Program the jumbo buffer descriptor ring control
8353 * blocks on those devices that have them.
8355 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8356 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8358 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8359 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8360 ((u64) tpr->rx_jmb_mapping >> 32));
8361 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8362 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8363 val = TG3_RX_JMB_RING_SIZE(tp) <<
8364 BDINFO_FLAGS_MAXLEN_SHIFT;
8365 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8366 val | BDINFO_FLAGS_USE_EXT_RECV);
8367 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8368 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8369 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8370 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8371 } else {
8372 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8373 BDINFO_FLAGS_DISABLED);
8376 if (tg3_flag(tp, 57765_PLUS)) {
8377 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8378 val = TG3_RX_STD_MAX_SIZE_5700;
8379 else
8380 val = TG3_RX_STD_MAX_SIZE_5717;
8381 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8382 val |= (TG3_RX_STD_DMA_SZ << 2);
8383 } else
8384 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8385 } else
8386 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8388 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8390 tpr->rx_std_prod_idx = tp->rx_pending;
8391 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8393 tpr->rx_jmb_prod_idx =
8394 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8395 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8397 tg3_rings_reset(tp);
8399 /* Initialize MAC address and backoff seed. */
8400 __tg3_set_mac_addr(tp, 0);
8402 /* MTU + ethernet header + FCS + optional VLAN tag */
8403 tw32(MAC_RX_MTU_SIZE,
8404 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
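/* e.g. with the standard 1500-byte MTU this programs
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes.
 */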
8406 /* The slot time is changed by tg3_setup_phy if we
8407 * run at gigabit with half duplex.
8409 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8410 (6 << TX_LENGTHS_IPG_SHIFT) |
8411 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8413 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8414 val |= tr32(MAC_TX_LENGTHS) &
8415 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8416 TX_LENGTHS_CNT_DWN_VAL_MSK);
8418 tw32(MAC_TX_LENGTHS, val);
8420 /* Receive rules. */
8421 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8422 tw32(RCVLPC_CONFIG, 0x0181);
8424 /* Calculate the RDMAC_MODE setting early; we need it to determine
8425 * the RCVLPC_STATS_ENABLE mask.
8427 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8428 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8429 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8430 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8431 RDMAC_MODE_LNGREAD_ENAB);
8433 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8434 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8437 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8438 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8439 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8440 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8441 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8444 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8445 if (tg3_flag(tp, TSO_CAPABLE) &&
8446 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8447 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8448 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8449 !tg3_flag(tp, IS_5788)) {
8450 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8454 if (tg3_flag(tp, PCI_EXPRESS))
8455 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8457 if (tg3_flag(tp, HW_TSO_1) ||
8458 tg3_flag(tp, HW_TSO_2) ||
8459 tg3_flag(tp, HW_TSO_3))
8460 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8462 if (tg3_flag(tp, 57765_PLUS) ||
8463 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8464 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8465 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8467 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8468 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8470 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8471 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8472 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8473 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8474 tg3_flag(tp, 57765_PLUS)) {
8475 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8476 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8477 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8478 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8479 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8480 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8481 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8482 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8483 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8485 tw32(TG3_RDMA_RSRVCTRL_REG,
8486 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8489 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8490 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8491 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8492 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8493 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8494 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8497 /* Receive/send statistics. */
8498 if (tg3_flag(tp, 5750_PLUS)) {
8499 val = tr32(RCVLPC_STATS_ENABLE);
8500 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8501 tw32(RCVLPC_STATS_ENABLE, val);
8502 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8503 tg3_flag(tp, TSO_CAPABLE)) {
8504 val = tr32(RCVLPC_STATS_ENABLE);
8505 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8506 tw32(RCVLPC_STATS_ENABLE, val);
8507 } else {
8508 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8510 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8511 tw32(SNDDATAI_STATSENAB, 0xffffff);
8512 tw32(SNDDATAI_STATSCTRL,
8513 (SNDDATAI_SCTRL_ENABLE |
8514 SNDDATAI_SCTRL_FASTUPD));
8516 /* Set up the host coalescing engine. */
8517 tw32(HOSTCC_MODE, 0);
8518 for (i = 0; i < 2000; i++) {
8519 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8520 break;
8521 udelay(10);
8524 __tg3_set_coalesce(tp, &tp->coal);
8526 if (!tg3_flag(tp, 5705_PLUS)) {
8527 /* Status/statistics block address. See tg3_timer,
8528 * the tg3_periodic_fetch_stats call there, and
8529 * tg3_get_stats64 to see how this works for 5705/5750 chips.
8531 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8532 ((u64) tp->stats_mapping >> 32));
8533 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8534 ((u64) tp->stats_mapping & 0xffffffff));
8535 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8537 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8539 /* Clear statistics and status block memory areas */
8540 for (i = NIC_SRAM_STATS_BLK;
8541 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8542 i += sizeof(u32)) {
8543 tg3_write_mem(tp, i, 0);
8544 udelay(40);
8548 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8550 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8551 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8552 if (!tg3_flag(tp, 5705_PLUS))
8553 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8555 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8556 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8557 /* reset to prevent losing 1st rx packet intermittently */
8558 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8559 udelay(10);
8562 if (tg3_flag(tp, ENABLE_APE))
8563 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8564 else
8565 tp->mac_mode = 0;
8566 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8567 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8568 if (!tg3_flag(tp, 5705_PLUS) &&
8569 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8570 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8571 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8572 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8573 udelay(40);
8575 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8576 * If TG3_FLAG_IS_NIC is zero, we should read the
8577 * register to preserve the GPIO settings for LOMs. The GPIOs,
8578 * whether used as inputs or outputs, are set by boot code after
8579 * reset.
8581 if (!tg3_flag(tp, IS_NIC)) {
8582 u32 gpio_mask;
8584 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8585 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8586 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8588 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8589 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8590 GRC_LCLCTRL_GPIO_OUTPUT3;
8592 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8593 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8595 tp->grc_local_ctrl &= ~gpio_mask;
8596 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8598 /* GPIO1 must be driven high for eeprom write protect */
8599 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8600 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8601 GRC_LCLCTRL_GPIO_OUTPUT1);
8603 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8604 udelay(100);
8606 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8607 val = tr32(MSGINT_MODE);
8608 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8609 tw32(MSGINT_MODE, val);
8612 if (!tg3_flag(tp, 5705_PLUS)) {
8613 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8614 udelay(40);
8617 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8618 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8619 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8620 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8621 WDMAC_MODE_LNGREAD_ENAB);
8623 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8624 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8625 if (tg3_flag(tp, TSO_CAPABLE) &&
8626 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8627 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8628 /* nothing */
8629 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8630 !tg3_flag(tp, IS_5788)) {
8631 val |= WDMAC_MODE_RX_ACCEL;
8635 /* Enable host coalescing bug fix */
8636 if (tg3_flag(tp, 5755_PLUS))
8637 val |= WDMAC_MODE_STATUS_TAG_FIX;
8639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8640 val |= WDMAC_MODE_BURST_ALL_DATA;
8642 tw32_f(WDMAC_MODE, val);
8643 udelay(40);
8645 if (tg3_flag(tp, PCIX_MODE)) {
8646 u16 pcix_cmd;
8648 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8649 &pcix_cmd);
8650 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8651 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8652 pcix_cmd |= PCI_X_CMD_READ_2K;
8653 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8654 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8655 pcix_cmd |= PCI_X_CMD_READ_2K;
8657 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8658 pcix_cmd);
8661 tw32_f(RDMAC_MODE, rdmac_mode);
8662 udelay(40);
8664 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8665 if (!tg3_flag(tp, 5705_PLUS))
8666 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8668 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8669 tw32(SNDDATAC_MODE,
8670 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8671 else
8672 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8674 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8675 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8676 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8677 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8678 val |= RCVDBDI_MODE_LRG_RING_SZ;
8679 tw32(RCVDBDI_MODE, val);
8680 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8681 if (tg3_flag(tp, HW_TSO_1) ||
8682 tg3_flag(tp, HW_TSO_2) ||
8683 tg3_flag(tp, HW_TSO_3))
8684 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8685 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8686 if (tg3_flag(tp, ENABLE_TSS))
8687 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8688 tw32(SNDBDI_MODE, val);
8689 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8691 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8692 err = tg3_load_5701_a0_firmware_fix(tp);
8693 if (err)
8694 return err;
8697 if (tg3_flag(tp, TSO_CAPABLE)) {
8698 err = tg3_load_tso_firmware(tp);
8699 if (err)
8700 return err;
8703 tp->tx_mode = TX_MODE_ENABLE;
8705 if (tg3_flag(tp, 5755_PLUS) ||
8706 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8707 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8709 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8710 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8711 tp->tx_mode &= ~val;
8712 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8715 tw32_f(MAC_TX_MODE, tp->tx_mode);
8716 udelay(100);
8718 if (tg3_flag(tp, ENABLE_RSS)) {
8719 u32 reg = MAC_RSS_INDIR_TBL_0;
8720 u8 *ent = (u8 *)&val;
8722 /* Set up the indirection table */
8723 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8724 int idx = i % sizeof(val);
8726 ent[idx] = i % (tp->irq_cnt - 1);
8727 if (idx == sizeof(val) - 1) {
8728 tw32(reg, val);
8729 reg += 4;
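/* Each 32-bit MAC_RSS_INDIR_TBL_* register holds four one-byte table
 * entries, so the loop above packs four entries per register write.
 * The entries cycle over the tp->irq_cnt - 1 rx rings; vector 0 is
 * reserved for link and other non-rx events.
 */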
8733 /* Set up the "secret" hash key. */
8734 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8735 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8736 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8737 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8738 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8739 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8740 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8741 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8742 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8743 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8746 tp->rx_mode = RX_MODE_ENABLE;
8747 if (tg3_flag(tp, 5755_PLUS))
8748 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8750 if (tg3_flag(tp, ENABLE_RSS))
8751 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8752 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8753 RX_MODE_RSS_IPV6_HASH_EN |
8754 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8755 RX_MODE_RSS_IPV4_HASH_EN |
8756 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8758 tw32_f(MAC_RX_MODE, tp->rx_mode);
8759 udelay(10);
8761 tw32(MAC_LED_CTRL, tp->led_ctrl);
8763 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8764 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8765 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8766 udelay(10);
8768 tw32_f(MAC_RX_MODE, tp->rx_mode);
8769 udelay(10);
8771 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8772 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8773 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8774 /* Set drive transmission level to 1.2V */
8775 /* only if the signal pre-emphasis bit is not set */
8776 val = tr32(MAC_SERDES_CFG);
8777 val &= 0xfffff000;
8778 val |= 0x880;
8779 tw32(MAC_SERDES_CFG, val);
8781 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8782 tw32(MAC_SERDES_CFG, 0x616000);
8785 /* Prevent chip from dropping frames when flow control
8786 * is enabled.
8788 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8789 val = 1;
8790 else
8791 val = 2;
8792 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8794 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8795 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8796 /* Use hardware link auto-negotiation */
8797 tg3_flag_set(tp, HW_AUTONEG);
8800 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8801 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8802 u32 tmp;
8804 tmp = tr32(SERDES_RX_CTRL);
8805 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8806 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8807 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8808 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8811 if (!tg3_flag(tp, USE_PHYLIB)) {
8812 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8813 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8814 tp->link_config.speed = tp->link_config.orig_speed;
8815 tp->link_config.duplex = tp->link_config.orig_duplex;
8816 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8819 err = tg3_setup_phy(tp, 0);
8820 if (err)
8821 return err;
8823 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8824 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8825 u32 tmp;
8827 /* Clear CRC stats. */
8828 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8829 tg3_writephy(tp, MII_TG3_TEST1,
8830 tmp | MII_TG3_TEST1_CRC_EN);
8831 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8836 __tg3_set_rx_mode(tp->dev);
8838 /* Initialize receive rules. */
8839 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8840 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8841 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8842 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8844 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8845 limit = 8;
8846 else
8847 limit = 16;
8848 if (tg3_flag(tp, ENABLE_ASF))
8849 limit -= 4;
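/* Deliberate fall-through: starting at 'limit', each case below clears
 * one more rule, from rule limit - 1 down to rule 4.  Rules 0 and 1
 * were programmed above; rules 2 and 3 are intentionally left alone.
 */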
8850 switch (limit) {
8851 case 16:
8852 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8853 case 15:
8854 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8855 case 14:
8856 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8857 case 13:
8858 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8859 case 12:
8860 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8861 case 11:
8862 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8863 case 10:
8864 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8865 case 9:
8866 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8867 case 8:
8868 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8869 case 7:
8870 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8871 case 6:
8872 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8873 case 5:
8874 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8875 case 4:
8876 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8877 case 3:
8878 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8879 case 2:
8880 case 1:
8882 default:
8883 break;
8886 if (tg3_flag(tp, ENABLE_APE))
8887 /* Write our heartbeat update interval to APE. */
8888 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8889 APE_HOST_HEARTBEAT_INT_DISABLE);
8891 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8893 return 0;
8896 /* Called at device open time to get the chip ready for
8897 * packet processing. Invoked with tp->lock held.
8899 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8901 tg3_switch_clocks(tp);
8903 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8905 return tg3_reset_hw(tp, reset_phy);
8908 #define TG3_STAT_ADD32(PSTAT, REG) \
8909 do { u32 __val = tr32(REG); \
8910 (PSTAT)->low += __val; \
8911 if ((PSTAT)->low < __val) \
8912 (PSTAT)->high += 1; \
8913 } while (0)
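/* The hardware counters are only 32 bits wide, so each snapshot is
 * folded into a split 64-bit software counter.  The carry test relies
 * on unsigned wraparound: after "low += val", a result smaller than
 * the addend means the low word overflowed.  An equivalent standalone
 * sketch (names are illustrative, not from the driver):
 *
 *	static void stat_add32(u32 *low, u32 *high, u32 val)
 *	{
 *		*low += val;
 *		if (*low < val)		// low word wrapped around
 *			*high += 1;
 *	}
 */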
8915 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8917 struct tg3_hw_stats *sp = tp->hw_stats;
8919 if (!netif_carrier_ok(tp->dev))
8920 return;
8922 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8923 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8924 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8925 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8926 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8927 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8928 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8929 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8930 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8931 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8932 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8933 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8934 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8936 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8937 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8938 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8939 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8940 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8941 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8942 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8943 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8944 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8945 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8946 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8947 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8948 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8949 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8951 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8952 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8953 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8954 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8955 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8956 } else {
8957 u32 val = tr32(HOSTCC_FLOW_ATTN);
8958 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8959 if (val) {
8960 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8961 sp->rx_discards.low += val;
8962 if (sp->rx_discards.low < val)
8963 sp->rx_discards.high += 1;
8965 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8967 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
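/* Work around occasionally dropped MSIs on 5717/57765-class devices:
 * called from tg3_timer(), this checks each vector for pending work
 * whose rx/tx consumer indices have not advanced since the last tick
 * and, after a one-tick grace period, re-kicks the vector by rewriting
 * its interrupt mailbox with the last status tag.
 */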
8970 static void tg3_chk_missed_msi(struct tg3 *tp)
8972 u32 i;
8974 for (i = 0; i < tp->irq_cnt; i++) {
8975 struct tg3_napi *tnapi = &tp->napi[i];
8977 if (tg3_has_work(tnapi)) {
8978 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
8979 tnapi->last_tx_cons == tnapi->tx_cons) {
8980 if (tnapi->chk_msi_cnt < 1) {
8981 tnapi->chk_msi_cnt++;
8982 return;
8984 tw32_mailbox(tnapi->int_mbox,
8985 tnapi->last_tag << 24);
8988 tnapi->chk_msi_cnt = 0;
8989 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
8990 tnapi->last_tx_cons = tnapi->tx_cons;
8994 static void tg3_timer(unsigned long __opaque)
8996 struct tg3 *tp = (struct tg3 *) __opaque;
8998 if (tp->irq_sync)
8999 goto restart_timer;
9001 spin_lock(&tp->lock);
9003 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9004 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9005 tg3_chk_missed_msi(tp);
9007 if (!tg3_flag(tp, TAGGED_STATUS)) {
9008 /* All of this garbage is because, when using non-tagged
9009 * IRQ status, the mailbox/status_block protocol the chip
9010 * uses with the CPU is race-prone.
9012 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9013 tw32(GRC_LOCAL_CTRL,
9014 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9015 } else {
9016 tw32(HOSTCC_MODE, tp->coalesce_mode |
9017 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
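/* If the status block shows an update the CPU never saw, force an
 * interrupt with GRC_LCLCTRL_SETINT; otherwise ask the coalescing
 * engine (HOSTCC_MODE_NOW) to DMA a fresh status block immediately.
 */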
9020 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9021 tg3_flag_set(tp, RESTART_TIMER);
9022 spin_unlock(&tp->lock);
9023 schedule_work(&tp->reset_task);
9024 return;
9028 /* This part only runs once per second. */
9029 if (!--tp->timer_counter) {
9030 if (tg3_flag(tp, 5705_PLUS))
9031 tg3_periodic_fetch_stats(tp);
9033 if (tp->setlpicnt && !--tp->setlpicnt)
9034 tg3_phy_eee_enable(tp);
9036 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9037 u32 mac_stat;
9038 int phy_event;
9040 mac_stat = tr32(MAC_STATUS);
9042 phy_event = 0;
9043 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9044 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9045 phy_event = 1;
9046 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9047 phy_event = 1;
9049 if (phy_event)
9050 tg3_setup_phy(tp, 0);
9051 } else if (tg3_flag(tp, POLL_SERDES)) {
9052 u32 mac_stat = tr32(MAC_STATUS);
9053 int need_setup = 0;
9055 if (netif_carrier_ok(tp->dev) &&
9056 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9057 need_setup = 1;
9059 if (!netif_carrier_ok(tp->dev) &&
9060 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9061 MAC_STATUS_SIGNAL_DET))) {
9062 need_setup = 1;
9064 if (need_setup) {
9065 if (!tp->serdes_counter) {
9066 tw32_f(MAC_MODE,
9067 (tp->mac_mode &
9068 ~MAC_MODE_PORT_MODE_MASK));
9069 udelay(40);
9070 tw32_f(MAC_MODE, tp->mac_mode);
9071 udelay(40);
9073 tg3_setup_phy(tp, 0);
9075 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9076 tg3_flag(tp, 5780_CLASS)) {
9077 tg3_serdes_parallel_detect(tp);
9080 tp->timer_counter = tp->timer_multiplier;
9083 /* Heartbeat is only sent once every 2 seconds.
9085 * The heartbeat is to tell the ASF firmware that the host
9086 * driver is still alive. In the event that the OS crashes,
9087 * ASF needs to reset the hardware to free up the FIFO space
9088 * that may be filled with rx packets destined for the host.
9089 * If the FIFO is full, ASF will no longer function properly.
9091 * Unintended resets have been reported on real-time kernels,
9092 * where the timer doesn't run on time. Netpoll will also have
9093 * the same problem.
9095 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9096 * to check the ring condition when the heartbeat is expiring
9097 * before doing the reset. This will prevent most unintended
9098 * resets.
9100 if (!--tp->asf_counter) {
9101 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9102 tg3_wait_for_event_ack(tp);
9104 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9105 FWCMD_NICDRV_ALIVE3);
9106 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9107 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9108 TG3_FW_UPDATE_TIMEOUT_SEC);
9110 tg3_generate_fw_event(tp);
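/* The event is a three-word mailbox exchange: the command, its length,
 * and the data word (here the firmware update timeout in seconds);
 * tg3_generate_fw_event() then signals the firmware that the mailbox
 * has been filled in.
 */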
9112 tp->asf_counter = tp->asf_multiplier;
9115 spin_unlock(&tp->lock);
9117 restart_timer:
9118 tp->timer.expires = jiffies + tp->timer_offset;
9119 add_timer(&tp->timer);
9122 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9124 irq_handler_t fn;
9125 unsigned long flags;
9126 char *name;
9127 struct tg3_napi *tnapi = &tp->napi[irq_num];
9129 if (tp->irq_cnt == 1)
9130 name = tp->dev->name;
9131 else {
9132 name = &tnapi->irq_lbl[0];
9133 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9134 name[IFNAMSIZ-1] = 0;
9137 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9138 fn = tg3_msi;
9139 if (tg3_flag(tp, 1SHOT_MSI))
9140 fn = tg3_msi_1shot;
9141 flags = 0;
9142 } else {
9143 fn = tg3_interrupt;
9144 if (tg3_flag(tp, TAGGED_STATUS))
9145 fn = tg3_interrupt_tagged;
9146 flags = IRQF_SHARED;
9149 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9152 static int tg3_test_interrupt(struct tg3 *tp)
9154 struct tg3_napi *tnapi = &tp->napi[0];
9155 struct net_device *dev = tp->dev;
9156 int err, i, intr_ok = 0;
9157 u32 val;
9159 if (!netif_running(dev))
9160 return -ENODEV;
9162 tg3_disable_ints(tp);
9164 free_irq(tnapi->irq_vec, tnapi);
9167 * Turn off MSI one-shot mode. Otherwise this test has no
9168 * way to observe whether the interrupt was delivered.
9170 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9171 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9172 tw32(MSGINT_MODE, val);
9175 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9176 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9177 if (err)
9178 return err;
9180 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9181 tg3_enable_ints(tp);
9183 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9184 tnapi->coal_now);
9186 for (i = 0; i < 5; i++) {
9187 u32 int_mbox, misc_host_ctrl;
9189 int_mbox = tr32_mailbox(tnapi->int_mbox);
9190 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9192 if ((int_mbox != 0) ||
9193 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9194 intr_ok = 1;
9195 break;
9198 msleep(10);
9201 tg3_disable_ints(tp);
9203 free_irq(tnapi->irq_vec, tnapi);
9205 err = tg3_request_irq(tp, 0);
9207 if (err)
9208 return err;
9210 if (intr_ok) {
9211 /* Reenable MSI one shot mode. */
9212 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9213 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9214 tw32(MSGINT_MODE, val);
9216 return 0;
9219 return -EIO;
9222 /* Returns 0 if the MSI test succeeds, or if the MSI test fails and
9223 * INTx mode is successfully restored.
9225 static int tg3_test_msi(struct tg3 *tp)
9227 int err;
9228 u16 pci_cmd;
9230 if (!tg3_flag(tp, USING_MSI))
9231 return 0;
9233 /* Turn off SERR reporting in case MSI terminates with Master
9234 * Abort.
9236 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9237 pci_write_config_word(tp->pdev, PCI_COMMAND,
9238 pci_cmd & ~PCI_COMMAND_SERR);
9240 err = tg3_test_interrupt(tp);
9242 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9244 if (!err)
9245 return 0;
9247 /* other failures */
9248 if (err != -EIO)
9249 return err;
9251 /* MSI test failed, go back to INTx mode */
9252 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9253 "to INTx mode. Please report this failure to the PCI "
9254 "maintainer and include system chipset information\n");
9256 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9258 pci_disable_msi(tp->pdev);
9260 tg3_flag_clear(tp, USING_MSI);
9261 tp->napi[0].irq_vec = tp->pdev->irq;
9263 err = tg3_request_irq(tp, 0);
9264 if (err)
9265 return err;
9267 /* Need to reset the chip because the MSI cycle may have terminated
9268 * with Master Abort.
9270 tg3_full_lock(tp, 1);
9272 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9273 err = tg3_init_hw(tp, 1);
9275 tg3_full_unlock(tp);
9277 if (err)
9278 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9280 return err;
9283 static int tg3_request_firmware(struct tg3 *tp)
9285 const __be32 *fw_data;
9287 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9288 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9289 tp->fw_needed);
9290 return -ENOENT;
9293 fw_data = (void *)tp->fw->data;
9295 /* The firmware blob starts with version numbers, followed by the
9296 * start address and the _full_ length including BSS sections
9297 * (which must be longer than the actual data, of course).
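/* i.e. the header can be pictured as three big-endian words (a sketch,
 * not a driver type):
 *
 *	struct tg3_fw_hdr {
 *		__be32 version;
 *		__be32 base_addr;	// load address in NIC memory
 *		__be32 len;		// full length, including BSS
 *	};
 *
 * The fw_data[2] read below is the 'len' field.
 */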
9300 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9301 if (tp->fw_len < (tp->fw->size - 12)) {
9302 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9303 tp->fw_len, tp->fw_needed);
9304 release_firmware(tp->fw);
9305 tp->fw = NULL;
9306 return -EINVAL;
9309 /* We no longer need firmware; we have it. */
9310 tp->fw_needed = NULL;
9311 return 0;
9314 static bool tg3_enable_msix(struct tg3 *tp)
9316 int i, rc, cpus = num_online_cpus();
9317 struct msix_entry msix_ent[tp->irq_max];
9319 if (cpus == 1)
9320 /* Just fallback to the simpler MSI mode. */
9321 return false;
9324 * We want as many rx rings enabled as there are CPUs.
9325 * The first MSI-X vector only deals with link interrupts, etc.,
9326 * so we add one to the number of vectors we are requesting.
9328 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
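/* e.g. on a 4-CPU system with irq_max >= 5 this requests five vectors:
 * one for link/misc events plus four rx vectors.
 */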
9330 for (i = 0; i < tp->irq_max; i++) {
9331 msix_ent[i].entry = i;
9332 msix_ent[i].vector = 0;
9335 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9336 if (rc < 0) {
9337 return false;
9338 } else if (rc != 0) {
9339 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9340 return false;
9341 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9342 tp->irq_cnt, rc);
9343 tp->irq_cnt = rc;
9346 for (i = 0; i < tp->irq_max; i++)
9347 tp->napi[i].irq_vec = msix_ent[i].vector;
9349 netif_set_real_num_tx_queues(tp->dev, 1);
9350 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9351 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9352 pci_disable_msix(tp->pdev);
9353 return false;
9356 if (tp->irq_cnt > 1) {
9357 tg3_flag_set(tp, ENABLE_RSS);
9359 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9360 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9361 tg3_flag_set(tp, ENABLE_TSS);
9362 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9366 return true;
9369 static void tg3_ints_init(struct tg3 *tp)
9371 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9372 !tg3_flag(tp, TAGGED_STATUS)) {
9373 /* All MSI-supporting chips should support tagged
9374 * status; warn and fall back to INTx if this is not the case.
9376 netdev_warn(tp->dev,
9377 "MSI without TAGGED_STATUS? Not using MSI\n");
9378 goto defcfg;
9381 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9382 tg3_flag_set(tp, USING_MSIX);
9383 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9384 tg3_flag_set(tp, USING_MSI);
9386 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9387 u32 msi_mode = tr32(MSGINT_MODE);
9388 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9389 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9390 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9392 defcfg:
9393 if (!tg3_flag(tp, USING_MSIX)) {
9394 tp->irq_cnt = 1;
9395 tp->napi[0].irq_vec = tp->pdev->irq;
9396 netif_set_real_num_tx_queues(tp->dev, 1);
9397 netif_set_real_num_rx_queues(tp->dev, 1);
9401 static void tg3_ints_fini(struct tg3 *tp)
9403 if (tg3_flag(tp, USING_MSIX))
9404 pci_disable_msix(tp->pdev);
9405 else if (tg3_flag(tp, USING_MSI))
9406 pci_disable_msi(tp->pdev);
9407 tg3_flag_clear(tp, USING_MSI);
9408 tg3_flag_clear(tp, USING_MSIX);
9409 tg3_flag_clear(tp, ENABLE_RSS);
9410 tg3_flag_clear(tp, ENABLE_TSS);
9413 static int tg3_open(struct net_device *dev)
9415 struct tg3 *tp = netdev_priv(dev);
9416 int i, err;
9418 if (tp->fw_needed) {
9419 err = tg3_request_firmware(tp);
9420 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9421 if (err)
9422 return err;
9423 } else if (err) {
9424 netdev_warn(tp->dev, "TSO capability disabled\n");
9425 tg3_flag_clear(tp, TSO_CAPABLE);
9426 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9427 netdev_notice(tp->dev, "TSO capability restored\n");
9428 tg3_flag_set(tp, TSO_CAPABLE);
9432 netif_carrier_off(tp->dev);
9434 err = tg3_power_up(tp);
9435 if (err)
9436 return err;
9438 tg3_full_lock(tp, 0);
9440 tg3_disable_ints(tp);
9441 tg3_flag_clear(tp, INIT_COMPLETE);
9443 tg3_full_unlock(tp);
9446 * Set up interrupts first so we know how
9447 * many NAPI resources to allocate.
9449 tg3_ints_init(tp);
9451 /* The placement of this call is tied
9452 * to the setup and use of Host TX descriptors.
9454 err = tg3_alloc_consistent(tp);
9455 if (err)
9456 goto err_out1;
9458 tg3_napi_init(tp);
9460 tg3_napi_enable(tp);
9462 for (i = 0; i < tp->irq_cnt; i++) {
9463 struct tg3_napi *tnapi = &tp->napi[i];
9464 err = tg3_request_irq(tp, i);
9465 if (err) {
9466 for (i--; i >= 0; i--)
9467 free_irq(tp->napi[i].irq_vec, &tp->napi[i]);
9468 break;
9472 if (err)
9473 goto err_out2;
9475 tg3_full_lock(tp, 0);
9477 err = tg3_init_hw(tp, 1);
9478 if (err) {
9479 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9480 tg3_free_rings(tp);
9481 } else {
9482 if (tg3_flag(tp, TAGGED_STATUS) &&
9483 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9484 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9485 tp->timer_offset = HZ;
9486 else
9487 tp->timer_offset = HZ / 10;
9489 BUG_ON(tp->timer_offset > HZ);
9490 tp->timer_counter = tp->timer_multiplier =
9491 (HZ / tp->timer_offset);
9492 tp->asf_counter = tp->asf_multiplier =
9493 ((HZ / tp->timer_offset) * 2);
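/* i.e. the timer ticks every 1s (tagged status) or every 100ms, and
 * the counters scale with the tick rate so the once-per-second work in
 * tg3_timer() still runs at 1 Hz and the ASF heartbeat every 2 seconds.
 */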
9495 init_timer(&tp->timer);
9496 tp->timer.expires = jiffies + tp->timer_offset;
9497 tp->timer.data = (unsigned long) tp;
9498 tp->timer.function = tg3_timer;
9501 tg3_full_unlock(tp);
9503 if (err)
9504 goto err_out3;
9506 if (tg3_flag(tp, USING_MSI)) {
9507 err = tg3_test_msi(tp);
9509 if (err) {
9510 tg3_full_lock(tp, 0);
9511 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9512 tg3_free_rings(tp);
9513 tg3_full_unlock(tp);
9515 goto err_out2;
9518 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9519 u32 val = tr32(PCIE_TRANSACTION_CFG);
9521 tw32(PCIE_TRANSACTION_CFG,
9522 val | PCIE_TRANS_CFG_1SHOT_MSI);
9526 tg3_phy_start(tp);
9528 tg3_full_lock(tp, 0);
9530 add_timer(&tp->timer);
9531 tg3_flag_set(tp, INIT_COMPLETE);
9532 tg3_enable_ints(tp);
9534 tg3_full_unlock(tp);
9536 netif_tx_start_all_queues(dev);
9539 * Reset the loopback feature if it was turned on while the device
9540 * was down, to make sure that it's installed properly now.
9542 if (dev->features & NETIF_F_LOOPBACK)
9543 tg3_set_loopback(dev, dev->features);
9545 return 0;
9547 err_out3:
9548 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9549 struct tg3_napi *tnapi = &tp->napi[i];
9550 free_irq(tnapi->irq_vec, tnapi);
9553 err_out2:
9554 tg3_napi_disable(tp);
9555 tg3_napi_fini(tp);
9556 tg3_free_consistent(tp);
9558 err_out1:
9559 tg3_ints_fini(tp);
9560 tg3_frob_aux_power(tp, false);
9561 pci_set_power_state(tp->pdev, PCI_D3hot);
9562 return err;
9565 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9566 struct rtnl_link_stats64 *);
9567 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9569 static int tg3_close(struct net_device *dev)
9571 int i;
9572 struct tg3 *tp = netdev_priv(dev);
9574 tg3_napi_disable(tp);
9575 cancel_work_sync(&tp->reset_task);
9577 netif_tx_stop_all_queues(dev);
9579 del_timer_sync(&tp->timer);
9581 tg3_phy_stop(tp);
9583 tg3_full_lock(tp, 1);
9585 tg3_disable_ints(tp);
9587 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9588 tg3_free_rings(tp);
9589 tg3_flag_clear(tp, INIT_COMPLETE);
9591 tg3_full_unlock(tp);
9593 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9594 struct tg3_napi *tnapi = &tp->napi[i];
9595 free_irq(tnapi->irq_vec, tnapi);
9598 tg3_ints_fini(tp);
9600 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9602 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9603 sizeof(tp->estats_prev));
9605 tg3_napi_fini(tp);
9607 tg3_free_consistent(tp);
9609 tg3_power_down(tp);
9611 netif_carrier_off(tp->dev);
9613 return 0;
9616 static inline u64 get_stat64(tg3_stat64_t *val)
9618 return ((u64)val->high << 32) | ((u64)val->low);
9621 static u64 calc_crc_errors(struct tg3 *tp)
9623 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9625 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9626 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9627 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9628 u32 val;
9630 spin_lock_bh(&tp->lock);
9631 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9632 tg3_writephy(tp, MII_TG3_TEST1,
9633 val | MII_TG3_TEST1_CRC_EN);
9634 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9635 } else
9636 val = 0;
9637 spin_unlock_bh(&tp->lock);
9639 tp->phy_crc_errors += val;
9641 return tp->phy_crc_errors;
9644 return get_stat64(&hw_stats->rx_fcs_errors);
9647 #define ESTAT_ADD(member) \
9648 estats->member = old_estats->member + \
9649 get_stat64(&hw_stats->member)
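/* ESTAT_ADD() folds the snapshot saved at the last tg3_close()
 * (tp->estats_prev) together with the live hardware counter, so the
 * ethtool statistics keep accumulating across ifdown/ifup cycles.
 */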
9651 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9653 struct tg3_ethtool_stats *estats = &tp->estats;
9654 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9655 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9657 if (!hw_stats)
9658 return old_estats;
9660 ESTAT_ADD(rx_octets);
9661 ESTAT_ADD(rx_fragments);
9662 ESTAT_ADD(rx_ucast_packets);
9663 ESTAT_ADD(rx_mcast_packets);
9664 ESTAT_ADD(rx_bcast_packets);
9665 ESTAT_ADD(rx_fcs_errors);
9666 ESTAT_ADD(rx_align_errors);
9667 ESTAT_ADD(rx_xon_pause_rcvd);
9668 ESTAT_ADD(rx_xoff_pause_rcvd);
9669 ESTAT_ADD(rx_mac_ctrl_rcvd);
9670 ESTAT_ADD(rx_xoff_entered);
9671 ESTAT_ADD(rx_frame_too_long_errors);
9672 ESTAT_ADD(rx_jabbers);
9673 ESTAT_ADD(rx_undersize_packets);
9674 ESTAT_ADD(rx_in_length_errors);
9675 ESTAT_ADD(rx_out_length_errors);
9676 ESTAT_ADD(rx_64_or_less_octet_packets);
9677 ESTAT_ADD(rx_65_to_127_octet_packets);
9678 ESTAT_ADD(rx_128_to_255_octet_packets);
9679 ESTAT_ADD(rx_256_to_511_octet_packets);
9680 ESTAT_ADD(rx_512_to_1023_octet_packets);
9681 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9682 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9683 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9684 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9685 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9687 ESTAT_ADD(tx_octets);
9688 ESTAT_ADD(tx_collisions);
9689 ESTAT_ADD(tx_xon_sent);
9690 ESTAT_ADD(tx_xoff_sent);
9691 ESTAT_ADD(tx_flow_control);
9692 ESTAT_ADD(tx_mac_errors);
9693 ESTAT_ADD(tx_single_collisions);
9694 ESTAT_ADD(tx_mult_collisions);
9695 ESTAT_ADD(tx_deferred);
9696 ESTAT_ADD(tx_excessive_collisions);
9697 ESTAT_ADD(tx_late_collisions);
9698 ESTAT_ADD(tx_collide_2times);
9699 ESTAT_ADD(tx_collide_3times);
9700 ESTAT_ADD(tx_collide_4times);
9701 ESTAT_ADD(tx_collide_5times);
9702 ESTAT_ADD(tx_collide_6times);
9703 ESTAT_ADD(tx_collide_7times);
9704 ESTAT_ADD(tx_collide_8times);
9705 ESTAT_ADD(tx_collide_9times);
9706 ESTAT_ADD(tx_collide_10times);
9707 ESTAT_ADD(tx_collide_11times);
9708 ESTAT_ADD(tx_collide_12times);
9709 ESTAT_ADD(tx_collide_13times);
9710 ESTAT_ADD(tx_collide_14times);
9711 ESTAT_ADD(tx_collide_15times);
9712 ESTAT_ADD(tx_ucast_packets);
9713 ESTAT_ADD(tx_mcast_packets);
9714 ESTAT_ADD(tx_bcast_packets);
9715 ESTAT_ADD(tx_carrier_sense_errors);
9716 ESTAT_ADD(tx_discards);
9717 ESTAT_ADD(tx_errors);
9719 ESTAT_ADD(dma_writeq_full);
9720 ESTAT_ADD(dma_write_prioq_full);
9721 ESTAT_ADD(rxbds_empty);
9722 ESTAT_ADD(rx_discards);
9723 ESTAT_ADD(rx_errors);
9724 ESTAT_ADD(rx_threshold_hit);
9726 ESTAT_ADD(dma_readq_full);
9727 ESTAT_ADD(dma_read_prioq_full);
9728 ESTAT_ADD(tx_comp_queue_full);
9730 ESTAT_ADD(ring_set_send_prod_index);
9731 ESTAT_ADD(ring_status_update);
9732 ESTAT_ADD(nic_irqs);
9733 ESTAT_ADD(nic_avoided_irqs);
9734 ESTAT_ADD(nic_tx_threshold_hit);
9736 ESTAT_ADD(mbuf_lwm_thresh_hit);
9738 return estats;
9741 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9742 struct rtnl_link_stats64 *stats)
9744 struct tg3 *tp = netdev_priv(dev);
9745 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9746 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9748 if (!hw_stats)
9749 return old_stats;
9751 stats->rx_packets = old_stats->rx_packets +
9752 get_stat64(&hw_stats->rx_ucast_packets) +
9753 get_stat64(&hw_stats->rx_mcast_packets) +
9754 get_stat64(&hw_stats->rx_bcast_packets);
9756 stats->tx_packets = old_stats->tx_packets +
9757 get_stat64(&hw_stats->tx_ucast_packets) +
9758 get_stat64(&hw_stats->tx_mcast_packets) +
9759 get_stat64(&hw_stats->tx_bcast_packets);
9761 stats->rx_bytes = old_stats->rx_bytes +
9762 get_stat64(&hw_stats->rx_octets);
9763 stats->tx_bytes = old_stats->tx_bytes +
9764 get_stat64(&hw_stats->tx_octets);
9766 stats->rx_errors = old_stats->rx_errors +
9767 get_stat64(&hw_stats->rx_errors);
9768 stats->tx_errors = old_stats->tx_errors +
9769 get_stat64(&hw_stats->tx_errors) +
9770 get_stat64(&hw_stats->tx_mac_errors) +
9771 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9772 get_stat64(&hw_stats->tx_discards);
9774 stats->multicast = old_stats->multicast +
9775 get_stat64(&hw_stats->rx_mcast_packets);
9776 stats->collisions = old_stats->collisions +
9777 get_stat64(&hw_stats->tx_collisions);
9779 stats->rx_length_errors = old_stats->rx_length_errors +
9780 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9781 get_stat64(&hw_stats->rx_undersize_packets);
9783 stats->rx_over_errors = old_stats->rx_over_errors +
9784 get_stat64(&hw_stats->rxbds_empty);
9785 stats->rx_frame_errors = old_stats->rx_frame_errors +
9786 get_stat64(&hw_stats->rx_align_errors);
9787 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9788 get_stat64(&hw_stats->tx_discards);
9789 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9790 get_stat64(&hw_stats->tx_carrier_sense_errors);
9792 stats->rx_crc_errors = old_stats->rx_crc_errors +
9793 calc_crc_errors(tp);
9795 stats->rx_missed_errors = old_stats->rx_missed_errors +
9796 get_stat64(&hw_stats->rx_discards);
9798 stats->rx_dropped = tp->rx_dropped;
9800 return stats;
9803 static inline u32 calc_crc(unsigned char *buf, int len)
9805 u32 reg;
9806 u32 tmp;
9807 int j, k;
9809 reg = 0xffffffff;
9811 for (j = 0; j < len; j++) {
9812 reg ^= buf[j];
9814 for (k = 0; k < 8; k++) {
9815 tmp = reg & 0x01;
9817 reg >>= 1;
9819 if (tmp)
9820 reg ^= 0xedb88320;
9824 return ~reg;
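/* This is the standard bit-serial, LSB-first CRC-32 (reflected
 * polynomial 0xedb88320, all-ones seed, final inversion);
 * __tg3_set_rx_mode() below uses it to pick multicast hash filter bits.
 */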
9827 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9829 /* accept or reject all multicast frames */
9830 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9831 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9832 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9833 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9836 static void __tg3_set_rx_mode(struct net_device *dev)
9838 struct tg3 *tp = netdev_priv(dev);
9839 u32 rx_mode;
9841 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9842 RX_MODE_KEEP_VLAN_TAG);
9844 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9845 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9846 * flag clear.
9848 if (!tg3_flag(tp, ENABLE_ASF))
9849 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9850 #endif
9852 if (dev->flags & IFF_PROMISC) {
9853 /* Promiscuous mode. */
9854 rx_mode |= RX_MODE_PROMISC;
9855 } else if (dev->flags & IFF_ALLMULTI) {
9856 /* Accept all multicast. */
9857 tg3_set_multi(tp, 1);
9858 } else if (netdev_mc_empty(dev)) {
9859 /* Reject all multicast. */
9860 tg3_set_multi(tp, 0);
9861 } else {
9862 /* Accept one or more multicast(s). */
9863 struct netdev_hw_addr *ha;
9864 u32 mc_filter[4] = { 0, };
9865 u32 regidx;
9866 u32 bit;
9867 u32 crc;
9869 netdev_for_each_mc_addr(ha, dev) {
9870 crc = calc_crc(ha->addr, ETH_ALEN);
9871 bit = ~crc & 0x7f;
9872 regidx = (bit & 0x60) >> 5;
9873 bit &= 0x1f;
9874 mc_filter[regidx] |= (1 << bit);
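/* The 7-bit hash (~crc & 0x7f) indexes one of 128 filter bits spread
 * across the four 32-bit MAC_HASH_REG_* registers: bits 6:5 select the
 * register, bits 4:0 the bit within it.
 */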
9877 tw32(MAC_HASH_REG_0, mc_filter[0]);
9878 tw32(MAC_HASH_REG_1, mc_filter[1]);
9879 tw32(MAC_HASH_REG_2, mc_filter[2]);
9880 tw32(MAC_HASH_REG_3, mc_filter[3]);
9883 if (rx_mode != tp->rx_mode) {
9884 tp->rx_mode = rx_mode;
9885 tw32_f(MAC_RX_MODE, rx_mode);
9886 udelay(10);
9890 static void tg3_set_rx_mode(struct net_device *dev)
9892 struct tg3 *tp = netdev_priv(dev);
9894 if (!netif_running(dev))
9895 return;
9897 tg3_full_lock(tp, 0);
9898 __tg3_set_rx_mode(dev);
9899 tg3_full_unlock(tp);
9902 static int tg3_get_regs_len(struct net_device *dev)
9904 return TG3_REG_BLK_SIZE;
9907 static void tg3_get_regs(struct net_device *dev,
9908 struct ethtool_regs *regs, void *_p)
9910 struct tg3 *tp = netdev_priv(dev);
9912 regs->version = 0;
9914 memset(_p, 0, TG3_REG_BLK_SIZE);
9916 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9917 return;
9919 tg3_full_lock(tp, 0);
9921 tg3_dump_legacy_regs(tp, (u32 *)_p);
9923 tg3_full_unlock(tp);
9926 static int tg3_get_eeprom_len(struct net_device *dev)
9928 struct tg3 *tp = netdev_priv(dev);
9930 return tp->nvram_size;
9933 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9935 struct tg3 *tp = netdev_priv(dev);
9936 int ret;
9937 u8 *pd;
9938 u32 i, offset, len, b_offset, b_count;
9939 __be32 val;
9941 if (tg3_flag(tp, NO_NVRAM))
9942 return -EINVAL;
9944 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9945 return -EAGAIN;
9947 offset = eeprom->offset;
9948 len = eeprom->len;
9949 eeprom->len = 0;
9951 eeprom->magic = TG3_EEPROM_MAGIC;
9953 if (offset & 3) {
9954 /* adjustments to start on required 4-byte boundary */
9955 b_offset = offset & 3;
9956 b_count = 4 - b_offset;
9957 if (b_count > len) {
9958 /* i.e. offset=1 len=2 */
9959 b_count = len;
9961 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9962 if (ret)
9963 return ret;
9964 memcpy(data, ((char *)&val) + b_offset, b_count);
9965 len -= b_count;
9966 offset += b_count;
9967 eeprom->len += b_count;
9970 /* read bytes up to the last 4-byte boundary */
9971 pd = &data[eeprom->len];
9972 for (i = 0; i < (len - (len & 3)); i += 4) {
9973 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9974 if (ret) {
9975 eeprom->len += i;
9976 return ret;
9978 memcpy(pd + i, &val, 4);
9980 eeprom->len += i;
9982 if (len & 3) {
9983 /* read last bytes not ending on a 4-byte boundary */
9984 pd = &data[eeprom->len];
9985 b_count = len & 3;
9986 b_offset = offset + len - b_count;
9987 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9988 if (ret)
9989 return ret;
9990 memcpy(pd, &val, b_count);
9991 eeprom->len += b_count;
9993 return 0;
9996 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9998 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10000 struct tg3 *tp = netdev_priv(dev);
10001 int ret;
10002 u32 offset, len, b_offset, odd_len;
10003 u8 *buf;
10004 __be32 start, end;
10006 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10007 return -EAGAIN;
10009 if (tg3_flag(tp, NO_NVRAM) ||
10010 eeprom->magic != TG3_EEPROM_MAGIC)
10011 return -EINVAL;
10013 offset = eeprom->offset;
10014 len = eeprom->len;
10016 if ((b_offset = (offset & 3))) {
10017 /* adjustments to start on required 4-byte boundary */
10018 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10019 if (ret)
10020 return ret;
10021 len += b_offset;
10022 offset &= ~3;
10023 if (len < 4)
10024 len = 4;
10027 odd_len = 0;
10028 if (len & 3) {
10029 /* adjustments to end on required 4-byte boundary */
10030 odd_len = 1;
10031 len = (len + 3) & ~3;
10032 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10033 if (ret)
10034 return ret;
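/* An unaligned write is thus widened to 4-byte boundaries, with the
 * bytes outside the caller's range back-filled from NVRAM ('start' and
 * 'end') before the block write below.
 */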
10037 buf = data;
10038 if (b_offset || odd_len) {
10039 buf = kmalloc(len, GFP_KERNEL);
10040 if (!buf)
10041 return -ENOMEM;
10042 if (b_offset)
10043 memcpy(buf, &start, 4);
10044 if (odd_len)
10045 memcpy(buf+len-4, &end, 4);
10046 memcpy(buf + b_offset, data, eeprom->len);
10049 ret = tg3_nvram_write_block(tp, offset, len, buf);
10051 if (buf != data)
10052 kfree(buf);
10054 return ret;
10057 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10059 struct tg3 *tp = netdev_priv(dev);
10061 if (tg3_flag(tp, USE_PHYLIB)) {
10062 struct phy_device *phydev;
10063 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10064 return -EAGAIN;
10065 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10066 return phy_ethtool_gset(phydev, cmd);
10069 cmd->supported = (SUPPORTED_Autoneg);
10071 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10072 cmd->supported |= (SUPPORTED_1000baseT_Half |
10073 SUPPORTED_1000baseT_Full);
10075 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10076 cmd->supported |= (SUPPORTED_100baseT_Half |
10077 SUPPORTED_100baseT_Full |
10078 SUPPORTED_10baseT_Half |
10079 SUPPORTED_10baseT_Full |
10080 SUPPORTED_TP);
10081 cmd->port = PORT_TP;
10082 } else {
10083 cmd->supported |= SUPPORTED_FIBRE;
10084 cmd->port = PORT_FIBRE;
10087 cmd->advertising = tp->link_config.advertising;
10088 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10089 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10090 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10091 cmd->advertising |= ADVERTISED_Pause;
10092 } else {
10093 cmd->advertising |= ADVERTISED_Pause |
10094 ADVERTISED_Asym_Pause;
10096 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10097 cmd->advertising |= ADVERTISED_Asym_Pause;
10100 if (netif_running(dev)) {
10101 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10102 cmd->duplex = tp->link_config.active_duplex;
10103 } else {
10104 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10105 cmd->duplex = DUPLEX_INVALID;
10107 cmd->phy_address = tp->phy_addr;
10108 cmd->transceiver = XCVR_INTERNAL;
10109 cmd->autoneg = tp->link_config.autoneg;
10110 cmd->maxtxpkt = 0;
10111 cmd->maxrxpkt = 0;
10112 return 0;
10115 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10117 struct tg3 *tp = netdev_priv(dev);
10118 u32 speed = ethtool_cmd_speed(cmd);
10120 if (tg3_flag(tp, USE_PHYLIB)) {
10121 struct phy_device *phydev;
10122 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10123 return -EAGAIN;
10124 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10125 return phy_ethtool_sset(phydev, cmd);
10128 if (cmd->autoneg != AUTONEG_ENABLE &&
10129 cmd->autoneg != AUTONEG_DISABLE)
10130 return -EINVAL;
10132 if (cmd->autoneg == AUTONEG_DISABLE &&
10133 cmd->duplex != DUPLEX_FULL &&
10134 cmd->duplex != DUPLEX_HALF)
10135 return -EINVAL;
10137 if (cmd->autoneg == AUTONEG_ENABLE) {
10138 u32 mask = ADVERTISED_Autoneg |
10139 ADVERTISED_Pause |
10140 ADVERTISED_Asym_Pause;
10142 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10143 mask |= ADVERTISED_1000baseT_Half |
10144 ADVERTISED_1000baseT_Full;
10146 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10147 mask |= ADVERTISED_100baseT_Half |
10148 ADVERTISED_100baseT_Full |
10149 ADVERTISED_10baseT_Half |
10150 ADVERTISED_10baseT_Full |
10151 ADVERTISED_TP;
10152 else
10153 mask |= ADVERTISED_FIBRE;
10155 if (cmd->advertising & ~mask)
10156 return -EINVAL;
10158 mask &= (ADVERTISED_1000baseT_Half |
10159 ADVERTISED_1000baseT_Full |
10160 ADVERTISED_100baseT_Half |
10161 ADVERTISED_100baseT_Full |
10162 ADVERTISED_10baseT_Half |
10163 ADVERTISED_10baseT_Full);
10165 cmd->advertising &= mask;
10166 } else {
10167 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10168 if (speed != SPEED_1000)
10169 return -EINVAL;
10171 if (cmd->duplex != DUPLEX_FULL)
10172 return -EINVAL;
10173 } else {
10174 if (speed != SPEED_100 &&
10175 speed != SPEED_10)
10176 return -EINVAL;
10180 tg3_full_lock(tp, 0);
10182 tp->link_config.autoneg = cmd->autoneg;
10183 if (cmd->autoneg == AUTONEG_ENABLE) {
10184 tp->link_config.advertising = (cmd->advertising |
10185 ADVERTISED_Autoneg);
10186 tp->link_config.speed = SPEED_INVALID;
10187 tp->link_config.duplex = DUPLEX_INVALID;
10188 } else {
10189 tp->link_config.advertising = 0;
10190 tp->link_config.speed = speed;
10191 tp->link_config.duplex = cmd->duplex;
10194 tp->link_config.orig_speed = tp->link_config.speed;
10195 tp->link_config.orig_duplex = tp->link_config.duplex;
10196 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10198 if (netif_running(dev))
10199 tg3_setup_phy(tp, 1);
10201 tg3_full_unlock(tp);
10203 return 0;
10206 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10208 struct tg3 *tp = netdev_priv(dev);
10210 strcpy(info->driver, DRV_MODULE_NAME);
10211 strcpy(info->version, DRV_MODULE_VERSION);
10212 strcpy(info->fw_version, tp->fw_ver);
10213 strcpy(info->bus_info, pci_name(tp->pdev));
10216 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10218 struct tg3 *tp = netdev_priv(dev);
10220 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10221 wol->supported = WAKE_MAGIC;
10222 else
10223 wol->supported = 0;
10224 wol->wolopts = 0;
10225 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10226 wol->wolopts = WAKE_MAGIC;
10227 memset(&wol->sopass, 0, sizeof(wol->sopass));
10230 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10232 struct tg3 *tp = netdev_priv(dev);
10233 struct device *dp = &tp->pdev->dev;
10235 if (wol->wolopts & ~WAKE_MAGIC)
10236 return -EINVAL;
10237 if ((wol->wolopts & WAKE_MAGIC) &&
10238 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10239 return -EINVAL;
10241 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10243 spin_lock_bh(&tp->lock);
10244 if (device_may_wakeup(dp))
10245 tg3_flag_set(tp, WOL_ENABLE);
10246 else
10247 tg3_flag_clear(tp, WOL_ENABLE);
10248 spin_unlock_bh(&tp->lock);
10250 return 0;
10253 static u32 tg3_get_msglevel(struct net_device *dev)
10255 struct tg3 *tp = netdev_priv(dev);
10256 return tp->msg_enable;
10259 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10261 struct tg3 *tp = netdev_priv(dev);
10262 tp->msg_enable = value;
10265 static int tg3_nway_reset(struct net_device *dev)
10267 struct tg3 *tp = netdev_priv(dev);
10268 int r;
10270 if (!netif_running(dev))
10271 return -EAGAIN;
10273 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10274 return -EINVAL;
10276 if (tg3_flag(tp, USE_PHYLIB)) {
10277 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10278 return -EAGAIN;
10279 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10280 } else {
10281 u32 bmcr;
10283 spin_lock_bh(&tp->lock);
10284 r = -EINVAL;
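/* The result of this first BMCR read is ignored; presumably a dummy
 * read to flush stale latched state before the checked read below.
 */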
10285 tg3_readphy(tp, MII_BMCR, &bmcr);
10286 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10287 ((bmcr & BMCR_ANENABLE) ||
10288 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10289 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10290 BMCR_ANENABLE);
10291 r = 0;
10293 spin_unlock_bh(&tp->lock);
10296 return r;
10299 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10301 struct tg3 *tp = netdev_priv(dev);
10303 ering->rx_max_pending = tp->rx_std_ring_mask;
10304 ering->rx_mini_max_pending = 0;
10305 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10306 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10307 else
10308 ering->rx_jumbo_max_pending = 0;
10310 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10312 ering->rx_pending = tp->rx_pending;
10313 ering->rx_mini_pending = 0;
10314 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10315 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10316 else
10317 ering->rx_jumbo_pending = 0;
10319 ering->tx_pending = tp->napi[0].tx_pending;
10322 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10324 struct tg3 *tp = netdev_priv(dev);
10325 int i, irq_sync = 0, err = 0;
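/* Sanity-check the requested sizes: the tx ring must hold more than
 * one maximally fragmented skb, and chips with the TSO_BUG workaround
 * appear to need three times that headroom.
 */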
10327 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10328 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10329 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10330 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10331 (tg3_flag(tp, TSO_BUG) &&
10332 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10333 return -EINVAL;
10335 if (netif_running(dev)) {
10336 tg3_phy_stop(tp);
10337 tg3_netif_stop(tp);
10338 irq_sync = 1;
10341 tg3_full_lock(tp, irq_sync);
10343 tp->rx_pending = ering->rx_pending;
10345 if (tg3_flag(tp, MAX_RXPEND_64) &&
10346 tp->rx_pending > 63)
10347 tp->rx_pending = 63;
10348 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10350 for (i = 0; i < tp->irq_max; i++)
10351 tp->napi[i].tx_pending = ering->tx_pending;
10353 if (netif_running(dev)) {
10354 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10355 err = tg3_restart_hw(tp, 1);
10356 if (!err)
10357 tg3_netif_start(tp);
10360 tg3_full_unlock(tp);
10362 if (irq_sync && !err)
10363 tg3_phy_start(tp);
10365 return err;
10368 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10370 struct tg3 *tp = netdev_priv(dev);
10372 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10374 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10375 epause->rx_pause = 1;
10376 else
10377 epause->rx_pause = 0;
10379 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10380 epause->tx_pause = 1;
10381 else
10382 epause->tx_pause = 0;
10385 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10387 struct tg3 *tp = netdev_priv(dev);
10388 int err = 0;
10390 if (tg3_flag(tp, USE_PHYLIB)) {
10391 u32 newadv;
10392 struct phy_device *phydev;
10394 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10396 if (!(phydev->supported & SUPPORTED_Pause) ||
10397 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10398 (epause->rx_pause != epause->tx_pause)))
10399 return -EINVAL;
10401 tp->link_config.flowctrl = 0;
10402 if (epause->rx_pause) {
10403 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10405 if (epause->tx_pause) {
10406 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10407 newadv = ADVERTISED_Pause;
10408 } else
10409 newadv = ADVERTISED_Pause |
10410 ADVERTISED_Asym_Pause;
10411 } else if (epause->tx_pause) {
10412 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10413 newadv = ADVERTISED_Asym_Pause;
10414 } else
10415 newadv = 0;
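/* newadv uses the 802.3 pause advertisement encoding: Pause alone for
 * symmetric rx+tx, Pause|Asym_Pause for rx-only, and Asym_Pause for
 * tx-only.
 */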
10417 if (epause->autoneg)
10418 tg3_flag_set(tp, PAUSE_AUTONEG);
10419 else
10420 tg3_flag_clear(tp, PAUSE_AUTONEG);
10422 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10423 u32 oldadv = phydev->advertising &
10424 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10425 if (oldadv != newadv) {
10426 phydev->advertising &=
10427 ~(ADVERTISED_Pause |
10428 ADVERTISED_Asym_Pause);
10429 phydev->advertising |= newadv;
10430 if (phydev->autoneg) {
10432 /* Always renegotiate the link to
10433 * inform our link partner of our
10434 * flow control settings, even if the
10435 * flow control is forced. Let
10436 * tg3_adjust_link() do the final
10437 * flow control setup. */
10439 return phy_start_aneg(phydev);
10443 if (!epause->autoneg)
10444 tg3_setup_flow_control(tp, 0, 0);
10445 } else {
10446 tp->link_config.orig_advertising &=
10447 ~(ADVERTISED_Pause |
10448 ADVERTISED_Asym_Pause);
10449 tp->link_config.orig_advertising |= newadv;
10451 } else {
10452 int irq_sync = 0;
10454 if (netif_running(dev)) {
10455 tg3_netif_stop(tp);
10456 irq_sync = 1;
10459 tg3_full_lock(tp, irq_sync);
10461 if (epause->autoneg)
10462 tg3_flag_set(tp, PAUSE_AUTONEG);
10463 else
10464 tg3_flag_clear(tp, PAUSE_AUTONEG);
10465 if (epause->rx_pause)
10466 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10467 else
10468 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10469 if (epause->tx_pause)
10470 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10471 else
10472 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10474 if (netif_running(dev)) {
10475 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10476 err = tg3_restart_hw(tp, 1);
10477 if (!err)
10478 tg3_netif_start(tp);
10481 tg3_full_unlock(tp);
10484 return err;
10487 static int tg3_get_sset_count(struct net_device *dev, int sset)
10489 switch (sset) {
10490 case ETH_SS_TEST:
10491 return TG3_NUM_TEST;
10492 case ETH_SS_STATS:
10493 return TG3_NUM_STATS;
10494 default:
10495 return -EOPNOTSUPP;
10499 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10501 switch (stringset) {
10502 case ETH_SS_STATS:
10503 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10504 break;
10505 case ETH_SS_TEST:
10506 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10507 break;
10508 default:
10509 WARN_ON(1); /* we need a WARN() */
10510 break;
10514 static int tg3_set_phys_id(struct net_device *dev,
10515 enum ethtool_phys_id_state state)
10517 struct tg3 *tp = netdev_priv(dev);
10519 if (!netif_running(tp->dev))
10520 return -EAGAIN;
10522 switch (state) {
10523 case ETHTOOL_ID_ACTIVE:
10524 return 1; /* cycle on/off once per second */
10526 case ETHTOOL_ID_ON:
10527 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10528 LED_CTRL_1000MBPS_ON |
10529 LED_CTRL_100MBPS_ON |
10530 LED_CTRL_10MBPS_ON |
10531 LED_CTRL_TRAFFIC_OVERRIDE |
10532 LED_CTRL_TRAFFIC_BLINK |
10533 LED_CTRL_TRAFFIC_LED);
10534 break;
10536 case ETHTOOL_ID_OFF:
10537 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10538 LED_CTRL_TRAFFIC_OVERRIDE);
10539 break;
10541 case ETHTOOL_ID_INACTIVE:
10542 tw32(MAC_LED_CTRL, tp->led_ctrl);
10543 break;
10546 return 0;
10549 static void tg3_get_ethtool_stats(struct net_device *dev,
10550 struct ethtool_stats *estats, u64 *tmp_stats)
10552 struct tg3 *tp = netdev_priv(dev);
10553 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10556 static __be32 *tg3_vpd_readblock(struct tg3 *tp)
10558 int i;
10559 __be32 *buf;
10560 u32 offset = 0, len = 0;
10561 u32 magic, val;
10563 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10564 return NULL;
10566 if (magic == TG3_EEPROM_MAGIC) {
10567 for (offset = TG3_NVM_DIR_START;
10568 offset < TG3_NVM_DIR_END;
10569 offset += TG3_NVM_DIRENT_SIZE) {
10570 if (tg3_nvram_read(tp, offset, &val))
10571 return NULL;
10573 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10574 TG3_NVM_DIRTYPE_EXTVPD)
10575 break;
10578 if (offset != TG3_NVM_DIR_END) {
10579 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10580 if (tg3_nvram_read(tp, offset + 4, &offset))
10581 return NULL;
10583 offset = tg3_nvram_logical_addr(tp, offset);
10587 if (!offset || !len) {
10588 offset = TG3_NVM_VPD_OFF;
10589 len = TG3_NVM_VPD_LEN;
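/* At this point offset/len describe either an extended-VPD block
 * found in the NVRAM directory or the fixed legacy VPD window.  The
 * block is read below through the NVRAM interface, or via
 * pci_read_vpd() (with up to three retries) when there is no EEPROM
 * magic.
 */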
10592 buf = kmalloc(len, GFP_KERNEL);
10593 if (buf == NULL)
10594 return NULL;
10596 if (magic == TG3_EEPROM_MAGIC) {
10597 for (i = 0; i < len; i += 4) {
10598 /* The data is in little-endian format in NVRAM.
10599 * Use the big-endian read routines to preserve
10600 * the byte order as it exists in NVRAM. */
10602 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10603 goto error;
10605 } else {
10606 u8 *ptr;
10607 ssize_t cnt;
10608 unsigned int pos = 0;
10610 ptr = (u8 *)&buf[0];
10611 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10612 cnt = pci_read_vpd(tp->pdev, pos,
10613 len - pos, ptr);
10614 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10615 cnt = 0;
10616 else if (cnt < 0)
10617 goto error;
10619 if (pos != len)
10620 goto error;
10623 return buf;
10625 error:
10626 kfree(buf);
10627 return NULL;
10630 #define NVRAM_TEST_SIZE 0x100
10631 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10632 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10633 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10634 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
10635 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
10636 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x4c
10637 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10638 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10640 static int tg3_test_nvram(struct tg3 *tp)
10642 u32 csum, magic;
10643 __be32 *buf;
10644 int i, j, k, err = 0, size;
10646 if (tg3_flag(tp, NO_NVRAM))
10647 return 0;
10649 if (tg3_nvram_read(tp, 0, &magic) != 0)
10650 return -EIO;
10652 if (magic == TG3_EEPROM_MAGIC)
10653 size = NVRAM_TEST_SIZE;
10654 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10655 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10656 TG3_EEPROM_SB_FORMAT_1) {
10657 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10658 case TG3_EEPROM_SB_REVISION_0:
10659 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10660 break;
10661 case TG3_EEPROM_SB_REVISION_2:
10662 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10663 break;
10664 case TG3_EEPROM_SB_REVISION_3:
10665 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10666 break;
10667 case TG3_EEPROM_SB_REVISION_4:
10668 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10669 break;
10670 case TG3_EEPROM_SB_REVISION_5:
10671 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10672 break;
10673 case TG3_EEPROM_SB_REVISION_6:
10674 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10675 break;
10676 default:
10677 return -EIO;
10679 } else
10680 return 0;
10681 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10682 size = NVRAM_SELFBOOT_HW_SIZE;
10683 else
10684 return -EIO;
10686 buf = kmalloc(size, GFP_KERNEL);
10687 if (buf == NULL)
10688 return -ENOMEM;
10690 err = -EIO;
10691 for (i = 0, j = 0; i < size; i += 4, j++) {
10692 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10693 if (err)
10694 break;
10696 if (i < size)
10697 goto out;
10699 /* Selfboot format */
10700 magic = be32_to_cpu(buf[0]);
10701 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10702 TG3_EEPROM_MAGIC_FW) {
10703 u8 *buf8 = (u8 *) buf, csum8 = 0;
10705 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10706 TG3_EEPROM_SB_REVISION_2) {
10707 /* For rev 2, the csum doesn't include the MBA. */
10708 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10709 csum8 += buf8[i];
10710 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10711 csum8 += buf8[i];
10712 } else {
10713 for (i = 0; i < size; i++)
10714 csum8 += buf8[i];
10717 if (csum8 == 0) {
10718 err = 0;
10719 goto out;
10722 err = -EIO;
10723 goto out;
10726 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10727 TG3_EEPROM_MAGIC_HW) {
10728 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10729 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10730 u8 *buf8 = (u8 *) buf;
10732 /* Separate the parity bits and the data bytes. */
10733 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10734 if ((i == 0) || (i == 8)) {
10735 int l;
10736 u8 msk;
10738 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10739 parity[k++] = buf8[i] & msk;
10740 i++;
10741 } else if (i == 16) {
10742 int l;
10743 u8 msk;
10745 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10746 parity[k++] = buf8[i] & msk;
10747 i++;
10749 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10750 parity[k++] = buf8[i] & msk;
10751 i++;
10753 data[j++] = buf8[i];
10756 err = -EIO;
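/* Parity check: hweight8() counts the set bits in each data byte;
 * the byte and its stored parity bit must combine to an odd total,
 * otherwise the test fails.
 */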
10757 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10758 u8 hw8 = hweight8(data[i]);
10760 if ((hw8 & 0x1) && parity[i])
10761 goto out;
10762 else if (!(hw8 & 0x1) && !parity[i])
10763 goto out;
10765 err = 0;
10766 goto out;
10769 err = -EIO;
10771 /* Bootstrap checksum at offset 0x10 */
10772 csum = calc_crc((unsigned char *) buf, 0x10);
10773 if (csum != le32_to_cpu(buf[0x10/4]))
10774 goto out;
10776 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10777 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10778 if (csum != le32_to_cpu(buf[0xfc/4]))
10779 goto out;
10781 kfree(buf);
10783 buf = tg3_vpd_readblock(tp);
10784 if (!buf)
10785 return -ENOMEM;
10787 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10788 PCI_VPD_LRDT_RO_DATA);
10789 if (i > 0) {
10790 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10791 if (j < 0)
10792 goto out;
10794 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10795 goto out;
10797 i += PCI_VPD_LRDT_TAG_SIZE;
10798 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10799 PCI_VPD_RO_KEYWORD_CHKSUM);
10800 if (j > 0) {
10801 u8 csum8 = 0;
10803 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10805 for (i = 0; i <= j; i++)
10806 csum8 += ((u8 *)buf)[i];
10808 if (csum8)
10809 goto out;
10813 err = 0;
10815 out:
10816 kfree(buf);
10817 return err;
10820 #define TG3_SERDES_TIMEOUT_SEC 2
10821 #define TG3_COPPER_TIMEOUT_SEC 6
10823 static int tg3_test_link(struct tg3 *tp)
10825 int i, max;
10827 if (!netif_running(tp->dev))
10828 return -ENODEV;
10830 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10831 max = TG3_SERDES_TIMEOUT_SEC;
10832 else
10833 max = TG3_COPPER_TIMEOUT_SEC;
10835 for (i = 0; i < max; i++) {
10836 if (netif_carrier_ok(tp->dev))
10837 return 0;
10839 if (msleep_interruptible(1000))
10840 break;
10843 return -EIO;
10846 /* Only test the commonly used registers */
10847 static int tg3_test_registers(struct tg3 *tp)
10849 int i, is_5705, is_5750;
10850 u32 offset, read_mask, write_mask, val, save_val, read_val;
10851 static struct {
10852 u16 offset;
10853 u16 flags;
10854 #define TG3_FL_5705 0x1
10855 #define TG3_FL_NOT_5705 0x2
10856 #define TG3_FL_NOT_5788 0x4
10857 #define TG3_FL_NOT_5750 0x8
10858 u32 read_mask;
10859 u32 write_mask;
10860 } reg_tbl[] = {
10861 /* MAC Control Registers */
10862 { MAC_MODE, TG3_FL_NOT_5705,
10863 0x00000000, 0x00ef6f8c },
10864 { MAC_MODE, TG3_FL_5705,
10865 0x00000000, 0x01ef6b8c },
10866 { MAC_STATUS, TG3_FL_NOT_5705,
10867 0x03800107, 0x00000000 },
10868 { MAC_STATUS, TG3_FL_5705,
10869 0x03800100, 0x00000000 },
10870 { MAC_ADDR_0_HIGH, 0x0000,
10871 0x00000000, 0x0000ffff },
10872 { MAC_ADDR_0_LOW, 0x0000,
10873 0x00000000, 0xffffffff },
10874 { MAC_RX_MTU_SIZE, 0x0000,
10875 0x00000000, 0x0000ffff },
10876 { MAC_TX_MODE, 0x0000,
10877 0x00000000, 0x00000070 },
10878 { MAC_TX_LENGTHS, 0x0000,
10879 0x00000000, 0x00003fff },
10880 { MAC_RX_MODE, TG3_FL_NOT_5705,
10881 0x00000000, 0x000007fc },
10882 { MAC_RX_MODE, TG3_FL_5705,
10883 0x00000000, 0x000007dc },
10884 { MAC_HASH_REG_0, 0x0000,
10885 0x00000000, 0xffffffff },
10886 { MAC_HASH_REG_1, 0x0000,
10887 0x00000000, 0xffffffff },
10888 { MAC_HASH_REG_2, 0x0000,
10889 0x00000000, 0xffffffff },
10890 { MAC_HASH_REG_3, 0x0000,
10891 0x00000000, 0xffffffff },
10893 /* Receive Data and Receive BD Initiator Control Registers. */
10894 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10895 0x00000000, 0xffffffff },
10896 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10897 0x00000000, 0xffffffff },
10898 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10899 0x00000000, 0x00000003 },
10900 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10901 0x00000000, 0xffffffff },
10902 { RCVDBDI_STD_BD+0, 0x0000,
10903 0x00000000, 0xffffffff },
10904 { RCVDBDI_STD_BD+4, 0x0000,
10905 0x00000000, 0xffffffff },
10906 { RCVDBDI_STD_BD+8, 0x0000,
10907 0x00000000, 0xffff0002 },
10908 { RCVDBDI_STD_BD+0xc, 0x0000,
10909 0x00000000, 0xffffffff },
10911 /* Receive BD Initiator Control Registers. */
10912 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10913 0x00000000, 0xffffffff },
10914 { RCVBDI_STD_THRESH, TG3_FL_5705,
10915 0x00000000, 0x000003ff },
10916 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10917 0x00000000, 0xffffffff },
10919 /* Host Coalescing Control Registers. */
10920 { HOSTCC_MODE, TG3_FL_NOT_5705,
10921 0x00000000, 0x00000004 },
10922 { HOSTCC_MODE, TG3_FL_5705,
10923 0x00000000, 0x000000f6 },
10924 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10925 0x00000000, 0xffffffff },
10926 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10927 0x00000000, 0x000003ff },
10928 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10929 0x00000000, 0xffffffff },
10930 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10931 0x00000000, 0x000003ff },
10932 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10933 0x00000000, 0xffffffff },
10934 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10935 0x00000000, 0x000000ff },
10936 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10937 0x00000000, 0xffffffff },
10938 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10939 0x00000000, 0x000000ff },
10940 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10941 0x00000000, 0xffffffff },
10942 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10943 0x00000000, 0xffffffff },
10944 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10945 0x00000000, 0xffffffff },
10946 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10947 0x00000000, 0x000000ff },
10948 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10949 0x00000000, 0xffffffff },
10950 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10951 0x00000000, 0x000000ff },
10952 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10953 0x00000000, 0xffffffff },
10954 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10955 0x00000000, 0xffffffff },
10956 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10957 0x00000000, 0xffffffff },
10958 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10959 0x00000000, 0xffffffff },
10960 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10961 0x00000000, 0xffffffff },
10962 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10963 0xffffffff, 0x00000000 },
10964 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10965 0xffffffff, 0x00000000 },
10967 /* Buffer Manager Control Registers. */
10968 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10969 0x00000000, 0x007fff80 },
10970 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10971 0x00000000, 0x007fffff },
10972 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10973 0x00000000, 0x0000003f },
10974 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10975 0x00000000, 0x000001ff },
10976 { BUFMGR_MB_HIGH_WATER, 0x0000,
10977 0x00000000, 0x000001ff },
10978 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10979 0xffffffff, 0x00000000 },
10980 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10981 0xffffffff, 0x00000000 },
10983 /* Mailbox Registers */
10984 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10985 0x00000000, 0x000001ff },
10986 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10987 0x00000000, 0x000001ff },
10988 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10989 0x00000000, 0x000007ff },
10990 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10991 0x00000000, 0x000001ff },
10993 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
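/* The 0xffff offset entry above is a sentinel terminating the table;
 * the scan loop below stops when it reaches it.
 */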
10996 is_5705 = is_5750 = 0;
10997 if (tg3_flag(tp, 5705_PLUS)) {
10998 is_5705 = 1;
10999 if (tg3_flag(tp, 5750_PLUS))
11000 is_5750 = 1;
11003 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11004 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11005 continue;
11007 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11008 continue;
11010 if (tg3_flag(tp, IS_5788) &&
11011 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11012 continue;
11014 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11015 continue;
11017 offset = (u32) reg_tbl[i].offset;
11018 read_mask = reg_tbl[i].read_mask;
11019 write_mask = reg_tbl[i].write_mask;
11021 /* Save the original register content */
11022 save_val = tr32(offset);
11024 /* Determine the read-only value. */
11025 read_val = save_val & read_mask;
11027 /* Write zero to the register, then make sure the read-only bits
11028 * are not changed and the read/write bits are all zeros. */
11030 tw32(offset, 0);
11032 val = tr32(offset);
11034 /* Test the read-only and read/write bits. */
11035 if (((val & read_mask) != read_val) || (val & write_mask))
11036 goto out;
11038 /* Write ones to all the bits defined by RdMask and WrMask, then
11039 * make sure the read-only bits are not changed and the
11040 * read/write bits are all ones. */
11042 tw32(offset, read_mask | write_mask);
11044 val = tr32(offset);
11046 /* Test the read-only bits. */
11047 if ((val & read_mask) != read_val)
11048 goto out;
11050 /* Test the read/write bits. */
11051 if ((val & write_mask) != write_mask)
11052 goto out;
11054 tw32(offset, save_val);
11057 return 0;
11059 out:
11060 if (netif_msg_hw(tp))
11061 netdev_err(tp->dev,
11062 "Register test failed at offset %x\n", offset);
11063 tw32(offset, save_val);
11064 return -EIO;
11067 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11069 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11070 int i;
11071 u32 j;
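/* For each pattern, write every 32-bit word in the window and verify
 * the readback through tg3_read_mem().
 */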
11073 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11074 for (j = 0; j < len; j += 4) {
11075 u32 val;
11077 tg3_write_mem(tp, offset + j, test_pattern[i]);
11078 tg3_read_mem(tp, offset + j, &val);
11079 if (val != test_pattern[i])
11080 return -EIO;
11083 return 0;
11086 static int tg3_test_memory(struct tg3 *tp)
11088 static struct mem_entry {
11089 u32 offset;
11090 u32 len;
11091 } mem_tbl_570x[] = {
11092 { 0x00000000, 0x00b50},
11093 { 0x00002000, 0x1c000},
11094 { 0xffffffff, 0x00000}
11095 }, mem_tbl_5705[] = {
11096 { 0x00000100, 0x0000c},
11097 { 0x00000200, 0x00008},
11098 { 0x00004000, 0x00800},
11099 { 0x00006000, 0x01000},
11100 { 0x00008000, 0x02000},
11101 { 0x00010000, 0x0e000},
11102 { 0xffffffff, 0x00000}
11103 }, mem_tbl_5755[] = {
11104 { 0x00000200, 0x00008},
11105 { 0x00004000, 0x00800},
11106 { 0x00006000, 0x00800},
11107 { 0x00008000, 0x02000},
11108 { 0x00010000, 0x0c000},
11109 { 0xffffffff, 0x00000}
11110 }, mem_tbl_5906[] = {
11111 { 0x00000200, 0x00008},
11112 { 0x00004000, 0x00400},
11113 { 0x00006000, 0x00400},
11114 { 0x00008000, 0x01000},
11115 { 0x00010000, 0x01000},
11116 { 0xffffffff, 0x00000}
11117 }, mem_tbl_5717[] = {
11118 { 0x00000200, 0x00008},
11119 { 0x00010000, 0x0a000},
11120 { 0x00020000, 0x13c00},
11121 { 0xffffffff, 0x00000}
11122 }, mem_tbl_57765[] = {
11123 { 0x00000200, 0x00008},
11124 { 0x00004000, 0x00800},
11125 { 0x00006000, 0x09800},
11126 { 0x00010000, 0x0a000},
11127 { 0xffffffff, 0x00000}
11129 struct mem_entry *mem_tbl;
11130 int err = 0;
11131 int i;
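/* Each table ends with a 0xffffffff offset sentinel; the checks below
 * pick the internal-memory map that matches the detected ASIC
 * generation.
 */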
11133 if (tg3_flag(tp, 5717_PLUS))
11134 mem_tbl = mem_tbl_5717;
11135 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11136 mem_tbl = mem_tbl_57765;
11137 else if (tg3_flag(tp, 5755_PLUS))
11138 mem_tbl = mem_tbl_5755;
11139 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11140 mem_tbl = mem_tbl_5906;
11141 else if (tg3_flag(tp, 5705_PLUS))
11142 mem_tbl = mem_tbl_5705;
11143 else
11144 mem_tbl = mem_tbl_570x;
11146 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11147 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11148 if (err)
11149 break;
11152 return err;
11155 #define TG3_MAC_LOOPBACK 0
11156 #define TG3_PHY_LOOPBACK 1
11157 #define TG3_TSO_LOOPBACK 2
11159 #define TG3_TSO_MSS 500
11161 #define TG3_TSO_IP_HDR_LEN 20
11162 #define TG3_TSO_TCP_HDR_LEN 20
11163 #define TG3_TSO_TCP_OPT_LEN 12
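/* tg3_tso_header below is a canned frame header template: an IPv4
 * ethertype, an IPv4 header (10.0.0.1 -> 10.0.0.2, protocol TCP) and
 * a TCP header carrying a 12-byte timestamp option, matching the
 * TG3_TSO_*_LEN constants above.  The IP total length is filled in at
 * run time, and the TCP checksum is cleared for hardware-TSO parts.
 */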
11165 static const u8 tg3_tso_header[] = {
11166 0x08, 0x00,
11167 0x45, 0x00, 0x00, 0x00,
11168 0x00, 0x00, 0x40, 0x00,
11169 0x40, 0x06, 0x00, 0x00,
11170 0x0a, 0x00, 0x00, 0x01,
11171 0x0a, 0x00, 0x00, 0x02,
11172 0x0d, 0x00, 0xe0, 0x00,
11173 0x00, 0x00, 0x01, 0x00,
11174 0x00, 0x00, 0x02, 0x00,
11175 0x80, 0x10, 0x10, 0x00,
11176 0x14, 0x09, 0x00, 0x00,
11177 0x01, 0x01, 0x08, 0x0a,
11178 0x11, 0x11, 0x11, 0x11,
11179 0x11, 0x11, 0x11, 0x11,
11182 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11184 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11185 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11186 struct sk_buff *skb, *rx_skb;
11187 u8 *tx_data;
11188 dma_addr_t map;
11189 int num_pkts, tx_len, rx_len, i, err;
11190 struct tg3_rx_buffer_desc *desc;
11191 struct tg3_napi *tnapi, *rnapi;
11192 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11194 tnapi = &tp->napi[0];
11195 rnapi = &tp->napi[0];
11196 if (tp->irq_cnt > 1) {
11197 if (tg3_flag(tp, ENABLE_RSS))
11198 rnapi = &tp->napi[1];
11199 if (tg3_flag(tp, ENABLE_TSS))
11200 tnapi = &tp->napi[1];
11202 coal_now = tnapi->coal_now | rnapi->coal_now;
11204 if (loopback_mode == TG3_MAC_LOOPBACK) {
11205 /* HW errata - mac loopback fails in some cases on 5780.
11206 * Normal traffic and PHY loopback are not affected by
11207 * errata. Also, the MAC loopback test is deprecated for
11208 * all newer ASIC revisions. */
11210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11211 tg3_flag(tp, CPMU_PRESENT))
11212 return 0;
11214 mac_mode = tp->mac_mode &
11215 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11216 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11217 if (!tg3_flag(tp, 5705_PLUS))
11218 mac_mode |= MAC_MODE_LINK_POLARITY;
11219 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11220 mac_mode |= MAC_MODE_PORT_MODE_MII;
11221 else
11222 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11223 tw32(MAC_MODE, mac_mode);
11224 } else {
11225 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11226 tg3_phy_fet_toggle_apd(tp, false);
11227 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11228 } else
11229 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11231 tg3_phy_toggle_automdix(tp, 0);
11233 tg3_writephy(tp, MII_BMCR, val);
11234 udelay(40);
11236 mac_mode = tp->mac_mode &
11237 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11238 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11239 tg3_writephy(tp, MII_TG3_FET_PTEST,
11240 MII_TG3_FET_PTEST_FRC_TX_LINK |
11241 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11242 /* The write needs to be flushed for the AC131 */
11243 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11244 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11245 mac_mode |= MAC_MODE_PORT_MODE_MII;
11246 } else
11247 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11249 /* reset to prevent losing 1st rx packet intermittently */
11250 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11251 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11252 udelay(10);
11253 tw32_f(MAC_RX_MODE, tp->rx_mode);
11255 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11256 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11257 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11258 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11259 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11260 mac_mode |= MAC_MODE_LINK_POLARITY;
11261 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11262 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11264 tw32(MAC_MODE, mac_mode);
11266 /* Wait for link */
11267 for (i = 0; i < 100; i++) {
11268 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11269 break;
11270 mdelay(1);
11274 err = -EIO;
11276 tx_len = pktsz;
11277 skb = netdev_alloc_skb(tp->dev, tx_len);
11278 if (!skb)
11279 return -ENOMEM;
11281 tx_data = skb_put(skb, tx_len);
11282 memcpy(tx_data, tp->dev->dev_addr, 6);
11283 memset(tx_data + 6, 0x0, 8);
11285 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11287 if (loopback_mode == TG3_TSO_LOOPBACK) {
11288 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11290 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11291 TG3_TSO_TCP_OPT_LEN;
11293 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11294 sizeof(tg3_tso_header));
11295 mss = TG3_TSO_MSS;
11297 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11298 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11300 /* Set the total length field in the IP header */
11301 iph->tot_len = htons((u16)(mss + hdr_len));
11303 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11304 TXD_FLAG_CPU_POST_DMA);
11306 if (tg3_flag(tp, HW_TSO_1) ||
11307 tg3_flag(tp, HW_TSO_2) ||
11308 tg3_flag(tp, HW_TSO_3)) {
11309 struct tcphdr *th;
11310 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11311 th = (struct tcphdr *)&tx_data[val];
11312 th->check = 0;
11313 } else
11314 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11316 if (tg3_flag(tp, HW_TSO_3)) {
11317 mss |= (hdr_len & 0xc) << 12;
11318 if (hdr_len & 0x10)
11319 base_flags |= 0x00000010;
11320 base_flags |= (hdr_len & 0x3e0) << 5;
11321 } else if (tg3_flag(tp, HW_TSO_2))
11322 mss |= hdr_len << 9;
11323 else if (tg3_flag(tp, HW_TSO_1) ||
11324 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11325 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11326 } else {
11327 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
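/* The header-length encoding above differs by TSO generation:
 * HW_TSO_3 splits hdr_len between the mss field and base_flags,
 * HW_TSO_2 packs it into mss bits 9 and up, and older parts encode
 * only the TCP option length.
 */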
11330 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11331 } else {
11332 num_pkts = 1;
11333 data_off = ETH_HLEN;
11336 for (i = data_off; i < tx_len; i++)
11337 tx_data[i] = (u8) (i & 0xff);
11339 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11340 if (pci_dma_mapping_error(tp->pdev, map)) {
11341 dev_kfree_skb(skb);
11342 return -EIO;
11345 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11346 rnapi->coal_now);
11348 udelay(10);
11350 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11352 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11353 base_flags, (mss << 1) | 1);
11355 tnapi->tx_prod++;
11357 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11358 tr32_mailbox(tnapi->prodmbox);
11360 udelay(10);
11362 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11363 for (i = 0; i < 35; i++) {
11364 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11365 coal_now);
11367 udelay(10);
11369 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11370 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11371 if ((tx_idx == tnapi->tx_prod) &&
11372 (rx_idx == (rx_start_idx + num_pkts)))
11373 break;
11376 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11377 dev_kfree_skb(skb);
11379 if (tx_idx != tnapi->tx_prod)
11380 goto out;
11382 if (rx_idx != rx_start_idx + num_pkts)
11383 goto out;
11385 val = data_off;
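/* Walk every completion between rx_start_idx and rx_idx, checking
 * the descriptor error bits, ring membership, frame length, and the
 * (i & 0xff) payload pattern written above.
 */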
11386 while (rx_idx != rx_start_idx) {
11387 desc = &rnapi->rx_rcb[rx_start_idx++];
11388 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11389 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11391 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11392 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11393 goto out;
11395 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11396 - ETH_FCS_LEN;
11398 if (loopback_mode != TG3_TSO_LOOPBACK) {
11399 if (rx_len != tx_len)
11400 goto out;
11402 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11403 if (opaque_key != RXD_OPAQUE_RING_STD)
11404 goto out;
11405 } else {
11406 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11407 goto out;
11409 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11410 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11411 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11412 goto out;
11415 if (opaque_key == RXD_OPAQUE_RING_STD) {
11416 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11417 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11418 mapping);
11419 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11420 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11421 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11422 mapping);
11423 } else
11424 goto out;
11426 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11427 PCI_DMA_FROMDEVICE);
11429 for (i = data_off; i < rx_len; i++, val++) {
11430 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11431 goto out;
11435 err = 0;
11437 /* tg3_free_rings will unmap and free the rx_skb */
11438 out:
11439 return err;
11442 #define TG3_STD_LOOPBACK_FAILED 1
11443 #define TG3_JMB_LOOPBACK_FAILED 2
11444 #define TG3_TSO_LOOPBACK_FAILED 4
11446 #define TG3_MAC_LOOPBACK_SHIFT 0
11447 #define TG3_PHY_LOOPBACK_SHIFT 4
11448 #define TG3_LOOPBACK_FAILED 0x00000077
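/* 0x77 sets the std/jmb/tso failure bits in both the MAC (shift 0)
 * and PHY (shift 4) nibbles at once.
 */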
11450 static int tg3_test_loopback(struct tg3 *tp)
11452 int err = 0;
11453 u32 eee_cap, cpmuctrl = 0;
11455 if (!netif_running(tp->dev))
11456 return TG3_LOOPBACK_FAILED;
11458 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11459 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11461 err = tg3_reset_hw(tp, 1);
11462 if (err) {
11463 err = TG3_LOOPBACK_FAILED;
11464 goto done;
11467 if (tg3_flag(tp, ENABLE_RSS)) {
11468 int i;
11470 /* Reroute all rx packets to the 1st queue */
11471 for (i = MAC_RSS_INDIR_TBL_0;
11472 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11473 tw32(i, 0x0);
11476 /* Turn off gphy autopowerdown. */
11477 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11478 tg3_phy_toggle_apd(tp, false);
11480 if (tg3_flag(tp, CPMU_PRESENT)) {
11481 int i;
11482 u32 status;
11484 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11486 /* Wait for up to 40 microseconds to acquire lock. */
11487 for (i = 0; i < 4; i++) {
11488 status = tr32(TG3_CPMU_MUTEX_GNT);
11489 if (status == CPMU_MUTEX_GNT_DRIVER)
11490 break;
11491 udelay(10);
11494 if (status != CPMU_MUTEX_GNT_DRIVER) {
11495 err = TG3_LOOPBACK_FAILED;
11496 goto done;
11499 /* Turn off link-based power management. */
11500 cpmuctrl = tr32(TG3_CPMU_CTRL);
11501 tw32(TG3_CPMU_CTRL,
11502 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11503 CPMU_CTRL_LINK_AWARE_MODE));
11506 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11507 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11509 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11510 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11511 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11513 if (tg3_flag(tp, CPMU_PRESENT)) {
11514 tw32(TG3_CPMU_CTRL, cpmuctrl);
11516 /* Release the mutex */
11517 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11520 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11521 !tg3_flag(tp, USE_PHYLIB)) {
11522 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11523 err |= TG3_STD_LOOPBACK_FAILED <<
11524 TG3_PHY_LOOPBACK_SHIFT;
11525 if (tg3_flag(tp, TSO_CAPABLE) &&
11526 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11527 err |= TG3_TSO_LOOPBACK_FAILED <<
11528 TG3_PHY_LOOPBACK_SHIFT;
11529 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11530 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11531 err |= TG3_JMB_LOOPBACK_FAILED <<
11532 TG3_PHY_LOOPBACK_SHIFT;
11535 /* Re-enable gphy autopowerdown. */
11536 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11537 tg3_phy_toggle_apd(tp, true);
11539 done:
11540 tp->phy_flags |= eee_cap;
11542 return err;
11545 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11546 u64 *data)
11548 struct tg3 *tp = netdev_priv(dev);
11550 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11551 tg3_power_up(tp)) {
11552 etest->flags |= ETH_TEST_FL_FAILED;
11553 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11554 return;
11557 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11559 if (tg3_test_nvram(tp) != 0) {
11560 etest->flags |= ETH_TEST_FL_FAILED;
11561 data[0] = 1;
11563 if (tg3_test_link(tp) != 0) {
11564 etest->flags |= ETH_TEST_FL_FAILED;
11565 data[1] = 1;
11567 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11568 int err, err2 = 0, irq_sync = 0;
11570 if (netif_running(dev)) {
11571 tg3_phy_stop(tp);
11572 tg3_netif_stop(tp);
11573 irq_sync = 1;
11576 tg3_full_lock(tp, irq_sync);
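/* Offline tests are destructive: halt the chip and its on-board CPUs
 * (holding the NVRAM lock across the CPU halt), run the register,
 * memory, and loopback tests, then restore the hardware below.
 */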
11578 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11579 err = tg3_nvram_lock(tp);
11580 tg3_halt_cpu(tp, RX_CPU_BASE);
11581 if (!tg3_flag(tp, 5705_PLUS))
11582 tg3_halt_cpu(tp, TX_CPU_BASE);
11583 if (!err)
11584 tg3_nvram_unlock(tp);
11586 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11587 tg3_phy_reset(tp);
11589 if (tg3_test_registers(tp) != 0) {
11590 etest->flags |= ETH_TEST_FL_FAILED;
11591 data[2] = 1;
11593 if (tg3_test_memory(tp) != 0) {
11594 etest->flags |= ETH_TEST_FL_FAILED;
11595 data[3] = 1;
11597 if ((data[4] = tg3_test_loopback(tp)) != 0)
11598 etest->flags |= ETH_TEST_FL_FAILED;
11600 tg3_full_unlock(tp);
11602 if (tg3_test_interrupt(tp) != 0) {
11603 etest->flags |= ETH_TEST_FL_FAILED;
11604 data[5] = 1;
11607 tg3_full_lock(tp, 0);
11609 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11610 if (netif_running(dev)) {
11611 tg3_flag_set(tp, INIT_COMPLETE);
11612 err2 = tg3_restart_hw(tp, 1);
11613 if (!err2)
11614 tg3_netif_start(tp);
11617 tg3_full_unlock(tp);
11619 if (irq_sync && !err2)
11620 tg3_phy_start(tp);
11622 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11623 tg3_power_down(tp);
11627 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11629 struct mii_ioctl_data *data = if_mii(ifr);
11630 struct tg3 *tp = netdev_priv(dev);
11631 int err;
11633 if (tg3_flag(tp, USE_PHYLIB)) {
11634 struct phy_device *phydev;
11635 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11636 return -EAGAIN;
11637 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11638 return phy_mii_ioctl(phydev, ifr, cmd);
11641 switch (cmd) {
11642 case SIOCGMIIPHY:
11643 data->phy_id = tp->phy_addr;
11645 /* fallthru */
11646 case SIOCGMIIREG: {
11647 u32 mii_regval;
11649 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11650 break; /* We have no PHY */
11652 if (!netif_running(dev))
11653 return -EAGAIN;
11655 spin_lock_bh(&tp->lock);
11656 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11657 spin_unlock_bh(&tp->lock);
11659 data->val_out = mii_regval;
11661 return err;
11664 case SIOCSMIIREG:
11665 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11666 break; /* We have no PHY */
11668 if (!netif_running(dev))
11669 return -EAGAIN;
11671 spin_lock_bh(&tp->lock);
11672 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11673 spin_unlock_bh(&tp->lock);
11675 return err;
11677 default:
11678 /* do nothing */
11679 break;
11681 return -EOPNOTSUPP;
11684 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11686 struct tg3 *tp = netdev_priv(dev);
11688 memcpy(ec, &tp->coal, sizeof(*ec));
11689 return 0;
11692 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11694 struct tg3 *tp = netdev_priv(dev);
11695 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11696 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11698 if (!tg3_flag(tp, 5705_PLUS)) {
11699 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11700 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11701 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11702 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
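/* On 5705-and-newer parts the irq-event and stats-block limits above
 * stay zero, so the range checks below reject any nonzero request for
 * those parameters.
 */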
11705 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11706 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11707 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11708 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11709 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11710 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11711 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11712 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11713 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11714 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11715 return -EINVAL;
11717 /* No rx interrupts will be generated if both are zero */
11718 if ((ec->rx_coalesce_usecs == 0) &&
11719 (ec->rx_max_coalesced_frames == 0))
11720 return -EINVAL;
11722 /* No tx interrupts will be generated if both are zero */
11723 if ((ec->tx_coalesce_usecs == 0) &&
11724 (ec->tx_max_coalesced_frames == 0))
11725 return -EINVAL;
11727 /* Only copy relevant parameters, ignore all others. */
11728 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11729 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11730 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11731 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11732 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11733 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11734 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11735 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11736 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11738 if (netif_running(dev)) {
11739 tg3_full_lock(tp, 0);
11740 __tg3_set_coalesce(tp, &tp->coal);
11741 tg3_full_unlock(tp);
11743 return 0;
11746 static const struct ethtool_ops tg3_ethtool_ops = {
11747 .get_settings = tg3_get_settings,
11748 .set_settings = tg3_set_settings,
11749 .get_drvinfo = tg3_get_drvinfo,
11750 .get_regs_len = tg3_get_regs_len,
11751 .get_regs = tg3_get_regs,
11752 .get_wol = tg3_get_wol,
11753 .set_wol = tg3_set_wol,
11754 .get_msglevel = tg3_get_msglevel,
11755 .set_msglevel = tg3_set_msglevel,
11756 .nway_reset = tg3_nway_reset,
11757 .get_link = ethtool_op_get_link,
11758 .get_eeprom_len = tg3_get_eeprom_len,
11759 .get_eeprom = tg3_get_eeprom,
11760 .set_eeprom = tg3_set_eeprom,
11761 .get_ringparam = tg3_get_ringparam,
11762 .set_ringparam = tg3_set_ringparam,
11763 .get_pauseparam = tg3_get_pauseparam,
11764 .set_pauseparam = tg3_set_pauseparam,
11765 .self_test = tg3_self_test,
11766 .get_strings = tg3_get_strings,
11767 .set_phys_id = tg3_set_phys_id,
11768 .get_ethtool_stats = tg3_get_ethtool_stats,
11769 .get_coalesce = tg3_get_coalesce,
11770 .set_coalesce = tg3_set_coalesce,
11771 .get_sset_count = tg3_get_sset_count,
11774 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11776 u32 cursize, val, magic;
11778 tp->nvram_size = EEPROM_CHIP_SIZE;
11780 if (tg3_nvram_read(tp, 0, &magic) != 0)
11781 return;
11783 if ((magic != TG3_EEPROM_MAGIC) &&
11784 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11785 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11786 return;
11789 /* Size the chip by reading offsets at increasing powers of two.
11790 * When we encounter our validation signature, we know the addressing
11791 * has wrapped around, and thus have our chip size. */
11793 cursize = 0x10;
11795 while (cursize < tp->nvram_size) {
11796 if (tg3_nvram_read(tp, cursize, &val) != 0)
11797 return;
11799 if (val == magic)
11800 break;
11802 cursize <<= 1;
11805 tp->nvram_size = cursize;
11808 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11810 u32 val;
11812 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11813 return;
11815 /* Selfboot format */
11816 if (val != TG3_EEPROM_MAGIC) {
11817 tg3_get_eeprom_size(tp);
11818 return;
11821 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11822 if (val != 0) {
11823 /* This is confusing. We want to operate on the
11824 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11825 * call will read from NVRAM and byteswap the data
11826 * according to the byteswapping settings for all
11827 * other register accesses. This ensures the data we
11828 * want will always reside in the lower 16-bits.
11829 * However, the data in NVRAM is in LE format, which
11830 * means the data from the NVRAM read will always be
11831 * opposite the endianness of the CPU. The 16-bit
11832 * byteswap then brings the data to CPU endianness. */
11834 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11835 return;
11838 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11841 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11843 u32 nvcfg1;
11845 nvcfg1 = tr32(NVRAM_CFG1);
11846 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11847 tg3_flag_set(tp, FLASH);
11848 } else {
11849 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11850 tw32(NVRAM_CFG1, nvcfg1);
11853 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11854 tg3_flag(tp, 5780_CLASS)) {
11855 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11856 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11857 tp->nvram_jedecnum = JEDEC_ATMEL;
11858 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11859 tg3_flag_set(tp, NVRAM_BUFFERED);
11860 break;
11861 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11862 tp->nvram_jedecnum = JEDEC_ATMEL;
11863 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11864 break;
11865 case FLASH_VENDOR_ATMEL_EEPROM:
11866 tp->nvram_jedecnum = JEDEC_ATMEL;
11867 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11868 tg3_flag_set(tp, NVRAM_BUFFERED);
11869 break;
11870 case FLASH_VENDOR_ST:
11871 tp->nvram_jedecnum = JEDEC_ST;
11872 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11873 tg3_flag_set(tp, NVRAM_BUFFERED);
11874 break;
11875 case FLASH_VENDOR_SAIFUN:
11876 tp->nvram_jedecnum = JEDEC_SAIFUN;
11877 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11878 break;
11879 case FLASH_VENDOR_SST_SMALL:
11880 case FLASH_VENDOR_SST_LARGE:
11881 tp->nvram_jedecnum = JEDEC_SST;
11882 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11883 break;
11885 } else {
11886 tp->nvram_jedecnum = JEDEC_ATMEL;
11887 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11888 tg3_flag_set(tp, NVRAM_BUFFERED);
11892 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11894 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11895 case FLASH_5752PAGE_SIZE_256:
11896 tp->nvram_pagesize = 256;
11897 break;
11898 case FLASH_5752PAGE_SIZE_512:
11899 tp->nvram_pagesize = 512;
11900 break;
11901 case FLASH_5752PAGE_SIZE_1K:
11902 tp->nvram_pagesize = 1024;
11903 break;
11904 case FLASH_5752PAGE_SIZE_2K:
11905 tp->nvram_pagesize = 2048;
11906 break;
11907 case FLASH_5752PAGE_SIZE_4K:
11908 tp->nvram_pagesize = 4096;
11909 break;
11910 case FLASH_5752PAGE_SIZE_264:
11911 tp->nvram_pagesize = 264;
11912 break;
11913 case FLASH_5752PAGE_SIZE_528:
11914 tp->nvram_pagesize = 528;
11915 break;
11919 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11921 u32 nvcfg1;
11923 nvcfg1 = tr32(NVRAM_CFG1);
11925 /* NVRAM protection for TPM */
11926 if (nvcfg1 & (1 << 27))
11927 tg3_flag_set(tp, PROTECTED_NVRAM);
11929 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11930 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11931 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11932 tp->nvram_jedecnum = JEDEC_ATMEL;
11933 tg3_flag_set(tp, NVRAM_BUFFERED);
11934 break;
11935 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11936 tp->nvram_jedecnum = JEDEC_ATMEL;
11937 tg3_flag_set(tp, NVRAM_BUFFERED);
11938 tg3_flag_set(tp, FLASH);
11939 break;
11940 case FLASH_5752VENDOR_ST_M45PE10:
11941 case FLASH_5752VENDOR_ST_M45PE20:
11942 case FLASH_5752VENDOR_ST_M45PE40:
11943 tp->nvram_jedecnum = JEDEC_ST;
11944 tg3_flag_set(tp, NVRAM_BUFFERED);
11945 tg3_flag_set(tp, FLASH);
11946 break;
11949 if (tg3_flag(tp, FLASH)) {
11950 tg3_nvram_get_pagesize(tp, nvcfg1);
11951 } else {
11952 /* For eeprom, set pagesize to maximum eeprom size */
11953 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11955 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11956 tw32(NVRAM_CFG1, nvcfg1);
11960 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11962 u32 nvcfg1, protect = 0;
11964 nvcfg1 = tr32(NVRAM_CFG1);
11966 /* NVRAM protection for TPM */
11967 if (nvcfg1 & (1 << 27)) {
11968 tg3_flag_set(tp, PROTECTED_NVRAM);
11969 protect = 1;
11972 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11973 switch (nvcfg1) {
11974 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11975 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11976 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11977 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11978 tp->nvram_jedecnum = JEDEC_ATMEL;
11979 tg3_flag_set(tp, NVRAM_BUFFERED);
11980 tg3_flag_set(tp, FLASH);
11981 tp->nvram_pagesize = 264;
11982 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11983 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11984 tp->nvram_size = (protect ? 0x3e200 :
11985 TG3_NVRAM_SIZE_512KB);
11986 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11987 tp->nvram_size = (protect ? 0x1f200 :
11988 TG3_NVRAM_SIZE_256KB);
11989 else
11990 tp->nvram_size = (protect ? 0x1f200 :
11991 TG3_NVRAM_SIZE_128KB);
11992 break;
11993 case FLASH_5752VENDOR_ST_M45PE10:
11994 case FLASH_5752VENDOR_ST_M45PE20:
11995 case FLASH_5752VENDOR_ST_M45PE40:
11996 tp->nvram_jedecnum = JEDEC_ST;
11997 tg3_flag_set(tp, NVRAM_BUFFERED);
11998 tg3_flag_set(tp, FLASH);
11999 tp->nvram_pagesize = 256;
12000 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12001 tp->nvram_size = (protect ?
12002 TG3_NVRAM_SIZE_64KB :
12003 TG3_NVRAM_SIZE_128KB);
12004 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12005 tp->nvram_size = (protect ?
12006 TG3_NVRAM_SIZE_64KB :
12007 TG3_NVRAM_SIZE_256KB);
12008 else
12009 tp->nvram_size = (protect ?
12010 TG3_NVRAM_SIZE_128KB :
12011 TG3_NVRAM_SIZE_512KB);
12012 break;
12016 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12018 u32 nvcfg1;
12020 nvcfg1 = tr32(NVRAM_CFG1);
12022 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12023 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12024 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12025 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12026 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12027 tp->nvram_jedecnum = JEDEC_ATMEL;
12028 tg3_flag_set(tp, NVRAM_BUFFERED);
12029 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12031 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12032 tw32(NVRAM_CFG1, nvcfg1);
12033 break;
12034 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12035 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12036 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12037 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12038 tp->nvram_jedecnum = JEDEC_ATMEL;
12039 tg3_flag_set(tp, NVRAM_BUFFERED);
12040 tg3_flag_set(tp, FLASH);
12041 tp->nvram_pagesize = 264;
12042 break;
12043 case FLASH_5752VENDOR_ST_M45PE10:
12044 case FLASH_5752VENDOR_ST_M45PE20:
12045 case FLASH_5752VENDOR_ST_M45PE40:
12046 tp->nvram_jedecnum = JEDEC_ST;
12047 tg3_flag_set(tp, NVRAM_BUFFERED);
12048 tg3_flag_set(tp, FLASH);
12049 tp->nvram_pagesize = 256;
12050 break;
12054 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12056 u32 nvcfg1, protect = 0;
12058 nvcfg1 = tr32(NVRAM_CFG1);
12060 /* NVRAM protection for TPM */
12061 if (nvcfg1 & (1 << 27)) {
12062 tg3_flag_set(tp, PROTECTED_NVRAM);
12063 protect = 1;
12066 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12067 switch (nvcfg1) {
12068 case FLASH_5761VENDOR_ATMEL_ADB021D:
12069 case FLASH_5761VENDOR_ATMEL_ADB041D:
12070 case FLASH_5761VENDOR_ATMEL_ADB081D:
12071 case FLASH_5761VENDOR_ATMEL_ADB161D:
12072 case FLASH_5761VENDOR_ATMEL_MDB021D:
12073 case FLASH_5761VENDOR_ATMEL_MDB041D:
12074 case FLASH_5761VENDOR_ATMEL_MDB081D:
12075 case FLASH_5761VENDOR_ATMEL_MDB161D:
12076 tp->nvram_jedecnum = JEDEC_ATMEL;
12077 tg3_flag_set(tp, NVRAM_BUFFERED);
12078 tg3_flag_set(tp, FLASH);
12079 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12080 tp->nvram_pagesize = 256;
12081 break;
12082 case FLASH_5761VENDOR_ST_A_M45PE20:
12083 case FLASH_5761VENDOR_ST_A_M45PE40:
12084 case FLASH_5761VENDOR_ST_A_M45PE80:
12085 case FLASH_5761VENDOR_ST_A_M45PE16:
12086 case FLASH_5761VENDOR_ST_M_M45PE20:
12087 case FLASH_5761VENDOR_ST_M_M45PE40:
12088 case FLASH_5761VENDOR_ST_M_M45PE80:
12089 case FLASH_5761VENDOR_ST_M_M45PE16:
12090 tp->nvram_jedecnum = JEDEC_ST;
12091 tg3_flag_set(tp, NVRAM_BUFFERED);
12092 tg3_flag_set(tp, FLASH);
12093 tp->nvram_pagesize = 256;
12094 break;
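/* With TPM protection active the usable size comes from the
 * NVRAM_ADDR_LOCKOUT register; otherwise it is inferred from the
 * flash part ID below.
 */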
12097 if (protect) {
12098 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12099 } else {
12100 switch (nvcfg1) {
12101 case FLASH_5761VENDOR_ATMEL_ADB161D:
12102 case FLASH_5761VENDOR_ATMEL_MDB161D:
12103 case FLASH_5761VENDOR_ST_A_M45PE16:
12104 case FLASH_5761VENDOR_ST_M_M45PE16:
12105 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12106 break;
12107 case FLASH_5761VENDOR_ATMEL_ADB081D:
12108 case FLASH_5761VENDOR_ATMEL_MDB081D:
12109 case FLASH_5761VENDOR_ST_A_M45PE80:
12110 case FLASH_5761VENDOR_ST_M_M45PE80:
12111 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12112 break;
12113 case FLASH_5761VENDOR_ATMEL_ADB041D:
12114 case FLASH_5761VENDOR_ATMEL_MDB041D:
12115 case FLASH_5761VENDOR_ST_A_M45PE40:
12116 case FLASH_5761VENDOR_ST_M_M45PE40:
12117 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12118 break;
12119 case FLASH_5761VENDOR_ATMEL_ADB021D:
12120 case FLASH_5761VENDOR_ATMEL_MDB021D:
12121 case FLASH_5761VENDOR_ST_A_M45PE20:
12122 case FLASH_5761VENDOR_ST_M_M45PE20:
12123 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12124 break;
12129 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12131 tp->nvram_jedecnum = JEDEC_ATMEL;
12132 tg3_flag_set(tp, NVRAM_BUFFERED);
12133 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12136 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12138 u32 nvcfg1;
12140 nvcfg1 = tr32(NVRAM_CFG1);
12142 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12143 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12144 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12145 tp->nvram_jedecnum = JEDEC_ATMEL;
12146 tg3_flag_set(tp, NVRAM_BUFFERED);
12147 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12149 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12150 tw32(NVRAM_CFG1, nvcfg1);
12151 return;
12152 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12153 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12154 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12155 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12156 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12157 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12158 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12159 tp->nvram_jedecnum = JEDEC_ATMEL;
12160 tg3_flag_set(tp, NVRAM_BUFFERED);
12161 tg3_flag_set(tp, FLASH);
12163 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12164 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12165 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12166 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12167 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12168 break;
12169 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12170 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12171 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12172 break;
12173 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12174 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12175 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12176 break;
12178 break;
12179 case FLASH_5752VENDOR_ST_M45PE10:
12180 case FLASH_5752VENDOR_ST_M45PE20:
12181 case FLASH_5752VENDOR_ST_M45PE40:
12182 tp->nvram_jedecnum = JEDEC_ST;
12183 tg3_flag_set(tp, NVRAM_BUFFERED);
12184 tg3_flag_set(tp, FLASH);
12186 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12187 case FLASH_5752VENDOR_ST_M45PE10:
12188 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12189 break;
12190 case FLASH_5752VENDOR_ST_M45PE20:
12191 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12192 break;
12193 case FLASH_5752VENDOR_ST_M45PE40:
12194 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12195 break;
12197 break;
12198 default:
12199 tg3_flag_set(tp, NO_NVRAM);
12200 return;
12203 tg3_nvram_get_pagesize(tp, nvcfg1);
12204 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12205 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12209 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12211 u32 nvcfg1;
12213 nvcfg1 = tr32(NVRAM_CFG1);
12215 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12216 case FLASH_5717VENDOR_ATMEL_EEPROM:
12217 case FLASH_5717VENDOR_MICRO_EEPROM:
12218 tp->nvram_jedecnum = JEDEC_ATMEL;
12219 tg3_flag_set(tp, NVRAM_BUFFERED);
12220 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12222 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12223 tw32(NVRAM_CFG1, nvcfg1);
12224 return;
12225 case FLASH_5717VENDOR_ATMEL_MDB011D:
12226 case FLASH_5717VENDOR_ATMEL_ADB011B:
12227 case FLASH_5717VENDOR_ATMEL_ADB011D:
12228 case FLASH_5717VENDOR_ATMEL_MDB021D:
12229 case FLASH_5717VENDOR_ATMEL_ADB021B:
12230 case FLASH_5717VENDOR_ATMEL_ADB021D:
12231 case FLASH_5717VENDOR_ATMEL_45USPT:
12232 tp->nvram_jedecnum = JEDEC_ATMEL;
12233 tg3_flag_set(tp, NVRAM_BUFFERED);
12234 tg3_flag_set(tp, FLASH);
12236 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12237 case FLASH_5717VENDOR_ATMEL_MDB021D:
12238 /* Detect size with tg3_nvram_get_size() */
12239 break;
12240 case FLASH_5717VENDOR_ATMEL_ADB021B:
12241 case FLASH_5717VENDOR_ATMEL_ADB021D:
12242 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12243 break;
12244 default:
12245 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12246 break;
12248 break;
12249 case FLASH_5717VENDOR_ST_M_M25PE10:
12250 case FLASH_5717VENDOR_ST_A_M25PE10:
12251 case FLASH_5717VENDOR_ST_M_M45PE10:
12252 case FLASH_5717VENDOR_ST_A_M45PE10:
12253 case FLASH_5717VENDOR_ST_M_M25PE20:
12254 case FLASH_5717VENDOR_ST_A_M25PE20:
12255 case FLASH_5717VENDOR_ST_M_M45PE20:
12256 case FLASH_5717VENDOR_ST_A_M45PE20:
12257 case FLASH_5717VENDOR_ST_25USPT:
12258 case FLASH_5717VENDOR_ST_45USPT:
12259 tp->nvram_jedecnum = JEDEC_ST;
12260 tg3_flag_set(tp, NVRAM_BUFFERED);
12261 tg3_flag_set(tp, FLASH);
12263 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12264 case FLASH_5717VENDOR_ST_M_M25PE20:
12265 case FLASH_5717VENDOR_ST_M_M45PE20:
12266 /* Detect size with tg3_nvram_get_size() */
12267 break;
12268 case FLASH_5717VENDOR_ST_A_M25PE20:
12269 case FLASH_5717VENDOR_ST_A_M45PE20:
12270 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12271 break;
12272 default:
12273 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12274 break;
12276 break;
12277 default:
12278 tg3_flag_set(tp, NO_NVRAM);
12279 return;
12282 tg3_nvram_get_pagesize(tp, nvcfg1);
12283 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12284 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12287 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12289 u32 nvcfg1, nvmpinstrp;
12291 nvcfg1 = tr32(NVRAM_CFG1);
12292 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12294 switch (nvmpinstrp) {
12295 case FLASH_5720_EEPROM_HD:
12296 case FLASH_5720_EEPROM_LD:
12297 tp->nvram_jedecnum = JEDEC_ATMEL;
12298 tg3_flag_set(tp, NVRAM_BUFFERED);
12300 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12301 tw32(NVRAM_CFG1, nvcfg1);
12302 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12303 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12304 else
12305 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12306 return;
12307 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12308 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12309 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12310 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12311 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12312 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12313 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12314 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12315 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12316 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12317 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12318 case FLASH_5720VENDOR_ATMEL_45USPT:
12319 tp->nvram_jedecnum = JEDEC_ATMEL;
12320 tg3_flag_set(tp, NVRAM_BUFFERED);
12321 tg3_flag_set(tp, FLASH);
12323 switch (nvmpinstrp) {
12324 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12325 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12326 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12327 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12328 break;
12329 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12330 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12331 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12332 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12333 break;
12334 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12335 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12336 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12337 break;
12338 default:
12339 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12340 break;
12342 break;
12343 case FLASH_5720VENDOR_M_ST_M25PE10:
12344 case FLASH_5720VENDOR_M_ST_M45PE10:
12345 case FLASH_5720VENDOR_A_ST_M25PE10:
12346 case FLASH_5720VENDOR_A_ST_M45PE10:
12347 case FLASH_5720VENDOR_M_ST_M25PE20:
12348 case FLASH_5720VENDOR_M_ST_M45PE20:
12349 case FLASH_5720VENDOR_A_ST_M25PE20:
12350 case FLASH_5720VENDOR_A_ST_M45PE20:
12351 case FLASH_5720VENDOR_M_ST_M25PE40:
12352 case FLASH_5720VENDOR_M_ST_M45PE40:
12353 case FLASH_5720VENDOR_A_ST_M25PE40:
12354 case FLASH_5720VENDOR_A_ST_M45PE40:
12355 case FLASH_5720VENDOR_M_ST_M25PE80:
12356 case FLASH_5720VENDOR_M_ST_M45PE80:
12357 case FLASH_5720VENDOR_A_ST_M25PE80:
12358 case FLASH_5720VENDOR_A_ST_M45PE80:
12359 case FLASH_5720VENDOR_ST_25USPT:
12360 case FLASH_5720VENDOR_ST_45USPT:
12361 tp->nvram_jedecnum = JEDEC_ST;
12362 tg3_flag_set(tp, NVRAM_BUFFERED);
12363 tg3_flag_set(tp, FLASH);
12365 switch (nvmpinstrp) {
12366 case FLASH_5720VENDOR_M_ST_M25PE20:
12367 case FLASH_5720VENDOR_M_ST_M45PE20:
12368 case FLASH_5720VENDOR_A_ST_M25PE20:
12369 case FLASH_5720VENDOR_A_ST_M45PE20:
12370 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12371 break;
12372 case FLASH_5720VENDOR_M_ST_M25PE40:
12373 case FLASH_5720VENDOR_M_ST_M45PE40:
12374 case FLASH_5720VENDOR_A_ST_M25PE40:
12375 case FLASH_5720VENDOR_A_ST_M45PE40:
12376 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12377 break;
12378 case FLASH_5720VENDOR_M_ST_M25PE80:
12379 case FLASH_5720VENDOR_M_ST_M45PE80:
12380 case FLASH_5720VENDOR_A_ST_M25PE80:
12381 case FLASH_5720VENDOR_A_ST_M45PE80:
12382 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12383 break;
12384 default:
12385 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12386 break;
12388 break;
12389 default:
12390 tg3_flag_set(tp, NO_NVRAM);
12391 return;
12394 tg3_nvram_get_pagesize(tp, nvcfg1);
12395 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12396 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
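/* Illustrative note (not driver code): pages of 264 or 528 bytes
 * indicate Atmel DataFlash parts that are addressed in power-of-two
 * page frames, so linear offsets must be translated (see
 * tg3_nvram_phys_addr()). Worked example, assuming the translation
 * is ((offset / pagesize) << 9) + (offset % pagesize) with a
 * 264-byte page:
 *
 *   offset 1000 -> page 3, byte 208 -> (3 << 9) + 208 = 0x6d0
 */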
12399 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12400 static void __devinit tg3_nvram_init(struct tg3 *tp)
12402 tw32_f(GRC_EEPROM_ADDR,
12403 (EEPROM_ADDR_FSM_RESET |
12404 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12405 EEPROM_ADDR_CLKPERD_SHIFT)));
12407 msleep(1);
12409 /* Enable seeprom accesses. */
12410 tw32_f(GRC_LOCAL_CTRL,
12411 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12412 udelay(100);
12414 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12415 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12416 tg3_flag_set(tp, NVRAM);
12418 if (tg3_nvram_lock(tp)) {
12419 netdev_warn(tp->dev,
12420 "Cannot get nvram lock, %s failed\n",
12421 __func__);
12422 return;
12424 tg3_enable_nvram_access(tp);
12426 tp->nvram_size = 0;
12428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12429 tg3_get_5752_nvram_info(tp);
12430 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12431 tg3_get_5755_nvram_info(tp);
12432 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12433 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12434 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12435 tg3_get_5787_nvram_info(tp);
12436 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12437 tg3_get_5761_nvram_info(tp);
12438 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12439 tg3_get_5906_nvram_info(tp);
12440 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12442 tg3_get_57780_nvram_info(tp);
12443 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12444 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12445 tg3_get_5717_nvram_info(tp);
12446 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12447 tg3_get_5720_nvram_info(tp);
12448 else
12449 tg3_get_nvram_info(tp);
12451 if (tp->nvram_size == 0)
12452 tg3_get_nvram_size(tp);
12454 tg3_disable_nvram_access(tp);
12455 tg3_nvram_unlock(tp);
12457 } else {
12458 tg3_flag_clear(tp, NVRAM);
12459 tg3_flag_clear(tp, NVRAM_BUFFERED);
12461 tg3_get_eeprom_size(tp);
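/* A minimal sketch (hypothetical helper, not part of the driver) of
 * the lock/access bracket that tg3_nvram_init() and every other NVRAM
 * path must follow: take the hardware semaphore, open the access
 * window, do the work, then tear down in reverse order.
 */
static int tg3_nvram_do_locked(struct tg3 *tp, int (*op)(struct tg3 *tp))
{
	int ret = tg3_nvram_lock(tp);	/* hw semaphore; may fail */

	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);	/* open the access window */
	ret = op(tp);			/* caller-supplied NVRAM work */
	tg3_disable_nvram_access(tp);
	tg3_nvram_unlock(tp);

	return ret;
}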
12465 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12466 u32 offset, u32 len, u8 *buf)
12468 int i, j, rc = 0;
12469 u32 val;
12471 for (i = 0; i < len; i += 4) {
12472 u32 addr;
12473 __be32 data;
12475 addr = offset + i;
12477 memcpy(&data, buf + i, 4);
12479 /*
12480 * The SEEPROM interface expects the data to always be opposite
12481 * the native endian format. We accomplish this by reversing
12482 * all the operations that would have been performed on the
12483 * data from a call to tg3_nvram_read_be32().
12484 */
12485 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12487 val = tr32(GRC_EEPROM_ADDR);
12488 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12490 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12491 EEPROM_ADDR_READ);
12492 tw32(GRC_EEPROM_ADDR, val |
12493 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12494 (addr & EEPROM_ADDR_ADDR_MASK) |
12495 EEPROM_ADDR_START |
12496 EEPROM_ADDR_WRITE);
12498 for (j = 0; j < 1000; j++) {
12499 val = tr32(GRC_EEPROM_ADDR);
12501 if (val & EEPROM_ADDR_COMPLETE)
12502 break;
12503 msleep(1);
12505 if (!(val & EEPROM_ADDR_COMPLETE)) {
12506 rc = -EBUSY;
12507 break;
12511 return rc;
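/* Worked example of the byte-order dance above (illustrative only):
 * if buf holds the big-endian bytes 12 34 56 78, data (__be32)
 * decodes to the CPU value 0x12345678 and swab32() then yields
 * 0x78563412, i.e. the value with its bytes reversed, which is the
 * "opposite the native endian format" form the SEEPROM state machine
 * expects to see written to GRC_EEPROM_DATA.
 */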
12514 /* offset and length are dword aligned */
12515 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12516 u8 *buf)
12518 int ret = 0;
12519 u32 pagesize = tp->nvram_pagesize;
12520 u32 pagemask = pagesize - 1;
12521 u32 nvram_cmd;
12522 u8 *tmp;
12524 tmp = kmalloc(pagesize, GFP_KERNEL);
12525 if (tmp == NULL)
12526 return -ENOMEM;
12528 while (len) {
12529 int j;
12530 u32 phy_addr, page_off, size;
12532 phy_addr = offset & ~pagemask;
12534 for (j = 0; j < pagesize; j += 4) {
12535 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12536 (__be32 *) (tmp + j));
12537 if (ret)
12538 break;
12540 if (ret)
12541 break;
12543 page_off = offset & pagemask;
12544 size = pagesize;
12545 if (len < size)
12546 size = len;
12548 len -= size;
12550 memcpy(tmp + page_off, buf, size);
12552 offset = offset + (pagesize - page_off);
12554 tg3_enable_nvram_access(tp);
12556 /*
12557 * Before we can erase the flash page, we need
12558 * to issue a special "write enable" command.
12559 */
12560 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12562 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12563 break;
12565 /* Erase the target page */
12566 tw32(NVRAM_ADDR, phy_addr);
12568 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12569 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12571 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12572 break;
12574 /* Issue another write enable to start the write. */
12575 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12577 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12578 break;
12580 for (j = 0; j < pagesize; j += 4) {
12581 __be32 data;
12583 data = *((__be32 *) (tmp + j));
12585 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12587 tw32(NVRAM_ADDR, phy_addr + j);
12589 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12590 NVRAM_CMD_WR;
12592 if (j == 0)
12593 nvram_cmd |= NVRAM_CMD_FIRST;
12594 else if (j == (pagesize - 4))
12595 nvram_cmd |= NVRAM_CMD_LAST;
12597 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12598 break;
12600 if (ret)
12601 break;
12604 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12605 tg3_nvram_exec_cmd(tp, nvram_cmd);
12607 kfree(tmp);
12609 return ret;
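/* Worked example of the read-modify-write arithmetic above
 * (illustrative only): with pagesize = 256 (pagemask = 0xff),
 * offset = 0x106 and len = 8:
 *
 *   phy_addr = 0x106 & ~0xff = 0x100  (page read back, then erased)
 *   page_off = 0x106 &  0xff = 0x06   (where the new bytes land in tmp)
 *   size     = min(8, 256)   = 8
 *   offset  += 256 - 6                (next iteration starts at 0x200)
 */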
12612 /* offset and length are dword aligned */
12613 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12614 u8 *buf)
12616 int i, ret = 0;
12618 for (i = 0; i < len; i += 4, offset += 4) {
12619 u32 page_off, phy_addr, nvram_cmd;
12620 __be32 data;
12622 memcpy(&data, buf + i, 4);
12623 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12625 page_off = offset % tp->nvram_pagesize;
12627 phy_addr = tg3_nvram_phys_addr(tp, offset);
12629 tw32(NVRAM_ADDR, phy_addr);
12631 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12633 if (page_off == 0 || i == 0)
12634 nvram_cmd |= NVRAM_CMD_FIRST;
12635 if (page_off == (tp->nvram_pagesize - 4))
12636 nvram_cmd |= NVRAM_CMD_LAST;
12638 if (i == (len - 4))
12639 nvram_cmd |= NVRAM_CMD_LAST;
12641 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12642 !tg3_flag(tp, 5755_PLUS) &&
12643 (tp->nvram_jedecnum == JEDEC_ST) &&
12644 (nvram_cmd & NVRAM_CMD_FIRST)) {
12646 if ((ret = tg3_nvram_exec_cmd(tp,
12647 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12648 NVRAM_CMD_DONE)))
12650 break;
12652 if (!tg3_flag(tp, FLASH)) {
12653 /* We always do complete word writes to eeprom. */
12654 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12657 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12658 break;
12660 return ret;
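/* Worked example of the FIRST/LAST flag logic above (illustrative
 * only): with nvram_pagesize = 264, writing 8 bytes at offset 260:
 *
 *   i = 0: page_off = 260 = pagesize - 4, so the dword is FIRST
 *          (i == 0) and also LAST (it closes the current page);
 *   i = 4: offset = 264, page_off = 0, so the dword is FIRST again
 *          and, being the final dword (i == len - 4), LAST as well.
 */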
12663 /* offset and length are dword aligned */
12664 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12666 int ret;
12668 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12669 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12670 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12671 udelay(40);
12674 if (!tg3_flag(tp, NVRAM)) {
12675 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12676 } else {
12677 u32 grc_mode;
12679 ret = tg3_nvram_lock(tp);
12680 if (ret)
12681 return ret;
12683 tg3_enable_nvram_access(tp);
12684 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12685 tw32(NVRAM_WRITE1, 0x406);
12687 grc_mode = tr32(GRC_MODE);
12688 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12690 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12691 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12692 buf);
12693 } else {
12694 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12695 buf);
12698 grc_mode = tr32(GRC_MODE);
12699 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12701 tg3_disable_nvram_access(tp);
12702 tg3_nvram_unlock(tp);
12705 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12706 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12707 udelay(40);
12710 return ret;
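/* A minimal caller sketch (hypothetical; modeled on how an ethtool
 * set_eeprom path would use the routine). Offset and length must be
 * dword aligned, and the buffer holds big-endian data, the same byte
 * order tg3_nvram_read_be32() returns.
 */
static int example_nvram_write_dword(struct tg3 *tp, u32 offset, u32 val)
{
	__be32 buf = cpu_to_be32(val);	/* NVRAM stores big-endian */

	if (offset & 3)			/* enforce dword alignment */
		return -EINVAL;

	return tg3_nvram_write_block(tp, offset, sizeof(buf), (u8 *)&buf);
}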
12713 struct subsys_tbl_ent {
12714 u16 subsys_vendor, subsys_devid;
12715 u32 phy_id;
12718 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12719 /* Broadcom boards. */
12720 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12721 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12722 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12723 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12724 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12725 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12726 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12727 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12728 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12729 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12730 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12731 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12732 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12733 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12734 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12735 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12736 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12737 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12738 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12739 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12740 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12741 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12743 /* 3com boards. */
12744 { TG3PCI_SUBVENDOR_ID_3COM,
12745 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12746 { TG3PCI_SUBVENDOR_ID_3COM,
12747 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12748 { TG3PCI_SUBVENDOR_ID_3COM,
12749 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12750 { TG3PCI_SUBVENDOR_ID_3COM,
12751 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12752 { TG3PCI_SUBVENDOR_ID_3COM,
12753 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12755 /* DELL boards. */
12756 { TG3PCI_SUBVENDOR_ID_DELL,
12757 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12758 { TG3PCI_SUBVENDOR_ID_DELL,
12759 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12760 { TG3PCI_SUBVENDOR_ID_DELL,
12761 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12762 { TG3PCI_SUBVENDOR_ID_DELL,
12763 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12765 /* Compaq boards. */
12766 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12767 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12768 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12769 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12770 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12771 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12772 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12773 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12774 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12775 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12777 /* IBM boards. */
12778 { TG3PCI_SUBVENDOR_ID_IBM,
12779 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12782 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12784 int i;
12786 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12787 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12788 tp->pdev->subsystem_vendor) &&
12789 (subsys_id_to_phy_id[i].subsys_devid ==
12790 tp->pdev->subsystem_device))
12791 return &subsys_id_to_phy_id[i];
12793 return NULL;
12796 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12798 u32 val;
12800 tp->phy_id = TG3_PHY_ID_INVALID;
12801 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12803 /* Assume an onboard device that is WOL-capable by default. */
12804 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12805 tg3_flag_set(tp, WOL_CAP);
12807 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12808 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12809 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12810 tg3_flag_set(tp, IS_NIC);
12812 val = tr32(VCPU_CFGSHDW);
12813 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12814 tg3_flag_set(tp, ASPM_WORKAROUND);
12815 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12816 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12817 tg3_flag_set(tp, WOL_ENABLE);
12818 device_set_wakeup_enable(&tp->pdev->dev, true);
12820 goto done;
12823 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12824 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12825 u32 nic_cfg, led_cfg;
12826 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12827 int eeprom_phy_serdes = 0;
12829 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12830 tp->nic_sram_data_cfg = nic_cfg;
12832 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12833 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12834 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12835 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12836 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12837 (ver > 0) && (ver < 0x100))
12838 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12840 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12841 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12843 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12844 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12845 eeprom_phy_serdes = 1;
12847 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12848 if (nic_phy_id != 0) {
12849 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12850 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12852 eeprom_phy_id = (id1 >> 16) << 10;
12853 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12854 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12855 } else
12856 eeprom_phy_id = 0;
12858 tp->phy_id = eeprom_phy_id;
12859 if (eeprom_phy_serdes) {
12860 if (!tg3_flag(tp, 5705_PLUS))
12861 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12862 else
12863 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12866 if (tg3_flag(tp, 5750_PLUS))
12867 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12868 SHASTA_EXT_LED_MODE_MASK);
12869 else
12870 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12872 switch (led_cfg) {
12873 default:
12874 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12875 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12876 break;
12878 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12879 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12880 break;
12882 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12883 tp->led_ctrl = LED_CTRL_MODE_MAC;
12885 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12886 * read on some older 5700/5701 bootcode.
12887 */
12888 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12889 ASIC_REV_5700 ||
12890 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12891 ASIC_REV_5701)
12892 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12894 break;
12896 case SHASTA_EXT_LED_SHARED:
12897 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12898 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12899 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12900 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12901 LED_CTRL_MODE_PHY_2);
12902 break;
12904 case SHASTA_EXT_LED_MAC:
12905 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12906 break;
12908 case SHASTA_EXT_LED_COMBO:
12909 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12910 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12911 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12912 LED_CTRL_MODE_PHY_2);
12913 break;
12917 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12918 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12919 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12920 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12922 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12923 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12925 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12926 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12927 if ((tp->pdev->subsystem_vendor ==
12928 PCI_VENDOR_ID_ARIMA) &&
12929 (tp->pdev->subsystem_device == 0x205a ||
12930 tp->pdev->subsystem_device == 0x2063))
12931 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12932 } else {
12933 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12934 tg3_flag_set(tp, IS_NIC);
12937 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12938 tg3_flag_set(tp, ENABLE_ASF);
12939 if (tg3_flag(tp, 5750_PLUS))
12940 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12943 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12944 tg3_flag(tp, 5750_PLUS))
12945 tg3_flag_set(tp, ENABLE_APE);
12947 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12948 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12949 tg3_flag_clear(tp, WOL_CAP);
12951 if (tg3_flag(tp, WOL_CAP) &&
12952 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12953 tg3_flag_set(tp, WOL_ENABLE);
12954 device_set_wakeup_enable(&tp->pdev->dev, true);
12957 if (cfg2 & (1 << 17))
12958 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12960 /* SerDes signal pre-emphasis in register 0x590 is set by the
12961 * bootcode if bit 18 is set. */
12962 if (cfg2 & (1 << 18))
12963 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12965 if ((tg3_flag(tp, 57765_PLUS) ||
12966 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12967 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12968 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12969 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12971 if (tg3_flag(tp, PCI_EXPRESS) &&
12972 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12973 !tg3_flag(tp, 57765_PLUS)) {
12974 u32 cfg3;
12976 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12977 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12978 tg3_flag_set(tp, ASPM_WORKAROUND);
12981 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12982 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12983 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12984 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12985 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12986 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12988 done:
12989 if (tg3_flag(tp, WOL_CAP))
12990 device_set_wakeup_enable(&tp->pdev->dev,
12991 tg3_flag(tp, WOL_ENABLE));
12992 else
12993 device_set_wakeup_capable(&tp->pdev->dev, false);
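/* Worked example of the PHY ID packing above (illustrative; assumes
 * the raw MII ID registers of a BCM5701, id1 = 0x0020, id2 = 0x6011):
 *
 *   (0x0020 << 10)            = 0x00008000   bits 25:10, PHYSID1
 *   (0x6011 & 0xfc00) << 16   = 0x60000000   bits 31:26, low OUI bits
 *   (0x6011 & 0x03ff)         = 0x00000011   bits  9:0, model/revision
 *                               ----------
 *   phy_id                    = 0x60008011   (TG3_PHY_ID_BCM5701)
 *
 * tg3_phy_probe() below assembles hw_phy_id from MII_PHYSID1/2 with
 * the same layout.
 */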
12996 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12998 int i;
12999 u32 val;
13001 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13002 tw32(OTP_CTRL, cmd);
13004 /* Wait for up to 1 ms for command to execute. */
13005 for (i = 0; i < 100; i++) {
13006 val = tr32(OTP_STATUS);
13007 if (val & OTP_STATUS_CMD_DONE)
13008 break;
13009 udelay(10);
13012 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13015 /* Read the gphy configuration from the OTP region of the chip. The gphy
13016 * configuration is a 32-bit value that straddles the alignment boundary.
13017 * We do two 32-bit reads and then shift and merge the results.
13018 */
13019 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13021 u32 bhalf_otp, thalf_otp;
13023 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13025 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13026 return 0;
13028 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13030 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13031 return 0;
13033 thalf_otp = tr32(OTP_READ_DATA);
13035 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13037 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13038 return 0;
13040 bhalf_otp = tr32(OTP_READ_DATA);
13042 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
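/* Worked example of the shift-and-merge above (illustrative only): if
 * the 32-bit gphy config straddles the two reads as
 * thalf_otp = 0xXXXX1234 and bhalf_otp = 0x5678XXXX, then
 * ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16) = 0x12345678.
 */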
13045 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13047 u32 adv = ADVERTISED_Autoneg |
13048 ADVERTISED_Pause;
13050 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13051 adv |= ADVERTISED_1000baseT_Half |
13052 ADVERTISED_1000baseT_Full;
13054 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13055 adv |= ADVERTISED_100baseT_Half |
13056 ADVERTISED_100baseT_Full |
13057 ADVERTISED_10baseT_Half |
13058 ADVERTISED_10baseT_Full |
13059 ADVERTISED_TP;
13060 else
13061 adv |= ADVERTISED_FIBRE;
13063 tp->link_config.advertising = adv;
13064 tp->link_config.speed = SPEED_INVALID;
13065 tp->link_config.duplex = DUPLEX_INVALID;
13066 tp->link_config.autoneg = AUTONEG_ENABLE;
13067 tp->link_config.active_speed = SPEED_INVALID;
13068 tp->link_config.active_duplex = DUPLEX_INVALID;
13069 tp->link_config.orig_speed = SPEED_INVALID;
13070 tp->link_config.orig_duplex = DUPLEX_INVALID;
13071 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13074 static int __devinit tg3_phy_probe(struct tg3 *tp)
13076 u32 hw_phy_id_1, hw_phy_id_2;
13077 u32 hw_phy_id, hw_phy_id_masked;
13078 int err;
13080 /* flow control autonegotiation is default behavior */
13081 tg3_flag_set(tp, PAUSE_AUTONEG);
13082 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13084 if (tg3_flag(tp, USE_PHYLIB))
13085 return tg3_phy_init(tp);
13087 /* Reading the PHY ID register can conflict with ASF
13088 * firmware access to the PHY hardware.
13089 */
13090 err = 0;
13091 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13092 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13093 } else {
13094 /* Now read the physical PHY_ID from the chip and verify
13095 * that it is sane. If it doesn't look good, we fall back
13096 * to the PHY ID recorded in the eeprom area and, failing
13097 * that, to the hard-coded subsys ID table.
13098 */
13099 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13100 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13102 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13103 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13104 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13106 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13109 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13110 tp->phy_id = hw_phy_id;
13111 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13112 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13113 else
13114 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13115 } else {
13116 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13117 /* Do nothing, phy ID already set up in
13118 * tg3_get_eeprom_hw_cfg().
13119 */
13120 } else {
13121 struct subsys_tbl_ent *p;
13123 /* No eeprom signature? Try the hardcoded
13124 * subsys device table.
13125 */
13126 p = tg3_lookup_by_subsys(tp);
13127 if (!p)
13128 return -ENODEV;
13130 tp->phy_id = p->phy_id;
13131 if (!tp->phy_id ||
13132 tp->phy_id == TG3_PHY_ID_BCM8002)
13133 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13137 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13138 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13139 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13140 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13141 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13142 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13144 tg3_phy_init_link_config(tp);
13146 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13147 !tg3_flag(tp, ENABLE_APE) &&
13148 !tg3_flag(tp, ENABLE_ASF)) {
13149 u32 bmsr, mask;
13151 tg3_readphy(tp, MII_BMSR, &bmsr);
13152 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13153 (bmsr & BMSR_LSTATUS))
13154 goto skip_phy_reset;
13156 err = tg3_phy_reset(tp);
13157 if (err)
13158 return err;
13160 tg3_phy_set_wirespeed(tp);
13162 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13163 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13164 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13165 if (!tg3_copper_is_advertising_all(tp, mask)) {
13166 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13167 tp->link_config.flowctrl);
13169 tg3_writephy(tp, MII_BMCR,
13170 BMCR_ANENABLE | BMCR_ANRESTART);
13174 skip_phy_reset:
13175 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13176 err = tg3_init_5401phy_dsp(tp);
13177 if (err)
13178 return err;
13180 err = tg3_init_5401phy_dsp(tp);
13183 return err;
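/* A note on the double BMSR read in tg3_phy_probe() above: MII link
 * status is latched-low, so the first read flushes stale state and
 * only the second read reflects the current link. A minimal sketch
 * (hypothetical helper) of the same idiom:
 */
static inline bool example_mii_link_up(struct tg3 *tp)
{
	u32 bmsr;

	tg3_readphy(tp, MII_BMSR, &bmsr);	/* flush latched status */
	if (tg3_readphy(tp, MII_BMSR, &bmsr))
		return false;			/* PHY read failed */

	return bmsr & BMSR_LSTATUS;
}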
13186 static void __devinit tg3_read_vpd(struct tg3 *tp)
13188 u8 *vpd_data;
13189 unsigned int block_end, rosize, len;
13190 int j, i = 0;
13192 vpd_data = (u8 *)tg3_vpd_readblock(tp);
13193 if (!vpd_data)
13194 goto out_no_vpd;
13196 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13197 PCI_VPD_LRDT_RO_DATA);
13198 if (i < 0)
13199 goto out_not_found;
13201 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13202 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13203 i += PCI_VPD_LRDT_TAG_SIZE;
13205 if (block_end > TG3_NVM_VPD_LEN)
13206 goto out_not_found;
13208 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13209 PCI_VPD_RO_KEYWORD_MFR_ID);
13210 if (j > 0) {
13211 len = pci_vpd_info_field_size(&vpd_data[j]);
13213 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13214 if (j + len > block_end || len != 4 ||
13215 memcmp(&vpd_data[j], "1028", 4))
13216 goto partno;
13218 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13219 PCI_VPD_RO_KEYWORD_VENDOR0);
13220 if (j < 0)
13221 goto partno;
13223 len = pci_vpd_info_field_size(&vpd_data[j]);
13225 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13226 if (j + len > block_end)
13227 goto partno;
13229 memcpy(tp->fw_ver, &vpd_data[j], len);
13230 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13233 partno:
13234 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13235 PCI_VPD_RO_KEYWORD_PARTNO);
13236 if (i < 0)
13237 goto out_not_found;
13239 len = pci_vpd_info_field_size(&vpd_data[i]);
13241 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13242 if (len > TG3_BPN_SIZE ||
13243 (len + i) > TG3_NVM_VPD_LEN)
13244 goto out_not_found;
13246 memcpy(tp->board_part_number, &vpd_data[i], len);
13248 out_not_found:
13249 kfree(vpd_data);
13250 if (tp->board_part_number[0])
13251 return;
13253 out_no_vpd:
13254 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13255 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13256 strcpy(tp->board_part_number, "BCM5717");
13257 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13258 strcpy(tp->board_part_number, "BCM5718");
13259 else
13260 goto nomatch;
13261 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13262 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13263 strcpy(tp->board_part_number, "BCM57780");
13264 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13265 strcpy(tp->board_part_number, "BCM57760");
13266 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13267 strcpy(tp->board_part_number, "BCM57790");
13268 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13269 strcpy(tp->board_part_number, "BCM57788");
13270 else
13271 goto nomatch;
13272 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13273 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13274 strcpy(tp->board_part_number, "BCM57761");
13275 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13276 strcpy(tp->board_part_number, "BCM57765");
13277 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13278 strcpy(tp->board_part_number, "BCM57781");
13279 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13280 strcpy(tp->board_part_number, "BCM57785");
13281 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13282 strcpy(tp->board_part_number, "BCM57791");
13283 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13284 strcpy(tp->board_part_number, "BCM57795");
13285 else
13286 goto nomatch;
13287 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13288 strcpy(tp->board_part_number, "BCM95906");
13289 } else {
13290 nomatch:
13291 strcpy(tp->board_part_number, "none");
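/* Illustrative note on the VPD walk above (layout per the PCI spec,
 * field values hypothetical): the read-only section is a large-resource
 * tag followed by keyword entries, e.g.
 *
 *   0x90 len_lo len_hi            VPD-R tag, little-endian length
 *   'P' 'N' 0x08 "BCM95721"       part number keyword + data
 *   'V' '0' 0x06 "5.0.12"         vendor-specific (bootcode) string
 *
 * pci_vpd_find_tag() locates the 0x90 tag; the keyword helpers then
 * scan entries within the tag's declared length.
 */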
13295 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13297 u32 val;
13299 if (tg3_nvram_read(tp, offset, &val) ||
13300 (val & 0xfc000000) != 0x0c000000 ||
13301 tg3_nvram_read(tp, offset + 4, &val) ||
13302 val != 0)
13303 return 0;
13305 return 1;
13308 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13310 u32 val, offset, start, ver_offset;
13311 int i, dst_off;
13312 bool newver = false;
13314 if (tg3_nvram_read(tp, 0xc, &offset) ||
13315 tg3_nvram_read(tp, 0x4, &start))
13316 return;
13318 offset = tg3_nvram_logical_addr(tp, offset);
13320 if (tg3_nvram_read(tp, offset, &val))
13321 return;
13323 if ((val & 0xfc000000) == 0x0c000000) {
13324 if (tg3_nvram_read(tp, offset + 4, &val))
13325 return;
13327 if (val == 0)
13328 newver = true;
13331 dst_off = strlen(tp->fw_ver);
13333 if (newver) {
13334 if (TG3_VER_SIZE - dst_off < 16 ||
13335 tg3_nvram_read(tp, offset + 8, &ver_offset))
13336 return;
13338 offset = offset + ver_offset - start;
13339 for (i = 0; i < 16; i += 4) {
13340 __be32 v;
13341 if (tg3_nvram_read_be32(tp, offset + i, &v))
13342 return;
13344 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13346 } else {
13347 u32 major, minor;
13349 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13350 return;
13352 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13353 TG3_NVM_BCVER_MAJSFT;
13354 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13355 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13356 "v%d.%02d", major, minor);
13360 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13362 u32 val, major, minor;
13364 /* Use native endian representation */
13365 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13366 return;
13368 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13369 TG3_NVM_HWSB_CFG1_MAJSFT;
13370 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13371 TG3_NVM_HWSB_CFG1_MINSFT;
13373 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13376 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13378 u32 offset, major, minor, build;
13380 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13382 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13383 return;
13385 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13386 case TG3_EEPROM_SB_REVISION_0:
13387 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13388 break;
13389 case TG3_EEPROM_SB_REVISION_2:
13390 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13391 break;
13392 case TG3_EEPROM_SB_REVISION_3:
13393 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13394 break;
13395 case TG3_EEPROM_SB_REVISION_4:
13396 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13397 break;
13398 case TG3_EEPROM_SB_REVISION_5:
13399 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13400 break;
13401 case TG3_EEPROM_SB_REVISION_6:
13402 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13403 break;
13404 default:
13405 return;
13408 if (tg3_nvram_read(tp, offset, &val))
13409 return;
13411 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13412 TG3_EEPROM_SB_EDH_BLD_SHFT;
13413 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13414 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13415 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13417 if (minor > 99 || build > 26)
13418 return;
13420 offset = strlen(tp->fw_ver);
13421 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13422 " v%d.%02d", major, minor);
13424 if (build > 0) {
13425 offset = strlen(tp->fw_ver);
13426 if (offset < TG3_VER_SIZE - 1)
13427 tp->fw_ver[offset] = 'a' + build - 1;
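/* Worked example of the version formatting above (illustrative only):
 * major = 1, minor = 5, build = 2 formats as "sb v1.05" and the build
 * suffix 'a' + 2 - 1 = 'b' is appended, giving "sb v1.05b".
 */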
13431 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13433 u32 val, offset, start;
13434 int i, vlen;
13436 for (offset = TG3_NVM_DIR_START;
13437 offset < TG3_NVM_DIR_END;
13438 offset += TG3_NVM_DIRENT_SIZE) {
13439 if (tg3_nvram_read(tp, offset, &val))
13440 return;
13442 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13443 break;
13446 if (offset == TG3_NVM_DIR_END)
13447 return;
13449 if (!tg3_flag(tp, 5705_PLUS))
13450 start = 0x08000000;
13451 else if (tg3_nvram_read(tp, offset - 4, &start))
13452 return;
13454 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13455 !tg3_fw_img_is_valid(tp, offset) ||
13456 tg3_nvram_read(tp, offset + 8, &val))
13457 return;
13459 offset += val - start;
13461 vlen = strlen(tp->fw_ver);
13463 tp->fw_ver[vlen++] = ',';
13464 tp->fw_ver[vlen++] = ' ';
13466 for (i = 0; i < 4; i++) {
13467 __be32 v;
13468 if (tg3_nvram_read_be32(tp, offset, &v))
13469 return;
13471 offset += sizeof(v);
13473 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13474 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13475 break;
13478 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13479 vlen += sizeof(v);
13483 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13485 int vlen;
13486 u32 apedata;
13487 char *fwtype;
13489 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13490 return;
13492 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13493 if (apedata != APE_SEG_SIG_MAGIC)
13494 return;
13496 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13497 if (!(apedata & APE_FW_STATUS_READY))
13498 return;
13500 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13502 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13503 tg3_flag_set(tp, APE_HAS_NCSI);
13504 fwtype = "NCSI";
13505 } else {
13506 fwtype = "DASH";
13509 vlen = strlen(tp->fw_ver);
13511 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13512 fwtype,
13513 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13514 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13515 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13516 (apedata & APE_FW_VERSION_BLDMSK));
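/* Worked example of the APE version unpacking above (illustrative;
 * assumes the major/minor/rev/build fields occupy successive bytes
 * from the most significant end of the word): apedata = 0x01020304
 * formats as " NCSI v1.2.3.4" or " DASH v1.2.3.4", depending on the
 * NCSI feature bit.
 */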
13519 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13521 u32 val;
13522 bool vpd_vers = false;
13524 if (tp->fw_ver[0] != 0)
13525 vpd_vers = true;
13527 if (tg3_flag(tp, NO_NVRAM)) {
13528 strcat(tp->fw_ver, "sb");
13529 return;
13532 if (tg3_nvram_read(tp, 0, &val))
13533 return;
13535 if (val == TG3_EEPROM_MAGIC)
13536 tg3_read_bc_ver(tp);
13537 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13538 tg3_read_sb_ver(tp, val);
13539 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13540 tg3_read_hwsb_ver(tp);
13541 else
13542 return;
13544 if (vpd_vers)
13545 goto done;
13547 if (tg3_flag(tp, ENABLE_APE)) {
13548 if (tg3_flag(tp, ENABLE_ASF))
13549 tg3_read_dash_ver(tp);
13550 } else if (tg3_flag(tp, ENABLE_ASF)) {
13551 tg3_read_mgmtfw_ver(tp);
13554 done:
13555 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13558 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13560 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13562 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13563 return TG3_RX_RET_MAX_SIZE_5717;
13564 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13565 return TG3_RX_RET_MAX_SIZE_5700;
13566 else
13567 return TG3_RX_RET_MAX_SIZE_5705;
13570 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13571 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13572 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13573 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13574 { },
13577 static int __devinit tg3_get_invariants(struct tg3 *tp)
13579 u32 misc_ctrl_reg;
13580 u32 pci_state_reg, grc_misc_cfg;
13581 u32 val;
13582 u16 pci_cmd;
13583 int err;
13585 /* Force memory write invalidate off. If we leave it on,
13586 * then on 5700_BX chips we have to enable a workaround.
13587 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13588 * to match the cacheline size. The Broadcom driver has this
13589 * workaround but turns MWI off all the time and so never uses
13590 * it. This seems to suggest that the workaround is insufficient.
13591 */
13592 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13593 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13594 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13596 /* Important! -- Make sure register accesses are byteswapped
13597 * correctly. Also, for those chips that require it, make
13598 * sure that indirect register accesses are enabled before
13599 * the first operation.
13600 */
13601 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13602 &misc_ctrl_reg);
13603 tp->misc_host_ctrl |= (misc_ctrl_reg &
13604 MISC_HOST_CTRL_CHIPREV);
13605 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13606 tp->misc_host_ctrl);
13608 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13609 MISC_HOST_CTRL_CHIPREV_SHIFT);
13610 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13611 u32 prod_id_asic_rev;
13613 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13614 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13615 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13616 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13617 pci_read_config_dword(tp->pdev,
13618 TG3PCI_GEN2_PRODID_ASICREV,
13619 &prod_id_asic_rev);
13620 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13621 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13622 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13623 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13624 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13625 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13626 pci_read_config_dword(tp->pdev,
13627 TG3PCI_GEN15_PRODID_ASICREV,
13628 &prod_id_asic_rev);
13629 else
13630 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13631 &prod_id_asic_rev);
13633 tp->pci_chip_rev_id = prod_id_asic_rev;
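/* Illustrative decode of pci_chip_rev_id (assuming the usual tg3
 * helpers, GET_ASIC_REV(id) = id >> 12 and GET_CHIP_REV(id) = id >> 8):
 *
 *   0x1002 -> GET_ASIC_REV() = 0x01  (ASIC_REV_5703)
 *          -> GET_CHIP_REV() = 0x10  (CHIPREV_5703_AX)
 *          -> low nibble 0x2, i.e. metal revision A2
 */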
13636 /* Wrong chip ID in 5752 A0. This code can be removed later
13637 * as A0 is not in production.
13638 */
13639 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13640 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13642 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13643 * we need to disable memory and use config. cycles
13644 * only to access all registers. The 5702/03 chips
13645 * can mistakenly decode the special cycles from the
13646 * ICH chipsets as memory write cycles, causing corruption
13647 * of register and memory space. Only certain ICH bridges
13648 * will drive special cycles with non-zero data during the
13649 * address phase which can fall within the 5703's address
13650 * range. This is not an ICH bug as the PCI spec allows
13651 * non-zero address during special cycles. However, only
13652 * these ICH bridges are known to drive non-zero addresses
13653 * during special cycles.
13654 *
13655 * Since special cycles do not cross PCI bridges, we only
13656 * enable this workaround if the 5703 is on the secondary
13657 * bus of these ICH bridges.
13658 */
13659 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13660 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13661 static struct tg3_dev_id {
13662 u32 vendor;
13663 u32 device;
13664 u32 rev;
13665 } ich_chipsets[] = {
13666 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13667 PCI_ANY_ID },
13668 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13669 PCI_ANY_ID },
13670 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13671 0xa },
13672 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13673 PCI_ANY_ID },
13674 { },
13676 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13677 struct pci_dev *bridge = NULL;
13679 while (pci_id->vendor != 0) {
13680 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13681 bridge);
13682 if (!bridge) {
13683 pci_id++;
13684 continue;
13686 if (pci_id->rev != PCI_ANY_ID) {
13687 if (bridge->revision > pci_id->rev)
13688 continue;
13690 if (bridge->subordinate &&
13691 (bridge->subordinate->number ==
13692 tp->pdev->bus->number)) {
13693 tg3_flag_set(tp, ICH_WORKAROUND);
13694 pci_dev_put(bridge);
13695 break;
13700 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13701 static struct tg3_dev_id {
13702 u32 vendor;
13703 u32 device;
13704 } bridge_chipsets[] = {
13705 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13706 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13707 { },
13709 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13710 struct pci_dev *bridge = NULL;
13712 while (pci_id->vendor != 0) {
13713 bridge = pci_get_device(pci_id->vendor,
13714 pci_id->device,
13715 bridge);
13716 if (!bridge) {
13717 pci_id++;
13718 continue;
13720 if (bridge->subordinate &&
13721 (bridge->subordinate->number <=
13722 tp->pdev->bus->number) &&
13723 (bridge->subordinate->subordinate >=
13724 tp->pdev->bus->number)) {
13725 tg3_flag_set(tp, 5701_DMA_BUG);
13726 pci_dev_put(bridge);
13727 break;
13732 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13733 * DMA addresses > 40-bit. This bridge may have other additional
13734 * 57xx devices behind it in some 4-port NIC designs for example.
13735 * Any tg3 device found behind the bridge will also need the 40-bit
13736 * DMA workaround.
13737 */
13738 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13739 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13740 tg3_flag_set(tp, 5780_CLASS);
13741 tg3_flag_set(tp, 40BIT_DMA_BUG);
13742 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13743 } else {
13744 struct pci_dev *bridge = NULL;
13746 do {
13747 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13748 PCI_DEVICE_ID_SERVERWORKS_EPB,
13749 bridge);
13750 if (bridge && bridge->subordinate &&
13751 (bridge->subordinate->number <=
13752 tp->pdev->bus->number) &&
13753 (bridge->subordinate->subordinate >=
13754 tp->pdev->bus->number)) {
13755 tg3_flag_set(tp, 40BIT_DMA_BUG);
13756 pci_dev_put(bridge);
13757 break;
13759 } while (bridge);
13762 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13763 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13764 tp->pdev_peer = tg3_find_peer(tp);
13766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13767 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13768 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13769 tg3_flag_set(tp, 5717_PLUS);
13771 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13772 tg3_flag(tp, 5717_PLUS))
13773 tg3_flag_set(tp, 57765_PLUS);
13775 /* Intentionally exclude ASIC_REV_5906 */
13776 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13777 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13778 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13779 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13780 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13781 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13782 tg3_flag(tp, 57765_PLUS))
13783 tg3_flag_set(tp, 5755_PLUS);
13785 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13786 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13787 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13788 tg3_flag(tp, 5755_PLUS) ||
13789 tg3_flag(tp, 5780_CLASS))
13790 tg3_flag_set(tp, 5750_PLUS);
13792 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13793 tg3_flag(tp, 5750_PLUS))
13794 tg3_flag_set(tp, 5705_PLUS);
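/* The flag cascade above establishes a strict inclusion chain, each
 * flag implying the next:
 *
 *   5717_PLUS -> 57765_PLUS -> 5755_PLUS -> 5750_PLUS -> 5705_PLUS
 *
 * so later code can test the broadest applicable flag instead of
 * enumerating ASIC revisions.
 */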
13796 /* Determine TSO capabilities */
13797 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13798 ; /* Do nothing. HW bug. */
13799 else if (tg3_flag(tp, 57765_PLUS))
13800 tg3_flag_set(tp, HW_TSO_3);
13801 else if (tg3_flag(tp, 5755_PLUS) ||
13802 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13803 tg3_flag_set(tp, HW_TSO_2);
13804 else if (tg3_flag(tp, 5750_PLUS)) {
13805 tg3_flag_set(tp, HW_TSO_1);
13806 tg3_flag_set(tp, TSO_BUG);
13807 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13808 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13809 tg3_flag_clear(tp, TSO_BUG);
13810 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13811 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13812 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13813 tg3_flag_set(tp, TSO_BUG);
13814 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13815 tp->fw_needed = FIRMWARE_TG3TSO5;
13816 else
13817 tp->fw_needed = FIRMWARE_TG3TSO;
13820 /* Selectively allow TSO based on operating conditions */
13821 if (tg3_flag(tp, HW_TSO_1) ||
13822 tg3_flag(tp, HW_TSO_2) ||
13823 tg3_flag(tp, HW_TSO_3) ||
13824 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13825 tg3_flag_set(tp, TSO_CAPABLE);
13826 else {
13827 tg3_flag_clear(tp, TSO_CAPABLE);
13828 tg3_flag_clear(tp, TSO_BUG);
13829 tp->fw_needed = NULL;
13832 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13833 tp->fw_needed = FIRMWARE_TG3;
13835 tp->irq_max = 1;
13837 if (tg3_flag(tp, 5750_PLUS)) {
13838 tg3_flag_set(tp, SUPPORT_MSI);
13839 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13840 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13841 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13842 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13843 tp->pdev_peer == tp->pdev))
13844 tg3_flag_clear(tp, SUPPORT_MSI);
13846 if (tg3_flag(tp, 5755_PLUS) ||
13847 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13848 tg3_flag_set(tp, 1SHOT_MSI);
13851 if (tg3_flag(tp, 57765_PLUS)) {
13852 tg3_flag_set(tp, SUPPORT_MSIX);
13853 tp->irq_max = TG3_IRQ_MAX_VECS;
13857 if (tg3_flag(tp, 5755_PLUS))
13858 tg3_flag_set(tp, SHORT_DMA_BUG);
13860 if (tg3_flag(tp, 5717_PLUS))
13861 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13863 if (tg3_flag(tp, 57765_PLUS) &&
13864 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13865 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13867 if (!tg3_flag(tp, 5705_PLUS) ||
13868 tg3_flag(tp, 5780_CLASS) ||
13869 tg3_flag(tp, USE_JUMBO_BDFLAG))
13870 tg3_flag_set(tp, JUMBO_CAPABLE);
13872 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13873 &pci_state_reg);
13875 if (pci_is_pcie(tp->pdev)) {
13876 u16 lnkctl;
13878 tg3_flag_set(tp, PCI_EXPRESS);
13880 tp->pcie_readrq = 4096;
13881 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13882 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13883 tp->pcie_readrq = 2048;
13885 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13887 pci_read_config_word(tp->pdev,
13888 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
13889 &lnkctl);
13890 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13891 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13892 ASIC_REV_5906) {
13893 tg3_flag_clear(tp, HW_TSO_2);
13894 tg3_flag_clear(tp, TSO_CAPABLE);
13896 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13897 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13898 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13899 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13900 tg3_flag_set(tp, CLKREQ_BUG);
13901 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13902 tg3_flag_set(tp, L1PLLPD_EN);
13904 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13905 /* BCM5785 devices are effectively PCIe devices, and should
13906 * follow PCIe codepaths, but do not have a PCIe capabilities
13907 * section.
13908 */
13909 tg3_flag_set(tp, PCI_EXPRESS);
13910 } else if (!tg3_flag(tp, 5705_PLUS) ||
13911 tg3_flag(tp, 5780_CLASS)) {
13912 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13913 if (!tp->pcix_cap) {
13914 dev_err(&tp->pdev->dev,
13915 "Cannot find PCI-X capability, aborting\n");
13916 return -EIO;
13919 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13920 tg3_flag_set(tp, PCIX_MODE);
13923 /* If we have an AMD 762 or VIA K8T800 chipset, write
13924 * reordering to the mailbox registers done by the host
13925 * controller can cause major trouble. We read back from
13926 * every mailbox register write to force the writes to be
13927 * posted to the chip in order.
13928 */
13929 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13930 !tg3_flag(tp, PCI_EXPRESS))
13931 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13933 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13934 &tp->pci_cacheline_sz);
13935 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13936 &tp->pci_lat_timer);
13937 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13938 tp->pci_lat_timer < 64) {
13939 tp->pci_lat_timer = 64;
13940 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13941 tp->pci_lat_timer);
13944 /* Important! -- It is critical that the PCI-X hw workaround
13945 * situation is decided before the first MMIO register access.
13946 */
13947 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13948 /* 5700 BX chips need to have their TX producer index
13949 * mailboxes written twice to work around a bug.
13950 */
13951 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13953 /* If we are in PCI-X mode, enable the register write workaround.
13954 *
13955 * The workaround is to use indirect register accesses
13956 * for all chip writes except those to mailbox registers.
13957 */
13958 if (tg3_flag(tp, PCIX_MODE)) {
13959 u32 pm_reg;
13961 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13963 /* The chip can have its power management PCI config
13964 * space registers clobbered due to this bug.
13965 * So explicitly force the chip into D0 here.
13966 */
13967 pci_read_config_dword(tp->pdev,
13968 tp->pm_cap + PCI_PM_CTRL,
13969 &pm_reg);
13970 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13971 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13972 pci_write_config_dword(tp->pdev,
13973 tp->pm_cap + PCI_PM_CTRL,
13974 pm_reg);
13976 /* Also, force SERR#/PERR# in PCI command. */
13977 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13978 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13979 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13983 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13984 tg3_flag_set(tp, PCI_HIGH_SPEED);
13985 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13986 tg3_flag_set(tp, PCI_32BIT);
13988 /* Chip-specific fixup from Broadcom driver */
13989 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13990 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13991 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13992 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13995 /* Default fast path register access methods */
13996 tp->read32 = tg3_read32;
13997 tp->write32 = tg3_write32;
13998 tp->read32_mbox = tg3_read32;
13999 tp->write32_mbox = tg3_write32;
14000 tp->write32_tx_mbox = tg3_write32;
14001 tp->write32_rx_mbox = tg3_write32;
14003 /* Various workaround register access methods */
14004 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14005 tp->write32 = tg3_write_indirect_reg32;
14006 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14007 (tg3_flag(tp, PCI_EXPRESS) &&
14008 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14009 /*
14010 * Back to back register writes can cause problems on these
14011 * chips; the workaround is to read back all reg writes
14012 * except those to mailbox regs.
14013 *
14014 * See tg3_write_indirect_reg32().
14015 */
14016 tp->write32 = tg3_write_flush_reg32;
14019 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14020 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14021 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14022 tp->write32_rx_mbox = tg3_write_flush_reg32;
14025 if (tg3_flag(tp, ICH_WORKAROUND)) {
14026 tp->read32 = tg3_read_indirect_reg32;
14027 tp->write32 = tg3_write_indirect_reg32;
14028 tp->read32_mbox = tg3_read_indirect_mbox;
14029 tp->write32_mbox = tg3_write_indirect_mbox;
14030 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14031 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14033 iounmap(tp->regs);
14034 tp->regs = NULL;
14036 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14037 pci_cmd &= ~PCI_COMMAND_MEMORY;
14038 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14040 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14041 tp->read32_mbox = tg3_read32_mbox_5906;
14042 tp->write32_mbox = tg3_write32_mbox_5906;
14043 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14044 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14047 if (tp->write32 == tg3_write_indirect_reg32 ||
14048 (tg3_flag(tp, PCIX_MODE) &&
14049 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14051 tg3_flag_set(tp, SRAM_USE_CONFIG);
14053 /* The memory arbiter has to be enabled in order for SRAM accesses
14054 * to succeed. Normally on powerup the tg3 chip firmware will make
14055 * sure it is enabled, but other entities such as system netboot
14056 * code might disable it.
14057 */
14058 val = tr32(MEMARB_MODE);
14059 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14061 if (tg3_flag(tp, PCIX_MODE)) {
14062 pci_read_config_dword(tp->pdev,
14063 tp->pcix_cap + PCI_X_STATUS, &val);
14064 tp->pci_fn = val & 0x7;
14065 } else {
14066 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
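/* Worked example of the function-number derivation above (illustrative
 * only): devfn packs slot and function as (slot << 3) | func, so
 * PCI_FUNC(0x11) = 0x11 & 7 = 1. In PCI-X mode the function number is
 * instead read back from the PCI_X_STATUS register.
 */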
14069 /* Get eeprom hw config before calling tg3_set_power_state().
14070 * In particular, the TG3_FLAG_IS_NIC flag must be
14071 * determined before calling tg3_set_power_state() so that
14072 * we know whether or not to switch out of Vaux power.
14073 * When the flag is set, it means that GPIO1 is used for eeprom
14074 * write protect and also implies that it is a LOM where GPIOs
14075 * are not used to switch power.
14076 */
14077 tg3_get_eeprom_hw_cfg(tp);
14079 if (tg3_flag(tp, ENABLE_APE)) {
14080 /* Allow reads and writes to the
14081 * APE register and memory space.
14082 */
14083 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14084 PCISTATE_ALLOW_APE_SHMEM_WR |
14085 PCISTATE_ALLOW_APE_PSPACE_WR;
14086 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14087 pci_state_reg);
14089 tg3_ape_lock_init(tp);
14092 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14093 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14094 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14095 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14096 tg3_flag(tp, 57765_PLUS))
14097 tg3_flag_set(tp, CPMU_PRESENT);
14099 /* Set up tp->grc_local_ctrl before calling
14100 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14101 * will bring 5700's external PHY out of reset.
14102 * It is also used as eeprom write protect on LOMs.
14103 */
14104 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14105 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14106 tg3_flag(tp, EEPROM_WRITE_PROT))
14107 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14108 GRC_LCLCTRL_GPIO_OUTPUT1);
14109 /* Unused GPIO3 must be driven as output on 5752 because there
14110 * are no pull-up resistors on unused GPIO pins.
14111 */
14112 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14113 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14115 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14116 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14117 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14118 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14120 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14121 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14122 /* Turn off the debug UART. */
14123 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14124 if (tg3_flag(tp, IS_NIC))
14125 /* Keep VMain power. */
14126 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14127 GRC_LCLCTRL_GPIO_OUTPUT0;
14130 /* Switch out of Vaux if it is a NIC */
14131 tg3_pwrsrc_switch_to_vmain(tp);
14133 /* Derive initial jumbo mode from MTU assigned in
14134 * ether_setup() via the alloc_etherdev() call
14135 */
14136 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14137 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14139 /* Determine WakeOnLan speed to use. */
14140 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14141 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14142 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14143 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14144 tg3_flag_clear(tp, WOL_SPEED_100MB);
14145 } else {
14146 tg3_flag_set(tp, WOL_SPEED_100MB);
14149 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14150 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14152 /* A few boards don't want Ethernet@WireSpeed phy feature */
14153 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14154 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14155 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14156 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14157 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14158 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14159 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14161 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14162 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14163 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14164 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14165 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14167 if (tg3_flag(tp, 5705_PLUS) &&
14168 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14169 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14170 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14171 !tg3_flag(tp, 57765_PLUS)) {
14172 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14176 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14177 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14178 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14179 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14180 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14181 } else
14182 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14186 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14187 tp->phy_otp = tg3_read_otp_phycfg(tp);
14188 if (tp->phy_otp == 0)
14189 tp->phy_otp = TG3_OTP_DEFAULT;
14192 if (tg3_flag(tp, CPMU_PRESENT))
14193 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14194 else
14195 tp->mi_mode = MAC_MI_MODE_BASE;
14197 tp->coalesce_mode = 0;
14198 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14199 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14200 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14202 /* Set these bits to enable statistics workaround. */
14203 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14204 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14205 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14206 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14207 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14211 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14212 tg3_flag_set(tp, USE_PHYLIB);
14214 err = tg3_mdio_init(tp);
14215 if (err)
14216 return err;
14218 /* Initialize data/descriptor byte/word swapping. */
14219 val = tr32(GRC_MODE);
14220 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14221 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14222 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14223 GRC_MODE_B2HRX_ENABLE |
14224 GRC_MODE_HTX2B_ENABLE |
14225 GRC_MODE_HOST_STACKUP);
14226 else
14227 val &= GRC_MODE_HOST_STACKUP;
14229 tw32(GRC_MODE, val | tp->grc_mode);
14231 tg3_switch_clocks(tp);
14233 /* Clear this out for sanity. */
14234 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14236 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14237 &pci_state_reg);
14238 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14239 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14240 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14242 if (chiprevid == CHIPREV_ID_5701_A0 ||
14243 chiprevid == CHIPREV_ID_5701_B0 ||
14244 chiprevid == CHIPREV_ID_5701_B2 ||
14245 chiprevid == CHIPREV_ID_5701_B5) {
14246 void __iomem *sram_base;
14248 /* Write some dummy words into the SRAM status block
14249 * area, see if it reads back correctly. If the return
14250 * value is bad, force enable the PCIX workaround.
14251 */
14252 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14254 writel(0x00000000, sram_base);
14255 writel(0x00000000, sram_base + 4);
14256 writel(0xffffffff, sram_base + 4);
14257 if (readl(sram_base) != 0x00000000)
14258 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
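/* A reading of the probe above: zero is written to the first status
 * block word, then 0 and 0xffffffff back to back to the adjacent word,
 * and the first word is re-read. Apparently the back-to-back writes are
 * what trigger the erratum; if the first word no longer reads back as
 * zero, the indirect-access workaround is forced on.
 */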
14262 udelay(50);
14263 tg3_nvram_init(tp);
14265 grc_misc_cfg = tr32(GRC_MISC_CFG);
14266 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14268 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14269 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14270 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14271 tg3_flag_set(tp, IS_5788);
14273 if (!tg3_flag(tp, IS_5788) &&
14274 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14275 tg3_flag_set(tp, TAGGED_STATUS);
14276 if (tg3_flag(tp, TAGGED_STATUS)) {
14277 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14278 HOSTCC_MODE_CLRTICK_TXBD);
14280 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14281 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14282 tp->misc_host_ctrl);
14285 /* Preserve the APE MAC_MODE bits */
14286 if (tg3_flag(tp, ENABLE_APE))
14287 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14288 else
14289 tp->mac_mode = TG3_DEF_MAC_MODE;
14291 /* these are limited to 10/100 only */
14292 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14293 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14294 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14295 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14296 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14297 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14298 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14299 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14300 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14301 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14302 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14303 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14304 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14305 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14306 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14307 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14309 err = tg3_phy_probe(tp);
14310 if (err) {
14311 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14312 /* ... but do not return immediately ... */
14313 tg3_mdio_fini(tp);
14316 tg3_read_vpd(tp);
14317 tg3_read_fw_ver(tp);
14319 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14320 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14321 } else {
14322 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14323 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14324 else
14325 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14328 /* 5700 {AX,BX} chips have a broken status block link
14329 * change bit implementation, so we must use the
14330 * status register in those cases.
14331 */
14332 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14333 tg3_flag_set(tp, USE_LINKCHG_REG);
14334 else
14335 tg3_flag_clear(tp, USE_LINKCHG_REG);
14337 /* The led_ctrl is set during tg3_phy_probe, here we might
14338 * have to force the link status polling mechanism based
14339 * upon subsystem IDs.
14340 */
14341 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14342 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14343 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14344 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14345 tg3_flag_set(tp, USE_LINKCHG_REG);
14348 /* For all SERDES we poll the MAC status register. */
14349 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14350 tg3_flag_set(tp, POLL_SERDES);
14351 else
14352 tg3_flag_clear(tp, POLL_SERDES);
14354 tp->rx_offset = NET_IP_ALIGN;
14355 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14356 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14357 tg3_flag(tp, PCIX_MODE)) {
14358 tp->rx_offset = 0;
14359 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14360 tp->rx_copy_thresh = ~(u16)0;
14361 #endif
14364 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14365 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14366 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14368 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14370 /* Increment the rx prod index on the rx std ring by at most
14371 * 8 for these chips to work around hw errata.
14372 */
14373 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14374 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14375 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14376 tp->rx_std_max_post = 8;
14378 if (tg3_flag(tp, ASPM_WORKAROUND))
14379 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14380 PCIE_PWR_MGMT_L1_THRESH_MSK;
14382 return err;
14385 #ifdef CONFIG_SPARC
14386 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14388 struct net_device *dev = tp->dev;
14389 struct pci_dev *pdev = tp->pdev;
14390 struct device_node *dp = pci_device_to_OF_node(pdev);
14391 const unsigned char *addr;
14392 int len;
14394 addr = of_get_property(dp, "local-mac-address", &len);
14395 if (addr && len == 6) {
14396 memcpy(dev->dev_addr, addr, 6);
14397 memcpy(dev->perm_addr, dev->dev_addr, 6);
14398 return 0;
14400 return -ENODEV;
14403 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14405 struct net_device *dev = tp->dev;
14407 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14408 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14409 return 0;
14411 #endif
14413 static int __devinit tg3_get_device_address(struct tg3 *tp)
14415 struct net_device *dev = tp->dev;
14416 u32 hi, lo, mac_offset;
14417 int addr_ok = 0;
14419 #ifdef CONFIG_SPARC
14420 if (!tg3_get_macaddr_sparc(tp))
14421 return 0;
14422 #endif
14424 mac_offset = 0x7c;
14425 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14426 tg3_flag(tp, 5780_CLASS)) {
14427 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14428 mac_offset = 0xcc;
14429 if (tg3_nvram_lock(tp))
14430 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14431 else
14432 tg3_nvram_unlock(tp);
14433 } else if (tg3_flag(tp, 5717_PLUS)) {
14434 if (tp->pci_fn & 1)
14435 mac_offset = 0xcc;
14436 if (tp->pci_fn > 1)
14437 mac_offset += 0x18c;
14438 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14439 mac_offset = 0x10;
14441 /* First try to get it from MAC address mailbox. */
14442 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14443 if ((hi >> 16) == 0x484b) {
14444 dev->dev_addr[0] = (hi >> 8) & 0xff;
14445 dev->dev_addr[1] = (hi >> 0) & 0xff;
14447 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14448 dev->dev_addr[2] = (lo >> 24) & 0xff;
14449 dev->dev_addr[3] = (lo >> 16) & 0xff;
14450 dev->dev_addr[4] = (lo >> 8) & 0xff;
14451 dev->dev_addr[5] = (lo >> 0) & 0xff;
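/* The 0x484b checked above is the ASCII pair "HK"; it appears to be a
 * bootcode signature left in the top half of the high mailbox word to
 * mark that a valid MAC address follows. Inferred from the code, not
 * from documentation.
 */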
14453 /* Some old bootcode may report a 0 MAC address in SRAM */
14454 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14456 if (!addr_ok) {
14457 /* Next, try NVRAM. */
14458 if (!tg3_flag(tp, NO_NVRAM) &&
14459 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14460 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14461 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14462 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14463 }
14464 /* Finally just fetch it out of the MAC control regs. */
14465 else {
14466 hi = tr32(MAC_ADDR_0_HIGH);
14467 lo = tr32(MAC_ADDR_0_LOW);
14469 dev->dev_addr[5] = lo & 0xff;
14470 dev->dev_addr[4] = (lo >> 8) & 0xff;
14471 dev->dev_addr[3] = (lo >> 16) & 0xff;
14472 dev->dev_addr[2] = (lo >> 24) & 0xff;
14473 dev->dev_addr[1] = hi & 0xff;
14474 dev->dev_addr[0] = (hi >> 8) & 0xff;
14478 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14479 #ifdef CONFIG_SPARC
14480 if (!tg3_get_default_macaddr_sparc(tp))
14481 return 0;
14482 #endif
14483 return -EINVAL;
14485 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14486 return 0;
14489 #define BOUNDARY_SINGLE_CACHELINE 1
14490 #define BOUNDARY_MULTI_CACHELINE 2
14492 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14494 int cacheline_size;
14495 u8 byte;
14496 int goal;
14498 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14499 if (byte == 0)
14500 cacheline_size = 1024;
14501 else
14502 cacheline_size = (int) byte * 4;
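/* PCI_CACHE_LINE_SIZE is encoded in 32-bit words, hence the multiply by
 * four: a register value of 16, for example, means a 64-byte cache
 * line. Zero (register never programmed) is treated as 1024 bytes.
 */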
14504 /* On 5703 and later chips, the boundary bits have no
14505 * effect.
14506 */
14507 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14508 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14509 !tg3_flag(tp, PCI_EXPRESS))
14510 goto out;
14512 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14513 goal = BOUNDARY_MULTI_CACHELINE;
14514 #else
14515 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14516 goal = BOUNDARY_SINGLE_CACHELINE;
14517 #else
14518 goal = 0;
14519 #endif
14520 #endif
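/* Rough intent of the per-arch goals above, judging by the comment
 * further down: PPC64/IA64/PARISC host bridges are assumed to cope with
 * bursts that span several cache lines, sparc64/alpha prefer bursts
 * confined to a single line, and all other arches leave the DMA
 * boundary bits untouched (goal == 0).
 */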
14522 if (tg3_flag(tp, 57765_PLUS)) {
14523 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14524 goto out;
14527 if (!goal)
14528 goto out;
14530 /* PCI controllers on most RISC systems tend to disconnect
14531 * when a device tries to burst across a cache-line boundary.
14532 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14533 *
14534 * Unfortunately, for PCI-E there are only limited
14535 * write-side controls for this, and thus for reads
14536 * we will still get the disconnects. We'll also waste
14537 * these PCI cycles for both read and write for chips
14538 * other than 5700 and 5701 which do not implement the
14539 * boundary bits.
14540 */
14541 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14542 switch (cacheline_size) {
14543 case 16:
14544 case 32:
14545 case 64:
14546 case 128:
14547 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14548 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14549 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14550 } else {
14551 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14552 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14554 break;
14556 case 256:
14557 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14558 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14559 break;
14561 default:
14562 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14563 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14564 break;
14566 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14567 switch (cacheline_size) {
14568 case 16:
14569 case 32:
14570 case 64:
14571 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14572 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14573 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14574 break;
14576 /* fallthrough */
14577 case 128:
14578 default:
14579 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14580 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14581 break;
14583 } else {
14584 switch (cacheline_size) {
14585 case 16:
14586 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14587 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14588 DMA_RWCTRL_WRITE_BNDRY_16);
14589 break;
14591 /* fallthrough */
14592 case 32:
14593 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14594 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14595 DMA_RWCTRL_WRITE_BNDRY_32);
14596 break;
14598 /* fallthrough */
14599 case 64:
14600 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14601 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14602 DMA_RWCTRL_WRITE_BNDRY_64);
14603 break;
14605 /* fallthrough */
14606 case 128:
14607 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14608 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14609 DMA_RWCTRL_WRITE_BNDRY_128);
14610 break;
14612 /* fallthrough */
14613 case 256:
14614 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14615 DMA_RWCTRL_WRITE_BNDRY_256);
14616 break;
14617 case 512:
14618 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14619 DMA_RWCTRL_WRITE_BNDRY_512);
14620 break;
14621 case 1024:
14622 default:
14623 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14624 DMA_RWCTRL_WRITE_BNDRY_1024);
14625 break;
14629 out:
14630 return val;
14633 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14635 struct tg3_internal_buffer_desc test_desc;
14636 u32 sram_dma_descs;
14637 int i, ret;
14639 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14641 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14642 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14643 tw32(RDMAC_STATUS, 0);
14644 tw32(WDMAC_STATUS, 0);
14646 tw32(BUFMGR_MODE, 0);
14647 tw32(FTQ_RESET, 0);
14649 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14650 test_desc.addr_lo = buf_dma & 0xffffffff;
14651 test_desc.nic_mbuf = 0x00002100;
14652 test_desc.len = size;
14653
14654 /*
14655 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14656 * the *second* time the tg3 driver was getting loaded after an
14657 * initial scan.
14658 *
14659 * Broadcom tells me:
14660 * ...the DMA engine is connected to the GRC block and a DMA
14661 * reset may affect the GRC block in some unpredictable way...
14662 * The behavior of resets to individual blocks has not been tested.
14663 *
14664 * Broadcom noted the GRC reset will also reset all sub-components.
14665 */
14666 if (to_device) {
14667 test_desc.cqid_sqid = (13 << 8) | 2;
14669 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14670 udelay(40);
14671 } else {
14672 test_desc.cqid_sqid = (16 << 8) | 7;
14674 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14675 udelay(40);
14677 test_desc.flags = 0x00000005;
14679 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14680 u32 val;
14682 val = *(((u32 *)&test_desc) + i);
14683 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14684 sram_dma_descs + (i * sizeof(u32)));
14685 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14687 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
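/* The loop above pushes the test descriptor into NIC SRAM one 32-bit
 * word at a time through the PCI memory window: the window base is
 * pointed at each word of the descriptor pool, the word is written via
 * TG3PCI_MEM_WIN_DATA, and the window is parked back at offset 0 when
 * done.
 */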
14689 if (to_device)
14690 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14691 else
14692 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14694 ret = -ENODEV;
14695 for (i = 0; i < 40; i++) {
14696 u32 val;
14698 if (to_device)
14699 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14700 else
14701 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14702 if ((val & 0xffff) == sram_dma_descs) {
14703 ret = 0;
14704 break;
14707 udelay(100);
14710 return ret;
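/* Completion is detected by polling the matching FTQ ENQDEQ register
 * for the descriptor address: 40 polls spaced 100 usec apart give the
 * DMA engine roughly 4 ms before -ENODEV is returned.
 */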
14713 #define TEST_BUFFER_SIZE 0x2000
14715 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14716 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14717 { },
14720 static int __devinit tg3_test_dma(struct tg3 *tp)
14722 dma_addr_t buf_dma;
14723 u32 *buf, saved_dma_rwctrl;
14724 int ret = 0;
14726 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14727 &buf_dma, GFP_KERNEL);
14728 if (!buf) {
14729 ret = -ENOMEM;
14730 goto out_nofree;
14733 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14734 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14736 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14738 if (tg3_flag(tp, 57765_PLUS))
14739 goto out;
14741 if (tg3_flag(tp, PCI_EXPRESS)) {
14742 /* DMA read watermark not used on PCIE */
14743 tp->dma_rwctrl |= 0x00180000;
14744 } else if (!tg3_flag(tp, PCIX_MODE)) {
14745 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14746 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14747 tp->dma_rwctrl |= 0x003f0000;
14748 else
14749 tp->dma_rwctrl |= 0x003f000f;
14750 } else {
14751 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14752 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14753 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14754 u32 read_water = 0x7;
14756 /* If the 5704 is behind the EPB bridge, we can
14757 * do the less restrictive ONE_DMA workaround for
14758 * better performance.
14759 */
14760 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14761 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14762 tp->dma_rwctrl |= 0x8000;
14763 else if (ccval == 0x6 || ccval == 0x7)
14764 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14767 read_water = 4;
14768 /* Set bit 23 to enable PCIX hw bug fix */
14769 tp->dma_rwctrl |=
14770 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14771 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14772 (1 << 23);
14773 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14774 /* 5780 always in PCIX mode */
14775 tp->dma_rwctrl |= 0x00144000;
14776 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14777 /* 5714 always in PCIX mode */
14778 tp->dma_rwctrl |= 0x00148000;
14779 } else {
14780 tp->dma_rwctrl |= 0x001b000f;
14784 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14785 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14786 tp->dma_rwctrl &= 0xfffffff0;
14788 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14789 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14790 /* Remove this if it causes problems for some boards. */
14791 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14793 /* On 5700/5701 chips, we need to set this bit.
14794 * Otherwise the chip will issue cacheline transactions
14795 * to streamable DMA memory with not all the byte
14796 * enables turned on. This is an error on several
14797 * RISC PCI controllers, in particular sparc64.
14798 *
14799 * On 5703/5704 chips, this bit has been reassigned
14800 * a different meaning. In particular, it is used
14801 * on those chips to enable a PCI-X workaround.
14802 */
14803 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14804 }
14806 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14808 #if 0
14809 /* Unneeded, already done by tg3_get_invariants. */
14810 tg3_switch_clocks(tp);
14811 #endif
14813 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14814 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14815 goto out;
14817 /* It is best to perform DMA test with maximum write burst size
14818 * to expose the 5700/5701 write DMA bug.
14819 */
14820 saved_dma_rwctrl = tp->dma_rwctrl;
14821 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14822 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14824 while (1) {
14825 u32 *p = buf, i;
14827 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14828 p[i] = i;
14830 /* Send the buffer to the chip. */
14831 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14832 if (ret) {
14833 dev_err(&tp->pdev->dev,
14834 "%s: Buffer write failed. err = %d\n",
14835 __func__, ret);
14836 break;
14839 #if 0
14840 /* validate data reached card RAM correctly. */
14841 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14842 u32 val;
14843 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14844 if (le32_to_cpu(val) != p[i]) {
14845 dev_err(&tp->pdev->dev,
14846 "%s: Buffer corrupted on device! "
14847 "(%d != %d)\n", __func__, val, i);
14848 /* ret = -ENODEV here? */
14849 } else
14850 p[i] = 0;
14851 }
14852 #endif
14853 /* Now read it back. */
14854 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14855 if (ret) {
14856 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14857 "err = %d\n", __func__, ret);
14858 break;
14861 /* Verify it. */
14862 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14863 if (p[i] == i)
14864 continue;
14866 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14867 DMA_RWCTRL_WRITE_BNDRY_16) {
14868 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14869 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14870 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14871 break;
14872 } else {
14873 dev_err(&tp->pdev->dev,
14874 "%s: Buffer corrupted on read back! "
14875 "(%d != %d)\n", __func__, p[i], i);
14876 ret = -ENODEV;
14877 goto out;
14881 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14882 /* Success. */
14883 ret = 0;
14884 break;
14887 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14888 DMA_RWCTRL_WRITE_BNDRY_16) {
14889 /* DMA test passed without adjusting DMA boundary,
14890 * now look for chipsets that are known to expose the
14891 * DMA bug without failing the test.
14892 */
14893 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14894 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14895 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14896 } else {
14897 /* Safe to use the calculated DMA boundary. */
14898 tp->dma_rwctrl = saved_dma_rwctrl;
14901 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14904 out:
14905 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14906 out_nofree:
14907 return ret;
14910 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14912 if (tg3_flag(tp, 57765_PLUS)) {
14913 tp->bufmgr_config.mbuf_read_dma_low_water =
14914 DEFAULT_MB_RDMA_LOW_WATER_5705;
14915 tp->bufmgr_config.mbuf_mac_rx_low_water =
14916 DEFAULT_MB_MACRX_LOW_WATER_57765;
14917 tp->bufmgr_config.mbuf_high_water =
14918 DEFAULT_MB_HIGH_WATER_57765;
14920 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14921 DEFAULT_MB_RDMA_LOW_WATER_5705;
14922 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14923 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14924 tp->bufmgr_config.mbuf_high_water_jumbo =
14925 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14926 } else if (tg3_flag(tp, 5705_PLUS)) {
14927 tp->bufmgr_config.mbuf_read_dma_low_water =
14928 DEFAULT_MB_RDMA_LOW_WATER_5705;
14929 tp->bufmgr_config.mbuf_mac_rx_low_water =
14930 DEFAULT_MB_MACRX_LOW_WATER_5705;
14931 tp->bufmgr_config.mbuf_high_water =
14932 DEFAULT_MB_HIGH_WATER_5705;
14933 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14934 tp->bufmgr_config.mbuf_mac_rx_low_water =
14935 DEFAULT_MB_MACRX_LOW_WATER_5906;
14936 tp->bufmgr_config.mbuf_high_water =
14937 DEFAULT_MB_HIGH_WATER_5906;
14940 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14941 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14942 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14943 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14944 tp->bufmgr_config.mbuf_high_water_jumbo =
14945 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14946 } else {
14947 tp->bufmgr_config.mbuf_read_dma_low_water =
14948 DEFAULT_MB_RDMA_LOW_WATER;
14949 tp->bufmgr_config.mbuf_mac_rx_low_water =
14950 DEFAULT_MB_MACRX_LOW_WATER;
14951 tp->bufmgr_config.mbuf_high_water =
14952 DEFAULT_MB_HIGH_WATER;
14954 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14955 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14956 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14957 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14958 tp->bufmgr_config.mbuf_high_water_jumbo =
14959 DEFAULT_MB_HIGH_WATER_JUMBO;
14962 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14963 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14966 static char * __devinit tg3_phy_string(struct tg3 *tp)
14968 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14969 case TG3_PHY_ID_BCM5400: return "5400";
14970 case TG3_PHY_ID_BCM5401: return "5401";
14971 case TG3_PHY_ID_BCM5411: return "5411";
14972 case TG3_PHY_ID_BCM5701: return "5701";
14973 case TG3_PHY_ID_BCM5703: return "5703";
14974 case TG3_PHY_ID_BCM5704: return "5704";
14975 case TG3_PHY_ID_BCM5705: return "5705";
14976 case TG3_PHY_ID_BCM5750: return "5750";
14977 case TG3_PHY_ID_BCM5752: return "5752";
14978 case TG3_PHY_ID_BCM5714: return "5714";
14979 case TG3_PHY_ID_BCM5780: return "5780";
14980 case TG3_PHY_ID_BCM5755: return "5755";
14981 case TG3_PHY_ID_BCM5787: return "5787";
14982 case TG3_PHY_ID_BCM5784: return "5784";
14983 case TG3_PHY_ID_BCM5756: return "5722/5756";
14984 case TG3_PHY_ID_BCM5906: return "5906";
14985 case TG3_PHY_ID_BCM5761: return "5761";
14986 case TG3_PHY_ID_BCM5718C: return "5718C";
14987 case TG3_PHY_ID_BCM5718S: return "5718S";
14988 case TG3_PHY_ID_BCM57765: return "57765";
14989 case TG3_PHY_ID_BCM5719C: return "5719C";
14990 case TG3_PHY_ID_BCM5720C: return "5720C";
14991 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14992 case 0: return "serdes";
14993 default: return "unknown";
14997 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14999 if (tg3_flag(tp, PCI_EXPRESS)) {
15000 strcpy(str, "PCI Express");
15001 return str;
15002 } else if (tg3_flag(tp, PCIX_MODE)) {
15003 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15005 strcpy(str, "PCIX:");
15007 if ((clock_ctrl == 7) ||
15008 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15009 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15010 strcat(str, "133MHz");
15011 else if (clock_ctrl == 0)
15012 strcat(str, "33MHz");
15013 else if (clock_ctrl == 2)
15014 strcat(str, "50MHz");
15015 else if (clock_ctrl == 4)
15016 strcat(str, "66MHz");
15017 else if (clock_ctrl == 6)
15018 strcat(str, "100MHz");
15019 } else {
15020 strcpy(str, "PCI:");
15021 if (tg3_flag(tp, PCI_HIGH_SPEED))
15022 strcat(str, "66MHz");
15023 else
15024 strcat(str, "33MHz");
15026 if (tg3_flag(tp, PCI_32BIT))
15027 strcat(str, ":32-bit");
15028 else
15029 strcat(str, ":64-bit");
15030 return str;
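/* Example outputs, as built by the strcpy/strcat calls above: "PCI
 * Express" on PCIe parts (note that branch returns before the width
 * suffix is added), or "PCIX:133MHz:64-bit" / "PCI:33MHz:32-bit" on
 * conventional buses. The caller-supplied buffer must hold the longest
 * case; the probe code below passes a char str[40].
 */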
15033 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15035 struct pci_dev *peer;
15036 unsigned int func, devnr = tp->pdev->devfn & ~7;
15038 for (func = 0; func < 8; func++) {
15039 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15040 if (peer && peer != tp->pdev)
15041 break;
15042 pci_dev_put(peer);
15044 /* 5704 can be configured in single-port mode, set peer to
15045 * tp->pdev in that case.
15046 */
15047 if (!peer) {
15048 peer = tp->pdev;
15049 return peer;
15050 }
15051
15052 /*
15053 * We don't need to keep the refcount elevated; there's no way
15054 * to remove one half of this device without removing the other
15055 */
15056 pci_dev_put(peer);
15058 return peer;
15061 static void __devinit tg3_init_coal(struct tg3 *tp)
15063 struct ethtool_coalesce *ec = &tp->coal;
15065 memset(ec, 0, sizeof(*ec));
15066 ec->cmd = ETHTOOL_GCOALESCE;
15067 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15068 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15069 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15070 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15071 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15072 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15073 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15074 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15075 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15077 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15078 HOSTCC_MODE_CLRTICK_TXBD)) {
15079 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15080 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15081 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15082 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15085 if (tg3_flag(tp, 5705_PLUS)) {
15086 ec->rx_coalesce_usecs_irq = 0;
15087 ec->tx_coalesce_usecs_irq = 0;
15088 ec->stats_block_coalesce_usecs = 0;
15092 static const struct net_device_ops tg3_netdev_ops = {
15093 .ndo_open = tg3_open,
15094 .ndo_stop = tg3_close,
15095 .ndo_start_xmit = tg3_start_xmit,
15096 .ndo_get_stats64 = tg3_get_stats64,
15097 .ndo_validate_addr = eth_validate_addr,
15098 .ndo_set_multicast_list = tg3_set_rx_mode,
15099 .ndo_set_mac_address = tg3_set_mac_addr,
15100 .ndo_do_ioctl = tg3_ioctl,
15101 .ndo_tx_timeout = tg3_tx_timeout,
15102 .ndo_change_mtu = tg3_change_mtu,
15103 .ndo_fix_features = tg3_fix_features,
15104 .ndo_set_features = tg3_set_features,
15105 #ifdef CONFIG_NET_POLL_CONTROLLER
15106 .ndo_poll_controller = tg3_poll_controller,
15107 #endif
15110 static int __devinit tg3_init_one(struct pci_dev *pdev,
15111 const struct pci_device_id *ent)
15113 struct net_device *dev;
15114 struct tg3 *tp;
15115 int i, err, pm_cap;
15116 u32 sndmbx, rcvmbx, intmbx;
15117 char str[40];
15118 u64 dma_mask, persist_dma_mask;
15119 u32 features = 0;
15121 printk_once(KERN_INFO "%s\n", version);
15123 err = pci_enable_device(pdev);
15124 if (err) {
15125 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15126 return err;
15129 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15130 if (err) {
15131 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15132 goto err_out_disable_pdev;
15135 pci_set_master(pdev);
15137 /* Find power-management capability. */
15138 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15139 if (pm_cap == 0) {
15140 dev_err(&pdev->dev,
15141 "Cannot find Power Management capability, aborting\n");
15142 err = -EIO;
15143 goto err_out_free_res;
15146 err = pci_set_power_state(pdev, PCI_D0);
15147 if (err) {
15148 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15149 goto err_out_free_res;
15152 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15153 if (!dev) {
15154 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15155 err = -ENOMEM;
15156 goto err_out_power_down;
15159 SET_NETDEV_DEV(dev, &pdev->dev);
15161 tp = netdev_priv(dev);
15162 tp->pdev = pdev;
15163 tp->dev = dev;
15164 tp->pm_cap = pm_cap;
15165 tp->rx_mode = TG3_DEF_RX_MODE;
15166 tp->tx_mode = TG3_DEF_TX_MODE;
15168 if (tg3_debug > 0)
15169 tp->msg_enable = tg3_debug;
15170 else
15171 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15173 /* The word/byte swap controls here control register access byte
15174 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15175 * setting below.
15176 */
15177 tp->misc_host_ctrl =
15178 MISC_HOST_CTRL_MASK_PCI_INT |
15179 MISC_HOST_CTRL_WORD_SWAP |
15180 MISC_HOST_CTRL_INDIR_ACCESS |
15181 MISC_HOST_CTRL_PCISTATE_RW;
15183 /* The NONFRM (non-frame) byte/word swap controls take effect
15184 * on descriptor entries, anything which isn't packet data.
15185 *
15186 * The StrongARM chips on the board (one for tx, one for rx)
15187 * are running in big-endian mode.
15188 */
15189 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15190 GRC_MODE_WSWAP_NONFRM_DATA);
15191 #ifdef __BIG_ENDIAN
15192 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15193 #endif
15194 spin_lock_init(&tp->lock);
15195 spin_lock_init(&tp->indirect_lock);
15196 INIT_WORK(&tp->reset_task, tg3_reset_task);
15198 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15199 if (!tp->regs) {
15200 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15201 err = -ENOMEM;
15202 goto err_out_free_dev;
15205 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15206 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15207 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15208 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15209 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15210 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15211 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15212 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15213 tg3_flag_set(tp, ENABLE_APE);
15214 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15215 if (!tp->aperegs) {
15216 dev_err(&pdev->dev,
15217 "Cannot map APE registers, aborting\n");
15218 err = -ENOMEM;
15219 goto err_out_iounmap;
15223 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15224 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15226 dev->ethtool_ops = &tg3_ethtool_ops;
15227 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15228 dev->netdev_ops = &tg3_netdev_ops;
15229 dev->irq = pdev->irq;
15231 err = tg3_get_invariants(tp);
15232 if (err) {
15233 dev_err(&pdev->dev,
15234 "Problem fetching invariants of chip, aborting\n");
15235 goto err_out_apeunmap;
15238 /* The EPB bridge inside 5714, 5715, and 5780 and any
15239 * device behind the EPB cannot support DMA addresses > 40-bit.
15240 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15241 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15242 * do DMA address check in tg3_start_xmit().
15243 */
15244 if (tg3_flag(tp, IS_5788))
15245 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15246 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15247 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15248 #ifdef CONFIG_HIGHMEM
15249 dma_mask = DMA_BIT_MASK(64);
15250 #endif
15251 } else
15252 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
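/* Two masks are tracked above: dma_mask bounds streaming (per-packet)
 * mappings while persist_dma_mask bounds coherent allocations such as
 * rings and status blocks. With CONFIG_HIGHMEM on a 40-bit part the
 * streaming mask is widened to 64 bits and, per the comment above,
 * tg3_start_xmit() is left to catch buffers mapping above 40 bits.
 */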
15254 /* Configure DMA attributes. */
15255 if (dma_mask > DMA_BIT_MASK(32)) {
15256 err = pci_set_dma_mask(pdev, dma_mask);
15257 if (!err) {
15258 features |= NETIF_F_HIGHDMA;
15259 err = pci_set_consistent_dma_mask(pdev,
15260 persist_dma_mask);
15261 if (err < 0) {
15262 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15263 "DMA for consistent allocations\n");
15264 goto err_out_apeunmap;
15268 if (err || dma_mask == DMA_BIT_MASK(32)) {
15269 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15270 if (err) {
15271 dev_err(&pdev->dev,
15272 "No usable DMA configuration, aborting\n");
15273 goto err_out_apeunmap;
15277 tg3_init_bufmgr_config(tp);
15279 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15281 /* 5700 B0 chips do not support checksumming correctly due
15282 * to hardware bugs.
15283 */
15284 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15285 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15287 if (tg3_flag(tp, 5755_PLUS))
15288 features |= NETIF_F_IPV6_CSUM;
15291 /* TSO is on by default on chips that support hardware TSO.
15292 * Firmware TSO on older chips gives lower performance, so it
15293 * is off by default, but can be enabled using ethtool.
15294 */
15295 if ((tg3_flag(tp, HW_TSO_1) ||
15296 tg3_flag(tp, HW_TSO_2) ||
15297 tg3_flag(tp, HW_TSO_3)) &&
15298 (features & NETIF_F_IP_CSUM))
15299 features |= NETIF_F_TSO;
15300 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15301 if (features & NETIF_F_IPV6_CSUM)
15302 features |= NETIF_F_TSO6;
15303 if (tg3_flag(tp, HW_TSO_3) ||
15304 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15305 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15306 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15307 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15308 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15309 features |= NETIF_F_TSO_ECN;
15312 dev->features |= features;
15313 dev->vlan_features |= features;
15314
15315 /*
15316 * Add loopback capability only for a subset of devices that support
15317 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15318 * loopback for the remaining devices.
15319 */
15320 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15321 !tg3_flag(tp, CPMU_PRESENT))
15322 /* Add the loopback capability */
15323 features |= NETIF_F_LOOPBACK;
15325 dev->hw_features |= features;
15327 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15328 !tg3_flag(tp, TSO_CAPABLE) &&
15329 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15330 tg3_flag_set(tp, MAX_RXPEND_64);
15331 tp->rx_pending = 63;
15334 err = tg3_get_device_address(tp);
15335 if (err) {
15336 dev_err(&pdev->dev,
15337 "Could not obtain valid ethernet address, aborting\n");
15338 goto err_out_apeunmap;
15339 }
15340
15341 /*
15342 * Reset the chip in case the UNDI or EFI driver did not shut down
15343 * DMA; the DMA self test below will enable WDMAC and we'll see
15344 * (spurious) pending DMA on the PCI bus at that point.
15345 */
15346 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15347 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15348 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15349 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15352 err = tg3_test_dma(tp);
15353 if (err) {
15354 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15355 goto err_out_apeunmap;
15358 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15359 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15360 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15361 for (i = 0; i < tp->irq_max; i++) {
15362 struct tg3_napi *tnapi = &tp->napi[i];
15364 tnapi->tp = tp;
15365 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15367 tnapi->int_mbox = intmbx;
15368 if (i < 4)
15369 intmbx += 0x8;
15370 else
15371 intmbx += 0x4;
15373 tnapi->consmbox = rcvmbx;
15374 tnapi->prodmbox = sndmbx;
15376 if (i)
15377 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15378 else
15379 tnapi->coal_now = HOSTCC_MODE_NOW;
15381 if (!tg3_flag(tp, SUPPORT_MSIX))
15382 break;
15383
15384 /*
15385 * If we support MSIX, we'll be using RSS. If we're using
15386 * RSS, the first vector only handles link interrupts and the
15387 * remaining vectors handle rx and tx interrupts. Reuse the
15388 * mailbox values for the next iteration. The values we set up
15389 * above are still useful for the single vectored mode.
15390 */
15391 if (!i)
15392 continue;
15394 rcvmbx += 0x8;
15396 if (sndmbx & 0x4)
15397 sndmbx -= 0x4;
15398 else
15399 sndmbx += 0xc;
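/* The alternating -0x4/+0xc step above nets out to an 0x8 stride per
 * vector while swapping which half of each 64-bit send mailbox is used
 * first (offsets 0x4, 0x0, 0xc, 0x8, ... from the mailbox base).
 * Inferred from the arithmetic; the register layout itself is not
 * spelled out here.
 */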
15402 tg3_init_coal(tp);
15404 pci_set_drvdata(pdev, dev);
15406 if (tg3_flag(tp, 5717_PLUS)) {
15407 /* Resume a low-power mode */
15408 tg3_frob_aux_power(tp, false);
15411 err = register_netdev(dev);
15412 if (err) {
15413 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15414 goto err_out_apeunmap;
15417 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15418 tp->board_part_number,
15419 tp->pci_chip_rev_id,
15420 tg3_bus_string(tp, str),
15421 dev->dev_addr);
15423 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15424 struct phy_device *phydev;
15425 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15426 netdev_info(dev,
15427 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15428 phydev->drv->name, dev_name(&phydev->dev));
15429 } else {
15430 char *ethtype;
15432 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15433 ethtype = "10/100Base-TX";
15434 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15435 ethtype = "1000Base-SX";
15436 else
15437 ethtype = "10/100/1000Base-T";
15439 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15440 "(WireSpeed[%d], EEE[%d])\n",
15441 tg3_phy_string(tp), ethtype,
15442 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15443 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15446 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15447 (dev->features & NETIF_F_RXCSUM) != 0,
15448 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15449 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15450 tg3_flag(tp, ENABLE_ASF) != 0,
15451 tg3_flag(tp, TSO_CAPABLE) != 0);
15452 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15453 tp->dma_rwctrl,
15454 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15455 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15457 pci_save_state(pdev);
15459 return 0;
15461 err_out_apeunmap:
15462 if (tp->aperegs) {
15463 iounmap(tp->aperegs);
15464 tp->aperegs = NULL;
15467 err_out_iounmap:
15468 if (tp->regs) {
15469 iounmap(tp->regs);
15470 tp->regs = NULL;
15473 err_out_free_dev:
15474 free_netdev(dev);
15476 err_out_power_down:
15477 pci_set_power_state(pdev, PCI_D3hot);
15479 err_out_free_res:
15480 pci_release_regions(pdev);
15482 err_out_disable_pdev:
15483 pci_disable_device(pdev);
15484 pci_set_drvdata(pdev, NULL);
15485 return err;
15488 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15490 struct net_device *dev = pci_get_drvdata(pdev);
15492 if (dev) {
15493 struct tg3 *tp = netdev_priv(dev);
15495 if (tp->fw)
15496 release_firmware(tp->fw);
15498 cancel_work_sync(&tp->reset_task);
15500 if (!tg3_flag(tp, USE_PHYLIB)) {
15501 tg3_phy_fini(tp);
15502 tg3_mdio_fini(tp);
15505 unregister_netdev(dev);
15506 if (tp->aperegs) {
15507 iounmap(tp->aperegs);
15508 tp->aperegs = NULL;
15510 if (tp->regs) {
15511 iounmap(tp->regs);
15512 tp->regs = NULL;
15514 free_netdev(dev);
15515 pci_release_regions(pdev);
15516 pci_disable_device(pdev);
15517 pci_set_drvdata(pdev, NULL);
15521 #ifdef CONFIG_PM_SLEEP
15522 static int tg3_suspend(struct device *device)
15524 struct pci_dev *pdev = to_pci_dev(device);
15525 struct net_device *dev = pci_get_drvdata(pdev);
15526 struct tg3 *tp = netdev_priv(dev);
15527 int err;
15529 if (!netif_running(dev))
15530 return 0;
15532 flush_work_sync(&tp->reset_task);
15533 tg3_phy_stop(tp);
15534 tg3_netif_stop(tp);
15536 del_timer_sync(&tp->timer);
15538 tg3_full_lock(tp, 1);
15539 tg3_disable_ints(tp);
15540 tg3_full_unlock(tp);
15542 netif_device_detach(dev);
15544 tg3_full_lock(tp, 0);
15545 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15546 tg3_flag_clear(tp, INIT_COMPLETE);
15547 tg3_full_unlock(tp);
15549 err = tg3_power_down_prepare(tp);
15550 if (err) {
15551 int err2;
15553 tg3_full_lock(tp, 0);
15555 tg3_flag_set(tp, INIT_COMPLETE);
15556 err2 = tg3_restart_hw(tp, 1);
15557 if (err2)
15558 goto out;
15560 tp->timer.expires = jiffies + tp->timer_offset;
15561 add_timer(&tp->timer);
15563 netif_device_attach(dev);
15564 tg3_netif_start(tp);
15566 out:
15567 tg3_full_unlock(tp);
15569 if (!err2)
15570 tg3_phy_start(tp);
15573 return err;
15576 static int tg3_resume(struct device *device)
15578 struct pci_dev *pdev = to_pci_dev(device);
15579 struct net_device *dev = pci_get_drvdata(pdev);
15580 struct tg3 *tp = netdev_priv(dev);
15581 int err;
15583 if (!netif_running(dev))
15584 return 0;
15586 netif_device_attach(dev);
15588 tg3_full_lock(tp, 0);
15590 tg3_flag_set(tp, INIT_COMPLETE);
15591 err = tg3_restart_hw(tp, 1);
15592 if (err)
15593 goto out;
15595 tp->timer.expires = jiffies + tp->timer_offset;
15596 add_timer(&tp->timer);
15598 tg3_netif_start(tp);
15600 out:
15601 tg3_full_unlock(tp);
15603 if (!err)
15604 tg3_phy_start(tp);
15606 return err;
15609 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
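/* SIMPLE_DEV_PM_OPS() expands to a struct dev_pm_ops whose system-sleep
 * callbacks (suspend/resume, freeze/thaw, poweroff/restore) all point
 * at tg3_suspend/tg3_resume; runtime-PM callbacks are left unset.
 */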
15610 #define TG3_PM_OPS (&tg3_pm_ops)
15612 #else
15614 #define TG3_PM_OPS NULL
15616 #endif /* CONFIG_PM_SLEEP */
15617
15618 /**
15619 * tg3_io_error_detected - called when PCI error is detected
15620 * @pdev: Pointer to PCI device
15621 * @state: The current pci connection state
15622 *
15623 * This function is called after a PCI bus error affecting
15624 * this device has been detected.
15625 */
15626 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15627 pci_channel_state_t state)
15629 struct net_device *netdev = pci_get_drvdata(pdev);
15630 struct tg3 *tp = netdev_priv(netdev);
15631 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15633 netdev_info(netdev, "PCI I/O error detected\n");
15635 rtnl_lock();
15637 if (!netif_running(netdev))
15638 goto done;
15640 tg3_phy_stop(tp);
15642 tg3_netif_stop(tp);
15644 del_timer_sync(&tp->timer);
15645 tg3_flag_clear(tp, RESTART_TIMER);
15647 /* Want to make sure that the reset task doesn't run */
15648 cancel_work_sync(&tp->reset_task);
15649 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15650 tg3_flag_clear(tp, RESTART_TIMER);
15652 netif_device_detach(netdev);
15654 /* Clean up software state, even if MMIO is blocked */
15655 tg3_full_lock(tp, 0);
15656 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15657 tg3_full_unlock(tp);
15659 done:
15660 if (state == pci_channel_io_perm_failure)
15661 err = PCI_ERS_RESULT_DISCONNECT;
15662 else
15663 pci_disable_device(pdev);
15665 rtnl_unlock();
15667 return err;
15668 }
15669
15670 /**
15671 * tg3_io_slot_reset - called after the pci bus has been reset.
15672 * @pdev: Pointer to PCI device
15673 *
15674 * Restart the card from scratch, as if from a cold-boot.
15675 * At this point, the card has experienced a hard reset,
15676 * followed by fixups by BIOS, and has its config space
15677 * set up identically to what it was at cold boot.
15678 */
15679 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15681 struct net_device *netdev = pci_get_drvdata(pdev);
15682 struct tg3 *tp = netdev_priv(netdev);
15683 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15684 int err;
15686 rtnl_lock();
15688 if (pci_enable_device(pdev)) {
15689 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15690 goto done;
15693 pci_set_master(pdev);
15694 pci_restore_state(pdev);
15695 pci_save_state(pdev);
15697 if (!netif_running(netdev)) {
15698 rc = PCI_ERS_RESULT_RECOVERED;
15699 goto done;
15702 err = tg3_power_up(tp);
15703 if (err)
15704 goto done;
15706 rc = PCI_ERS_RESULT_RECOVERED;
15708 done:
15709 rtnl_unlock();
15711 return rc;
15712 }
15713
15714 /**
15715 * tg3_io_resume - called when traffic can start flowing again.
15716 * @pdev: Pointer to PCI device
15717 *
15718 * This callback is called when the error recovery driver tells
15719 * us that it's OK to resume normal operation.
15720 */
15721 static void tg3_io_resume(struct pci_dev *pdev)
15723 struct net_device *netdev = pci_get_drvdata(pdev);
15724 struct tg3 *tp = netdev_priv(netdev);
15725 int err;
15727 rtnl_lock();
15729 if (!netif_running(netdev))
15730 goto done;
15732 tg3_full_lock(tp, 0);
15733 tg3_flag_set(tp, INIT_COMPLETE);
15734 err = tg3_restart_hw(tp, 1);
15735 tg3_full_unlock(tp);
15736 if (err) {
15737 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15738 goto done;
15741 netif_device_attach(netdev);
15743 tp->timer.expires = jiffies + tp->timer_offset;
15744 add_timer(&tp->timer);
15746 tg3_netif_start(tp);
15748 tg3_phy_start(tp);
15750 done:
15751 rtnl_unlock();
15754 static struct pci_error_handlers tg3_err_handler = {
15755 .error_detected = tg3_io_error_detected,
15756 .slot_reset = tg3_io_slot_reset,
15757 .resume = tg3_io_resume
15758 };
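/* Together these hooks implement the standard PCI error-recovery
 * sequence: the core calls ->error_detected() when a bus error is seen,
 * ->slot_reset() after the link/slot has been reset, and ->resume()
 * once traffic may flow again.
 */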
15760 static struct pci_driver tg3_driver = {
15761 .name = DRV_MODULE_NAME,
15762 .id_table = tg3_pci_tbl,
15763 .probe = tg3_init_one,
15764 .remove = __devexit_p(tg3_remove_one),
15765 .err_handler = &tg3_err_handler,
15766 .driver.pm = TG3_PM_OPS,
15769 static int __init tg3_init(void)
15771 return pci_register_driver(&tg3_driver);
15774 static void __exit tg3_cleanup(void)
15776 pci_unregister_driver(&tg3_driver);
15779 module_init(tg3_init);
15780 module_exit(tg3_cleanup);